// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include <linux/namei.h>
#include "misc.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"
#include "discard.h"
#include "zoned.h"

#define BTRFS_BLOCK_GROUP_STRIPE_MASK	(BTRFS_BLOCK_GROUP_RAID0 | \
					 BTRFS_BLOCK_GROUP_RAID10 | \
					 BTRFS_BLOCK_GROUP_RAID56_MASK)

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes	= 2,
		.dev_stripes	= 1,
		.devs_max	= 0,	/* 0 == as many as possible */
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity        = 0,
		.raid_name	= "raid10",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 2,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity        = 0,
		.raid_name	= "raid1",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C3] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 3,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 3,
		.ncopies	= 3,
		.nparity        = 0,
		.raid_name	= "raid1c3",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C3,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C4] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 4,
		.devs_min	= 4,
		.tolerated_failures = 3,
		.devs_increment	= 4,
		.ncopies	= 4,
		.nparity        = 0,
		.raid_name	= "raid1c4",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C4,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes	= 1,
		.dev_stripes	= 2,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 2,
		.nparity        = 0,
		.raid_name	= "dup",
		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 0,
		.raid_name	= "raid0",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 0,
		.raid_name	= "single",
		.bg_flag	= 0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 1,
		.raid_name	= "raid5",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 2,
		.raid_name	= "raid6",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};

/*
 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
 * can be used as index to access btrfs_raid_array[].
 */
enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags)
{
	if (flags & BTRFS_BLOCK_GROUP_RAID10)
		return BTRFS_RAID_RAID10;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
		return BTRFS_RAID_RAID1;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1C3)
		return BTRFS_RAID_RAID1C3;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1C4)
		return BTRFS_RAID_RAID1C4;
	else if (flags & BTRFS_BLOCK_GROUP_DUP)
		return BTRFS_RAID_DUP;
	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
		return BTRFS_RAID_RAID0;
	else if (flags & BTRFS_BLOCK_GROUP_RAID5)
		return BTRFS_RAID_RAID5;
	else if (flags & BTRFS_BLOCK_GROUP_RAID6)
		return BTRFS_RAID_RAID6;

	return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
}

const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}
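/*
 * Illustrative sketch (hypothetical caller, not code from this file): the
 * attributes of a block group's profile are reached through the raid index,
 * e.g.
 *
 *	const struct btrfs_raid_attr *attr;
 *
 *	attr = &btrfs_raid_array[btrfs_bg_flags_to_raid_index(bg_flags)];
 *	max_failures = attr->tolerated_failures;
 */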
/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

#define DESCRIBE_FLAG(flag, desc)				\
	do {							\
		if (flags & (flag)) {				\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)		\
				goto out_overflow;		\
			size_bp -= ret;				\
			bp += ret;				\
			flags &= ~(flag);			\
		}						\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide sufficiently
	 * large buffer
	 */
out_overflow:;
}

static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_io_context **bioc_ret,
			     int mirror_num, int need_raid_map);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list in general
 * but in mount context it could be used to exclude list modifications by eg.
 * scan ioctl
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, ie. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * Is not required at mount and close times, because our device list is
 * protected by the uuid_mutex at that point.
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   device_list_mutex
 *     chunk_mutex
 *   balance_mutex
 *
 *
 * Exclusive operations
 * ====================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * The status of exclusive operation is set and cleared atomically.
 * During the course of Paused state, fs_info::exclusive_operation remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * The exclusive status is cleared when the device operation is canceled or
 * completed.
 */
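/*
 * Illustrative sketch of the nesting order above (hypothetical caller, not
 * code from this file): an operation that needed all three nested locks
 * would take and release them as
 *
 *	mutex_lock(&uuid_mutex);
 *	mutex_lock(&fs_devices->device_list_mutex);
 *	mutex_lock(&fs_info->chunk_mutex);
 *	...
 *	mutex_unlock(&fs_info->chunk_mutex);
 *	mutex_unlock(&fs_devices->device_list_mutex);
 *	mutex_unlock(&uuid_mutex);
 */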
DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:		if not NULL, copy the UUID to fs_devices::fsid
 * @metadata_fsid:	if not NULL, copy the UUID to fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
						 const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	INIT_LIST_HEAD(&fs_devs->seed_list);
	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);

	if (metadata_fsid)
		memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
	else if (fsid)
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);

	return fs_devs;
}

void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	extent_io_tree_release(&device->alloc_state);
	bio_put(device->flush_bio);
	btrfs_destroy_dev_zone_info(device);
	kfree(device);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}

void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}

static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (metadata_fsid) {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
			    && memcmp(metadata_fsid, fs_devices->metadata_uuid,
				      BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		} else {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		}
	}
	return NULL;
}

static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
				struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by first scanning
	 * a device which didn't have its fsid/metadata_uuid changed
	 * at all and the CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(disk_super->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}
	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by a device that
	 * has an outdated pair of fsid/metadata_uuid and
	 * CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(fs_devices->metadata_uuid,
			   fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
		    memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
}

static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct btrfs_super_block **disk_super)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		sync_blockdev(*bdev);
	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*disk_super = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*disk_super)) {
		ret = PTR_ERR(*disk_super);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	return ret;
}
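/*
 * Illustrative call pattern for btrfs_get_bdev_and_sb() (hypothetical
 * caller, error handling elided): on success both the block device and the
 * super block are handed back and must be released by the caller, e.g.
 *
 *	ret = btrfs_get_bdev_and_sb(path, FMODE_READ | FMODE_EXCL, holder, 1,
 *				    &bdev, &disk_super);
 *	if (!ret) {
 *		...
 *		btrfs_release_disk_super(disk_super);
 *		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
 *	}
 */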
/**
 * Search and remove all stale devices (which are not mounted). When both
 * inputs are NULL, it will search and release all stale devices.
 *
 * @devt:	  Optional. When provided, it will release all unmounted
 *		  devices matching this devt only.
 * @skip_device:  Optional. Will skip this device when searching for the stale
 *		  devices.
 *
 * Return:	0 for success or if @devt is 0.
 *		-EBUSY if @devt is a mounted device.
 *		-ENOENT if @devt does not match any device in the list.
 */
static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	if (devt)
		ret = -ENOENT;

	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (devt && devt != device->devt)
				continue;
			if (fs_devices->opened) {
				/* for an already deleted device return 0 */
				if (devt && ret != 0)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			ret = 0;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	return ret;
}

/*
 * This is only used on mount, and we are protected from competing things
 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 * fs_devices->device_list_mutex here.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
			struct btrfs_device *device, fmode_t flags,
			void *holder)
{
	struct block_device *bdev;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev, &disk_super);
	if (ret)
		return ret;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_free_page;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_free_page;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_free_page;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = true;
	} else {
		if (bdev_read_only(bdev))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
		fs_devices->rotating = true;

	device->bdev = bdev;
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	device->mode = flags;

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	btrfs_release_disk_super(disk_super);

	return 0;

error_free_page:
	btrfs_release_disk_super(disk_super);
	blkdev_put(bdev, flags);

	return -EINVAL;
}

/*
 * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
 * being created with a disk that has already completed its fsid change. Such
 * disk can belong to an fs which has its FSID changed or to one which doesn't.
 * Handle both cases here.
 */
static struct btrfs_fs_devices *find_fsid_inprogress(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, NULL);
}

static struct btrfs_fs_devices *find_fsid_changed(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handles the case where the scanned device is part of an fs that had
	 * multiple successful changes of FSID but the current device didn't
	 * observe it. Meaning our fsid will be different than theirs. We need
	 * to handle two subcases:
	 * 1 - The fs still continues to have different METADATA/FSID uuids.
	 * 2 - The fs is switched back to its original FSID (METADATA/FSID
	 * are equal).
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		/* Changed UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->fsid,
			   BTRFS_FSID_SIZE) != 0)
			return fs_devices;

		/* Unchanged UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}

	return NULL;
}

static struct btrfs_fs_devices *find_fsid_reverted_metadata(
				struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle the case where the scanned device is part of an fs whose last
	 * metadata UUID change reverted it to the original FSID. At the same
	 * time fs_devices was first created by another constituent device
	 * which didn't fully observe the operation. This results in a
	 * btrfs_fs_devices created with metadata/fsid different AND
	 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
	 * fs_devices equal to the FSID of the disk.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    fs_devices->fsid_change)
			return fs_devices;
	}

	return NULL;
}

/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_t path_devt;
	int error;
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
					BTRFS_SUPER_FLAG_CHANGING_FSID_V2);

	error = lookup_bdev(path, &path_devt);
	if (error)
		return ERR_PTR(error);

	if (fsid_change_in_progress) {
		if (!has_metadata_uuid)
			fs_devices = find_fsid_inprogress(disk_super);
		else
			fs_devices = find_fsid_changed(disk_super);
	} else if (has_metadata_uuid) {
		fs_devices = find_fsid_with_metadata_uuid(disk_super);
	} else {
		fs_devices = find_fsid_reverted_metadata(disk_super);
		if (!fs_devices)
			fs_devices = find_fsid(disk_super->fsid, NULL);
	}

	if (!fs_devices) {
		if (has_metadata_uuid)
			fs_devices = alloc_fs_devices(disk_super->fsid,
						      disk_super->metadata_uuid);
		else
			fs_devices = alloc_fs_devices(disk_super->fsid, NULL);

		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		fs_devices->fsid_change = fsid_change_in_progress;

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		struct btrfs_dev_lookup_args args = {
			.devid = devid,
			.uuid = disk_super->dev_item.uuid,
		};

		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, &args);

		/*
		 * If this disk has been pulled into an fs devices created by
		 * a device which had the CHANGING_FSID_V2 flag then replace the
		 * metadata_uuid/fsid values of the fs_devices.
		 */
		if (fs_devices->fsid_change &&
		    found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
					BTRFS_FSID_SIZE);

			if (has_metadata_uuid)
				memcpy(fs_devices->metadata_uuid,
				       disk_super->metadata_uuid,
				       BTRFS_FSID_SIZE);
			else
				memcpy(fs_devices->metadata_uuid,
				       disk_super->fsid, BTRFS_FSID_SIZE);

			fs_devices->fsid_change = false;
		}
	}

	if (!device) {
		if (fs_devices->opened) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			btrfs_free_device(device);
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_assign_pointer(device->name, name);
		device->devt = path_devt;

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info(
	"BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->label, devid, found_transid, path,
				current->comm, task_pid_nr(current));
		else
			pr_info(
	"BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->fsid, devid, found_transid, path,
				current->comm, task_pid_nr(current));

	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *	a. The same device disappeared and reappeared with
		 *	   different name. or
		 *	b. The missing-disk-which-was-replaced, has
		 *	   reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transaction when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted. We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with same uuid and devid. We keep the one
			 * with larger generation number or the last-in if
			 * generations are equal.
			 */
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EEXIST);
		}

		/*
		 * We are going to replace the device path for a given devid,
		 * make sure it's the same device if the device is mounted
		 */
		if (device->bdev) {
			if (device->devt != path_devt) {
				mutex_unlock(&fs_devices->device_list_mutex);
				/*
				 * device->fs_info may not be reliable here, so
				 * pass in a NULL instead. This avoids a
				 * possible use-after-free when the fs_info and
				 * fs_info->sb are already torn down.
				 */
				btrfs_warn_in_rcu(NULL,
	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
						  path, devid, found_transid,
						  current->comm,
						  task_pid_nr(current));
				return ERR_PTR(-EEXIST);
			}
			btrfs_info_in_rcu(device->fs_info,
	"devid %llu device path %s changed to %s scanned by %s (%d)",
					  devid, rcu_str_deref(device->name),
					  path, current->comm,
					  task_pid_nr(current));
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
		device->devt = path_devt;
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);
	return device;
}
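/*
 * Illustrative flow (simplified sketch, not code from this file): the scan
 * path reaches device_list_add() roughly as
 *
 *	mutex_lock(&uuid_mutex);
 *	device = device_list_add(path, disk_super, &new_device_added);
 *	mutex_unlock(&uuid_mutex);
 *
 * with stale devices matching the new device's devt pruned afterwards, see
 * btrfs_scan_one_device() below.
 */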
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	fs_devices = alloc_fs_devices(orig->fsid, NULL);
	if (IS_ERR(fs_devices))
		return fs_devices;

	fs_devices->total_devices = orig->total_devices;

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device)) {
			ret = PTR_ERR(device);
			goto error;
		}

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str,
					GFP_KERNEL);
			if (!name) {
				btrfs_free_device(device);
				ret = -ENOMEM;
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(ret);
}

static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
				      struct btrfs_device **latest_dev)
{
	struct btrfs_device *device, *next;

	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			    !test_bit(BTRFS_DEV_STATE_MISSING,
				      &device->dev_state) &&
			    (!*latest_dev ||
			     device->generation > (*latest_dev)->generation)) {
				*latest_dev = device;
			}
			continue;
		}

		/*
		 * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID,
		 * in btrfs_init_dev_replace() so just continue.
		 */
		if (device->devid == BTRFS_DEV_REPLACE_DEVID)
			continue;

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}
}

/*
 * After we have read the system tree and know devids belonging to this
 * filesystem, remove the device which does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_fs_devices *seed_dev;

	mutex_lock(&uuid_mutex);
	__btrfs_free_extra_devids(fs_devices, &latest_dev);

	list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
		__btrfs_free_extra_devids(seed_dev, &latest_dev);

	fs_devices->latest_dev = latest_dev;

	mutex_unlock(&uuid_mutex);
}

static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	blkdev_put(device->bdev, device->mode);
}

static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->devid == BTRFS_DEV_REPLACE_DEVID)
		clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
		clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		fs_devices->missing_devices--;
	}

	btrfs_close_bdev(device);
	if (device->bdev) {
		fs_devices->open_devices--;
		device->bdev = NULL;
	}
	clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	btrfs_destroy_dev_zone_info(device);

	device->fs_info = NULL;
	atomic_set(&device->dev_stats_ccnt, 0);
	extent_io_tree_release(&device->alloc_state);

	/*
	 * Reset the flush error record. We might have a transient flush error
	 * in this mount, and if so we aborted the current transaction and set
	 * the fs to an error state, guaranteeing no super blocks can be further
	 * committed. However that error might be transient and if we unmount the
However that error might be transient and if we unmount the 1141 * filesystem and mount it again, we should allow the mount to succeed 1142 * (btrfs_check_rw_degradable() should not fail) - if after mounting the 1143 * filesystem again we still get flush errors, then we will again abort 1144 * any transaction and set the error state, guaranteeing no commits of 1145 * unsafe super blocks. 1146 */ 1147 device->last_flush_error = 0; 1148 1149 /* Verify the device is back in a pristine state */ 1150 ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state)); 1151 ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)); 1152 ASSERT(list_empty(&device->dev_alloc_list)); 1153 ASSERT(list_empty(&device->post_commit_list)); 1154 } 1155 1156 static void close_fs_devices(struct btrfs_fs_devices *fs_devices) 1157 { 1158 struct btrfs_device *device, *tmp; 1159 1160 lockdep_assert_held(&uuid_mutex); 1161 1162 if (--fs_devices->opened > 0) 1163 return; 1164 1165 list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) 1166 btrfs_close_one_device(device); 1167 1168 WARN_ON(fs_devices->open_devices); 1169 WARN_ON(fs_devices->rw_devices); 1170 fs_devices->opened = 0; 1171 fs_devices->seeding = false; 1172 fs_devices->fs_info = NULL; 1173 } 1174 1175 void btrfs_close_devices(struct btrfs_fs_devices *fs_devices) 1176 { 1177 LIST_HEAD(list); 1178 struct btrfs_fs_devices *tmp; 1179 1180 mutex_lock(&uuid_mutex); 1181 close_fs_devices(fs_devices); 1182 if (!fs_devices->opened) 1183 list_splice_init(&fs_devices->seed_list, &list); 1184 1185 list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) { 1186 close_fs_devices(fs_devices); 1187 list_del(&fs_devices->seed_list); 1188 free_fs_devices(fs_devices); 1189 } 1190 mutex_unlock(&uuid_mutex); 1191 } 1192 1193 static int open_fs_devices(struct btrfs_fs_devices *fs_devices, 1194 fmode_t flags, void *holder) 1195 { 1196 struct btrfs_device *device; 1197 struct btrfs_device *latest_dev = NULL; 1198 struct btrfs_device *tmp_device; 1199 1200 flags |= FMODE_EXCL; 1201 1202 list_for_each_entry_safe(device, tmp_device, &fs_devices->devices, 1203 dev_list) { 1204 int ret; 1205 1206 ret = btrfs_open_one_device(fs_devices, device, flags, holder); 1207 if (ret == 0 && 1208 (!latest_dev || device->generation > latest_dev->generation)) { 1209 latest_dev = device; 1210 } else if (ret == -ENODATA) { 1211 fs_devices->num_devices--; 1212 list_del(&device->dev_list); 1213 btrfs_free_device(device); 1214 } 1215 } 1216 if (fs_devices->open_devices == 0) 1217 return -EINVAL; 1218 1219 fs_devices->opened = 1; 1220 fs_devices->latest_dev = latest_dev; 1221 fs_devices->total_rw_bytes = 0; 1222 fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR; 1223 fs_devices->read_policy = BTRFS_READ_POLICY_PID; 1224 1225 return 0; 1226 } 1227 1228 static int devid_cmp(void *priv, const struct list_head *a, 1229 const struct list_head *b) 1230 { 1231 const struct btrfs_device *dev1, *dev2; 1232 1233 dev1 = list_entry(a, struct btrfs_device, dev_list); 1234 dev2 = list_entry(b, struct btrfs_device, dev_list); 1235 1236 if (dev1->devid < dev2->devid) 1237 return -1; 1238 else if (dev1->devid > dev2->devid) 1239 return 1; 1240 return 0; 1241 } 1242 1243 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, 1244 fmode_t flags, void *holder) 1245 { 1246 int ret; 1247 1248 lockdep_assert_held(&uuid_mutex); 1249 /* 1250 * The device_list_mutex cannot be taken here in case opening the 1251 * underlying device takes further locks like open_mutex. 
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);
	/*
	 * The device_list_mutex cannot be taken here in case opening the
	 * underlying device takes further locks like open_mutex.
	 *
	 * We also don't need the lock here as this is called during mount and
	 * exclusion is provided by uuid_mutex
	 */

	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}

	return ret;
}

void btrfs_release_disk_super(struct btrfs_super_block *super)
{
	struct page *page = virt_to_page(super);

	put_page(page);
}

static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
						       u64 bytenr, u64 bytenr_orig)
{
	struct btrfs_super_block *disk_super;
	struct page *page;
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= bdev_nr_bytes(bdev))
		return ERR_PTR(-EINVAL);

	/* make sure our super fits in the page */
	if (sizeof(*disk_super) > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
		return ERR_PTR(-EINVAL);

	/* pull in the page with our super */
	page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);

	if (IS_ERR(page))
		return ERR_CAST(page);

	p = page_address(page);

	/* align our pointer to the offset of the super block */
	disk_super = p + offset_in_page(bytenr);

	if (btrfs_super_bytenr(disk_super) != bytenr_orig ||
	    btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(p);
		return ERR_PTR(-EINVAL);
	}

	if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
		disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;

	return disk_super;
}
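/*
 * Worked example for the checks above (illustrative): with 4K pages, the
 * primary super block at bytenr 65536 maps to page index 16 (65536 >> 12)
 * with offset_in_page() == 0, so a super block of at most PAGE_SIZE bytes
 * cannot straddle a page boundary there.
 */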
int btrfs_forget_devices(dev_t devt)
{
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_free_stale_devices(devt, NULL);
	mutex_unlock(&uuid_mutex);

	return ret;
}

/*
 * Look for a btrfs signature on a device. This may be called out of the mount path
 * and we are not allowed to call set_blocksize during the scan. The superblock
 * is read via pagecache
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
					   void *holder)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct block_device *bdev;
	u64 bytenr, bytenr_orig;
	int ret;

	lockdep_assert_held(&uuid_mutex);

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	flags |= FMODE_EXCL;

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	bytenr_orig = btrfs_sb_offset(0);
	ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr);
	if (ret) {
		device = ERR_PTR(ret);
		goto error_bdev_put;
	}

	disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig);
	if (IS_ERR(disk_super)) {
		device = ERR_CAST(disk_super);
		goto error_bdev_put;
	}

	device = device_list_add(path, disk_super, &new_device_added);
	if (!IS_ERR(device) && new_device_added)
		btrfs_free_stale_devices(device->devt, device);

	btrfs_release_disk_super(disk_super);

error_bdev_put:
	blkdev_put(bdev, flags);

	return device;
}

/*
 * Try to find a chunk that intersects [start, start + len] range and when one
 * such is found, record the end of it in *start
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{
	u64 physical_start, physical_end;

	lockdep_assert_held(&device->fs_info->chunk_mutex);

	if (!find_first_extent_bit(&device->alloc_state, *start,
				   &physical_start, &physical_end,
				   CHUNK_ALLOCATED, NULL)) {

		if (in_range(physical_start, *start, len) ||
		    in_range(*start, physical_start,
			     physical_end - physical_start)) {
			*start = physical_end + 1;
			return true;
		}
	}
	return false;
}

static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
{
	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		/*
		 * We don't want to overwrite the superblock on the drive nor
		 * any area used by the boot loader (grub for example), so we
		 * make sure to start at an offset of at least 1MB.
		 */
		return max_t(u64, start, SZ_1M);
	case BTRFS_CHUNK_ALLOC_ZONED:
		/*
		 * We don't care about the starting region like regular
		 * allocator, because we anyway use/reserve the first two zones
		 * for superblock logging.
		 */
		return ALIGN(start, device->zone_info->zone_size);
	default:
		BUG();
	}
}

static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
					u64 *hole_start, u64 *hole_size,
					u64 num_bytes)
{
	u64 zone_size = device->zone_info->zone_size;
	u64 pos;
	int ret;
	bool changed = false;

	ASSERT(IS_ALIGNED(*hole_start, zone_size));

	while (*hole_size > 0) {
		pos = btrfs_find_allocatable_zones(device, *hole_start,
						   *hole_start + *hole_size,
						   num_bytes);
		if (pos != *hole_start) {
			*hole_size = *hole_start + *hole_size - pos;
			*hole_start = pos;
			changed = true;
			if (*hole_size < num_bytes)
				break;
		}

		ret = btrfs_ensure_empty_zones(device, pos, num_bytes);

		/* Range is ensured to be empty */
		if (!ret)
			return changed;

		/* Given hole range was invalid (outside of device) */
		if (ret == -ERANGE) {
			*hole_start += *hole_size;
			*hole_size = 0;
			return true;
		}

		*hole_start += zone_size;
		*hole_size -= zone_size;
		changed = true;
	}

	return changed;
}

/**
 * dev_extent_hole_check - check if specified hole is suitable for allocation
 * @device:	the device which we have the hole
 * @hole_start: starting position of the hole
 * @hole_size:	the size of the hole
 * @num_bytes:	the size of the free space that we need
 *
 * This function may modify @hole_start and @hole_size to reflect the suitable
 * position for allocation. Returns true if the hole position is updated,
 * false otherwise.
 */
static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
				  u64 *hole_size, u64 num_bytes)
{
	bool changed = false;
	u64 hole_end = *hole_start + *hole_size;

	for (;;) {
		/*
		 * Check before we set max_hole_start, otherwise we could end up
		 * sending back this offset anyway.
		 */
		if (contains_pending_extent(device, hole_start, *hole_size)) {
			if (hole_end >= *hole_start)
				*hole_size = hole_end - *hole_start;
			else
				*hole_size = 0;
			changed = true;
		}

		switch (device->fs_devices->chunk_alloc_policy) {
		case BTRFS_CHUNK_ALLOC_REGULAR:
			/* No extra check */
			break;
		case BTRFS_CHUNK_ALLOC_ZONED:
			if (dev_extent_hole_check_zoned(device, hole_start,
							hole_size, num_bytes)) {
				changed = true;
				/*
				 * The changed hole can contain pending extent.
				 * Loop again to check that.
				 */
				continue;
			}
			break;
		default:
			BUG();
		}

		break;
	}

	return changed;
}
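/*
 * Worked example for dev_extent_hole_check() (illustrative): given a hole
 * spanning [64M, 96M) and a chunk recorded as allocated over [80M, 88M),
 * contains_pending_extent() advances *hole_start past the chunk to 88M and
 * the hole shrinks to 8M, which is then re-checked against @num_bytes.
 */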
1545 * 1546 * @len is used to store the size of the free space that we find. 1547 * But if we don't find suitable free space, it is used to store the size of 1548 * the max free space. 1549 * 1550 * NOTE: This function will search *commit* root of device tree, and does extra 1551 * check to ensure dev extents are not double allocated. 1552 * This makes the function safe to allocate dev extents but may not report 1553 * correct usable device space, as device extent freed in current transaction 1554 * is not reported as available. 1555 */ 1556 static int find_free_dev_extent_start(struct btrfs_device *device, 1557 u64 num_bytes, u64 search_start, u64 *start, 1558 u64 *len) 1559 { 1560 struct btrfs_fs_info *fs_info = device->fs_info; 1561 struct btrfs_root *root = fs_info->dev_root; 1562 struct btrfs_key key; 1563 struct btrfs_dev_extent *dev_extent; 1564 struct btrfs_path *path; 1565 u64 hole_size; 1566 u64 max_hole_start; 1567 u64 max_hole_size; 1568 u64 extent_end; 1569 u64 search_end = device->total_bytes; 1570 int ret; 1571 int slot; 1572 struct extent_buffer *l; 1573 1574 search_start = dev_extent_search_start(device, search_start); 1575 1576 WARN_ON(device->zone_info && 1577 !IS_ALIGNED(num_bytes, device->zone_info->zone_size)); 1578 1579 path = btrfs_alloc_path(); 1580 if (!path) 1581 return -ENOMEM; 1582 1583 max_hole_start = search_start; 1584 max_hole_size = 0; 1585 1586 again: 1587 if (search_start >= search_end || 1588 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 1589 ret = -ENOSPC; 1590 goto out; 1591 } 1592 1593 path->reada = READA_FORWARD; 1594 path->search_commit_root = 1; 1595 path->skip_locking = 1; 1596 1597 key.objectid = device->devid; 1598 key.offset = search_start; 1599 key.type = BTRFS_DEV_EXTENT_KEY; 1600 1601 ret = btrfs_search_backwards(root, &key, path); 1602 if (ret < 0) 1603 goto out; 1604 1605 while (1) { 1606 l = path->nodes[0]; 1607 slot = path->slots[0]; 1608 if (slot >= btrfs_header_nritems(l)) { 1609 ret = btrfs_next_leaf(root, path); 1610 if (ret == 0) 1611 continue; 1612 if (ret < 0) 1613 goto out; 1614 1615 break; 1616 } 1617 btrfs_item_key_to_cpu(l, &key, slot); 1618 1619 if (key.objectid < device->devid) 1620 goto next; 1621 1622 if (key.objectid > device->devid) 1623 break; 1624 1625 if (key.type != BTRFS_DEV_EXTENT_KEY) 1626 goto next; 1627 1628 if (key.offset > search_start) { 1629 hole_size = key.offset - search_start; 1630 dev_extent_hole_check(device, &search_start, &hole_size, 1631 num_bytes); 1632 1633 if (hole_size > max_hole_size) { 1634 max_hole_start = search_start; 1635 max_hole_size = hole_size; 1636 } 1637 1638 /* 1639 * If this free space is greater than which we need, 1640 * it must be the max free space that we have found 1641 * until now, so max_hole_start must point to the start 1642 * of this free space and the length of this free space 1643 * is stored in max_hole_size. Thus, we return 1644 * max_hole_start and max_hole_size and go back to the 1645 * caller. 1646 */ 1647 if (hole_size >= num_bytes) { 1648 ret = 0; 1649 goto out; 1650 } 1651 } 1652 1653 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 1654 extent_end = key.offset + btrfs_dev_extent_length(l, 1655 dev_extent); 1656 if (extent_end > search_start) 1657 search_start = extent_end; 1658 next: 1659 path->slots[0]++; 1660 cond_resched(); 1661 } 1662 1663 /* 1664 * At this point, search_start should be the end of 1665 * allocated dev extents, and when shrinking the device, 1666 * search_end may be smaller than search_start. 
1667 */ 1668 if (search_end > search_start) { 1669 hole_size = search_end - search_start; 1670 if (dev_extent_hole_check(device, &search_start, &hole_size, 1671 num_bytes)) { 1672 btrfs_release_path(path); 1673 goto again; 1674 } 1675 1676 if (hole_size > max_hole_size) { 1677 max_hole_start = search_start; 1678 max_hole_size = hole_size; 1679 } 1680 } 1681 1682 /* See above. */ 1683 if (max_hole_size < num_bytes) 1684 ret = -ENOSPC; 1685 else 1686 ret = 0; 1687 1688 out: 1689 btrfs_free_path(path); 1690 *start = max_hole_start; 1691 if (len) 1692 *len = max_hole_size; 1693 return ret; 1694 } 1695 1696 int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes, 1697 u64 *start, u64 *len) 1698 { 1699 /* FIXME use last free of some kind */ 1700 return find_free_dev_extent_start(device, num_bytes, 0, start, len); 1701 } 1702 1703 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans, 1704 struct btrfs_device *device, 1705 u64 start, u64 *dev_extent_len) 1706 { 1707 struct btrfs_fs_info *fs_info = device->fs_info; 1708 struct btrfs_root *root = fs_info->dev_root; 1709 int ret; 1710 struct btrfs_path *path; 1711 struct btrfs_key key; 1712 struct btrfs_key found_key; 1713 struct extent_buffer *leaf = NULL; 1714 struct btrfs_dev_extent *extent = NULL; 1715 1716 path = btrfs_alloc_path(); 1717 if (!path) 1718 return -ENOMEM; 1719 1720 key.objectid = device->devid; 1721 key.offset = start; 1722 key.type = BTRFS_DEV_EXTENT_KEY; 1723 again: 1724 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1725 if (ret > 0) { 1726 ret = btrfs_previous_item(root, path, key.objectid, 1727 BTRFS_DEV_EXTENT_KEY); 1728 if (ret) 1729 goto out; 1730 leaf = path->nodes[0]; 1731 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 1732 extent = btrfs_item_ptr(leaf, path->slots[0], 1733 struct btrfs_dev_extent); 1734 BUG_ON(found_key.offset > start || found_key.offset + 1735 btrfs_dev_extent_length(leaf, extent) < start); 1736 key = found_key; 1737 btrfs_release_path(path); 1738 goto again; 1739 } else if (ret == 0) { 1740 leaf = path->nodes[0]; 1741 extent = btrfs_item_ptr(leaf, path->slots[0], 1742 struct btrfs_dev_extent); 1743 } else { 1744 goto out; 1745 } 1746 1747 *dev_extent_len = btrfs_dev_extent_length(leaf, extent); 1748 1749 ret = btrfs_del_item(trans, root, path); 1750 if (ret == 0) 1751 set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags); 1752 out: 1753 btrfs_free_path(path); 1754 return ret; 1755 } 1756 1757 static u64 find_next_chunk(struct btrfs_fs_info *fs_info) 1758 { 1759 struct extent_map_tree *em_tree; 1760 struct extent_map *em; 1761 struct rb_node *n; 1762 u64 ret = 0; 1763 1764 em_tree = &fs_info->mapping_tree; 1765 read_lock(&em_tree->lock); 1766 n = rb_last(&em_tree->map.rb_root); 1767 if (n) { 1768 em = rb_entry(n, struct extent_map, rb_node); 1769 ret = em->start + em->len; 1770 } 1771 read_unlock(&em_tree->lock); 1772 1773 return ret; 1774 } 1775 1776 static noinline int find_next_devid(struct btrfs_fs_info *fs_info, 1777 u64 *devid_ret) 1778 { 1779 int ret; 1780 struct btrfs_key key; 1781 struct btrfs_key found_key; 1782 struct btrfs_path *path; 1783 1784 path = btrfs_alloc_path(); 1785 if (!path) 1786 return -ENOMEM; 1787 1788 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1789 key.type = BTRFS_DEV_ITEM_KEY; 1790 key.offset = (u64)-1; 1791 1792 ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0); 1793 if (ret < 0) 1794 goto error; 1795 1796 if (ret == 0) { 1797 /* Corruption */ 1798 btrfs_err(fs_info, "corrupted chunk tree devid -1 
matched"); 1799 ret = -EUCLEAN; 1800 goto error; 1801 } 1802 1803 ret = btrfs_previous_item(fs_info->chunk_root, path, 1804 BTRFS_DEV_ITEMS_OBJECTID, 1805 BTRFS_DEV_ITEM_KEY); 1806 if (ret) { 1807 *devid_ret = 1; 1808 } else { 1809 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 1810 path->slots[0]); 1811 *devid_ret = found_key.offset + 1; 1812 } 1813 ret = 0; 1814 error: 1815 btrfs_free_path(path); 1816 return ret; 1817 } 1818 1819 /* 1820 * the device information is stored in the chunk root 1821 * the btrfs_device struct should be fully filled in 1822 */ 1823 static int btrfs_add_dev_item(struct btrfs_trans_handle *trans, 1824 struct btrfs_device *device) 1825 { 1826 int ret; 1827 struct btrfs_path *path; 1828 struct btrfs_dev_item *dev_item; 1829 struct extent_buffer *leaf; 1830 struct btrfs_key key; 1831 unsigned long ptr; 1832 1833 path = btrfs_alloc_path(); 1834 if (!path) 1835 return -ENOMEM; 1836 1837 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1838 key.type = BTRFS_DEV_ITEM_KEY; 1839 key.offset = device->devid; 1840 1841 btrfs_reserve_chunk_metadata(trans, true); 1842 ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path, 1843 &key, sizeof(*dev_item)); 1844 btrfs_trans_release_chunk_metadata(trans); 1845 if (ret) 1846 goto out; 1847 1848 leaf = path->nodes[0]; 1849 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 1850 1851 btrfs_set_device_id(leaf, dev_item, device->devid); 1852 btrfs_set_device_generation(leaf, dev_item, 0); 1853 btrfs_set_device_type(leaf, dev_item, device->type); 1854 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 1855 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 1856 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 1857 btrfs_set_device_total_bytes(leaf, dev_item, 1858 btrfs_device_get_disk_total_bytes(device)); 1859 btrfs_set_device_bytes_used(leaf, dev_item, 1860 btrfs_device_get_bytes_used(device)); 1861 btrfs_set_device_group(leaf, dev_item, 0); 1862 btrfs_set_device_seek_speed(leaf, dev_item, 0); 1863 btrfs_set_device_bandwidth(leaf, dev_item, 0); 1864 btrfs_set_device_start_offset(leaf, dev_item, 0); 1865 1866 ptr = btrfs_device_uuid(dev_item); 1867 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 1868 ptr = btrfs_device_fsid(dev_item); 1869 write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid, 1870 ptr, BTRFS_FSID_SIZE); 1871 btrfs_mark_buffer_dirty(leaf); 1872 1873 ret = 0; 1874 out: 1875 btrfs_free_path(path); 1876 return ret; 1877 } 1878 1879 /* 1880 * Function to update ctime/mtime for a given device path. 1881 * Mainly used for ctime/mtime based probe like libblkid. 1882 * 1883 * We don't care about errors here, this is just to be kind to userspace. 
1884 */ 1885 static void update_dev_time(const char *device_path) 1886 { 1887 struct path path; 1888 struct timespec64 now; 1889 int ret; 1890 1891 ret = kern_path(device_path, LOOKUP_FOLLOW, &path); 1892 if (ret) 1893 return; 1894 1895 now = current_time(d_inode(path.dentry)); 1896 inode_update_time(d_inode(path.dentry), &now, S_MTIME | S_CTIME); 1897 path_put(&path); 1898 } 1899 1900 static int btrfs_rm_dev_item(struct btrfs_device *device) 1901 { 1902 struct btrfs_root *root = device->fs_info->chunk_root; 1903 int ret; 1904 struct btrfs_path *path; 1905 struct btrfs_key key; 1906 struct btrfs_trans_handle *trans; 1907 1908 path = btrfs_alloc_path(); 1909 if (!path) 1910 return -ENOMEM; 1911 1912 trans = btrfs_start_transaction(root, 0); 1913 if (IS_ERR(trans)) { 1914 btrfs_free_path(path); 1915 return PTR_ERR(trans); 1916 } 1917 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1918 key.type = BTRFS_DEV_ITEM_KEY; 1919 key.offset = device->devid; 1920 1921 btrfs_reserve_chunk_metadata(trans, false); 1922 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1923 btrfs_trans_release_chunk_metadata(trans); 1924 if (ret) { 1925 if (ret > 0) 1926 ret = -ENOENT; 1927 btrfs_abort_transaction(trans, ret); 1928 btrfs_end_transaction(trans); 1929 goto out; 1930 } 1931 1932 ret = btrfs_del_item(trans, root, path); 1933 if (ret) { 1934 btrfs_abort_transaction(trans, ret); 1935 btrfs_end_transaction(trans); 1936 } 1937 1938 out: 1939 btrfs_free_path(path); 1940 if (!ret) 1941 ret = btrfs_commit_transaction(trans); 1942 return ret; 1943 } 1944 1945 /* 1946 * Verify that @num_devices satisfies the RAID profile constraints in the whole 1947 * filesystem. It's up to the caller to adjust that number regarding eg. device 1948 * replace. 1949 */ 1950 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info, 1951 u64 num_devices) 1952 { 1953 u64 all_avail; 1954 unsigned seq; 1955 int i; 1956 1957 do { 1958 seq = read_seqbegin(&fs_info->profiles_lock); 1959 1960 all_avail = fs_info->avail_data_alloc_bits | 1961 fs_info->avail_system_alloc_bits | 1962 fs_info->avail_metadata_alloc_bits; 1963 } while (read_seqretry(&fs_info->profiles_lock, seq)); 1964 1965 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { 1966 if (!(all_avail & btrfs_raid_array[i].bg_flag)) 1967 continue; 1968 1969 if (num_devices < btrfs_raid_array[i].devs_min) 1970 return btrfs_raid_array[i].mindev_error; 1971 } 1972 1973 return 0; 1974 } 1975 1976 static struct btrfs_device * btrfs_find_next_active_device( 1977 struct btrfs_fs_devices *fs_devs, struct btrfs_device *device) 1978 { 1979 struct btrfs_device *next_device; 1980 1981 list_for_each_entry(next_device, &fs_devs->devices, dev_list) { 1982 if (next_device != device && 1983 !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state) 1984 && next_device->bdev) 1985 return next_device; 1986 } 1987 1988 return NULL; 1989 } 1990 1991 /* 1992 * Helper function to check if the given device is part of s_bdev / latest_dev 1993 * and replace it with the provided or the next active device, in the context 1994 * where this function called, there should be always be another device (or 1995 * this_dev) which is active. 
1996 */ 1997 void __cold btrfs_assign_next_active_device(struct btrfs_device *device, 1998 struct btrfs_device *next_device) 1999 { 2000 struct btrfs_fs_info *fs_info = device->fs_info; 2001 2002 if (!next_device) 2003 next_device = btrfs_find_next_active_device(fs_info->fs_devices, 2004 device); 2005 ASSERT(next_device); 2006 2007 if (fs_info->sb->s_bdev && 2008 (fs_info->sb->s_bdev == device->bdev)) 2009 fs_info->sb->s_bdev = next_device->bdev; 2010 2011 if (fs_info->fs_devices->latest_dev->bdev == device->bdev) 2012 fs_info->fs_devices->latest_dev = next_device; 2013 } 2014 2015 /* 2016 * Return btrfs_fs_devices::num_devices excluding the device that's being 2017 * currently replaced. 2018 */ 2019 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info) 2020 { 2021 u64 num_devices = fs_info->fs_devices->num_devices; 2022 2023 down_read(&fs_info->dev_replace.rwsem); 2024 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) { 2025 ASSERT(num_devices > 1); 2026 num_devices--; 2027 } 2028 up_read(&fs_info->dev_replace.rwsem); 2029 2030 return num_devices; 2031 } 2032 2033 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info, 2034 struct block_device *bdev, 2035 const char *device_path) 2036 { 2037 struct btrfs_super_block *disk_super; 2038 int copy_num; 2039 2040 if (!bdev) 2041 return; 2042 2043 for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) { 2044 struct page *page; 2045 int ret; 2046 2047 disk_super = btrfs_read_dev_one_super(bdev, copy_num); 2048 if (IS_ERR(disk_super)) 2049 continue; 2050 2051 if (bdev_is_zoned(bdev)) { 2052 btrfs_reset_sb_log_zones(bdev, copy_num); 2053 continue; 2054 } 2055 2056 memset(&disk_super->magic, 0, sizeof(disk_super->magic)); 2057 2058 page = virt_to_page(disk_super); 2059 set_page_dirty(page); 2060 lock_page(page); 2061 /* write_one_page() unlocks the page */ 2062 ret = write_one_page(page); 2063 if (ret) 2064 btrfs_warn(fs_info, 2065 "error clearing superblock number %d (%d)", 2066 copy_num, ret); 2067 btrfs_release_disk_super(disk_super); 2068 2069 } 2070 2071 /* Notify udev that device has changed */ 2072 btrfs_kobject_uevent(bdev, KOBJ_CHANGE); 2073 2074 /* Update ctime/mtime for device path for libblkid */ 2075 update_dev_time(device_path); 2076 } 2077 2078 int btrfs_rm_device(struct btrfs_fs_info *fs_info, 2079 struct btrfs_dev_lookup_args *args, 2080 struct block_device **bdev, fmode_t *mode) 2081 { 2082 struct btrfs_device *device; 2083 struct btrfs_fs_devices *cur_devices; 2084 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2085 u64 num_devices; 2086 int ret = 0; 2087 2088 /* 2089 * The device list in fs_devices is accessed without locks (neither 2090 * uuid_mutex nor device_list_mutex) as it won't change on a mounted 2091 * filesystem and another device rm cannot run.
2092 */ 2093 num_devices = btrfs_num_devices(fs_info); 2094 2095 ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1); 2096 if (ret) 2097 goto out; 2098 2099 device = btrfs_find_device(fs_info->fs_devices, args); 2100 if (!device) { 2101 if (args->missing) 2102 ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND; 2103 else 2104 ret = -ENOENT; 2105 goto out; 2106 } 2107 2108 if (btrfs_pinned_by_swapfile(fs_info, device)) { 2109 btrfs_warn_in_rcu(fs_info, 2110 "cannot remove device %s (devid %llu) due to active swapfile", 2111 rcu_str_deref(device->name), device->devid); 2112 ret = -ETXTBSY; 2113 goto out; 2114 } 2115 2116 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 2117 ret = BTRFS_ERROR_DEV_TGT_REPLACE; 2118 goto out; 2119 } 2120 2121 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 2122 fs_info->fs_devices->rw_devices == 1) { 2123 ret = BTRFS_ERROR_DEV_ONLY_WRITABLE; 2124 goto out; 2125 } 2126 2127 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2128 mutex_lock(&fs_info->chunk_mutex); 2129 list_del_init(&device->dev_alloc_list); 2130 device->fs_devices->rw_devices--; 2131 mutex_unlock(&fs_info->chunk_mutex); 2132 } 2133 2134 ret = btrfs_shrink_device(device, 0); 2135 if (ret) 2136 goto error_undo; 2137 2138 /* 2139 * TODO: the superblock still includes this device in its num_devices 2140 * counter although write_all_supers() is not locked out. This 2141 * could give a filesystem state which requires a degraded mount. 2142 */ 2143 ret = btrfs_rm_dev_item(device); 2144 if (ret) 2145 goto error_undo; 2146 2147 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2148 btrfs_scrub_cancel_dev(device); 2149 2150 /* 2151 * the device list mutex makes sure that we don't change 2152 * the device list while someone else is writing out all 2153 * the device supers. Whoever is writing all supers, should 2154 * lock the device list mutex before getting the number of 2155 * devices in the super block (super_copy). Conversely, 2156 * whoever updates the number of devices in the super block 2157 * (super_copy) should hold the device list mutex. 2158 */ 2159 2160 /* 2161 * In normal cases the cur_devices == fs_devices. But in case 2162 * of deleting a seed device, the cur_devices should point to 2163 * its own fs_devices listed under the fs_devices->seed_list. 2164 */ 2165 cur_devices = device->fs_devices; 2166 mutex_lock(&fs_devices->device_list_mutex); 2167 list_del_rcu(&device->dev_list); 2168 2169 cur_devices->num_devices--; 2170 cur_devices->total_devices--; 2171 /* Update total_devices of the parent fs_devices if it's seed */ 2172 if (cur_devices != fs_devices) 2173 fs_devices->total_devices--; 2174 2175 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) 2176 cur_devices->missing_devices--; 2177 2178 btrfs_assign_next_active_device(device, NULL); 2179 2180 if (device->bdev) { 2181 cur_devices->open_devices--; 2182 /* remove sysfs entry */ 2183 btrfs_sysfs_remove_device(device); 2184 } 2185 2186 num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1; 2187 btrfs_set_super_num_devices(fs_info->super_copy, num_devices); 2188 mutex_unlock(&fs_devices->device_list_mutex); 2189 2190 /* 2191 * At this point, the device is zero sized and detached from the 2192 * devices list. All that's left is to zero out the old supers and 2193 * free the device. 
2194 * 2195 * We cannot call btrfs_close_bdev() here because we're holding the sb 2196 * write lock, and blkdev_put() will pull in the ->open_mutex on the 2197 * block device and it's dependencies. Instead just flush the device 2198 * and let the caller do the final blkdev_put. 2199 */ 2200 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2201 btrfs_scratch_superblocks(fs_info, device->bdev, 2202 device->name->str); 2203 if (device->bdev) { 2204 sync_blockdev(device->bdev); 2205 invalidate_bdev(device->bdev); 2206 } 2207 } 2208 2209 *bdev = device->bdev; 2210 *mode = device->mode; 2211 synchronize_rcu(); 2212 btrfs_free_device(device); 2213 2214 /* 2215 * This can happen if cur_devices is the private seed devices list. We 2216 * cannot call close_fs_devices() here because it expects the uuid_mutex 2217 * to be held, but in fact we don't need that for the private 2218 * seed_devices, we can simply decrement cur_devices->opened and then 2219 * remove it from our list and free the fs_devices. 2220 */ 2221 if (cur_devices->num_devices == 0) { 2222 list_del_init(&cur_devices->seed_list); 2223 ASSERT(cur_devices->opened == 1); 2224 cur_devices->opened--; 2225 free_fs_devices(cur_devices); 2226 } 2227 2228 out: 2229 return ret; 2230 2231 error_undo: 2232 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2233 mutex_lock(&fs_info->chunk_mutex); 2234 list_add(&device->dev_alloc_list, 2235 &fs_devices->alloc_list); 2236 device->fs_devices->rw_devices++; 2237 mutex_unlock(&fs_info->chunk_mutex); 2238 } 2239 goto out; 2240 } 2241 2242 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev) 2243 { 2244 struct btrfs_fs_devices *fs_devices; 2245 2246 lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex); 2247 2248 /* 2249 * in case of fs with no seed, srcdev->fs_devices will point 2250 * to fs_devices of fs_info. However when the dev being replaced is 2251 * a seed dev it will point to the seed's local fs_devices. In short 2252 * srcdev will have its correct fs_devices in both the cases. 2253 */ 2254 fs_devices = srcdev->fs_devices; 2255 2256 list_del_rcu(&srcdev->dev_list); 2257 list_del(&srcdev->dev_alloc_list); 2258 fs_devices->num_devices--; 2259 if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state)) 2260 fs_devices->missing_devices--; 2261 2262 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) 2263 fs_devices->rw_devices--; 2264 2265 if (srcdev->bdev) 2266 fs_devices->open_devices--; 2267 } 2268 2269 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev) 2270 { 2271 struct btrfs_fs_devices *fs_devices = srcdev->fs_devices; 2272 2273 mutex_lock(&uuid_mutex); 2274 2275 btrfs_close_bdev(srcdev); 2276 synchronize_rcu(); 2277 btrfs_free_device(srcdev); 2278 2279 /* if this is no devs we rather delete the fs_devices */ 2280 if (!fs_devices->num_devices) { 2281 /* 2282 * On a mounted FS, num_devices can't be zero unless it's a 2283 * seed. In case of a seed device being replaced, the replace 2284 * target added to the sprout FS, so there will be no more 2285 * device left under the seed FS. 
2286 */ 2287 ASSERT(fs_devices->seeding); 2288 2289 list_del_init(&fs_devices->seed_list); 2290 close_fs_devices(fs_devices); 2291 free_fs_devices(fs_devices); 2292 } 2293 mutex_unlock(&uuid_mutex); 2294 } 2295 2296 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev) 2297 { 2298 struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices; 2299 2300 mutex_lock(&fs_devices->device_list_mutex); 2301 2302 btrfs_sysfs_remove_device(tgtdev); 2303 2304 if (tgtdev->bdev) 2305 fs_devices->open_devices--; 2306 2307 fs_devices->num_devices--; 2308 2309 btrfs_assign_next_active_device(tgtdev, NULL); 2310 2311 list_del_rcu(&tgtdev->dev_list); 2312 2313 mutex_unlock(&fs_devices->device_list_mutex); 2314 2315 btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev, 2316 tgtdev->name->str); 2317 2318 btrfs_close_bdev(tgtdev); 2319 synchronize_rcu(); 2320 btrfs_free_device(tgtdev); 2321 } 2322 2323 /** 2324 * Populate args from device at path 2325 * 2326 * @fs_info: the filesystem 2327 * @args: the args to populate 2328 * @path: the path to the device 2329 * 2330 * This will read the super block of the device at @path and populate @args with 2331 * the devid, fsid, and uuid. This is meant to be used for ioctls that need to 2332 * lookup a device to operate on, but need to do it before we take any locks. 2333 * This properly handles the special case of "missing" that a user may pass in, 2334 * and does some basic sanity checks. The caller must make sure that @path is 2335 * properly NUL terminated before calling in, and must call 2336 * btrfs_put_dev_args_from_path() in order to free up the temporary fsid and 2337 * uuid buffers. 2338 * 2339 * Return: 0 for success, -errno for failure 2340 */ 2341 int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info, 2342 struct btrfs_dev_lookup_args *args, 2343 const char *path) 2344 { 2345 struct btrfs_super_block *disk_super; 2346 struct block_device *bdev; 2347 int ret; 2348 2349 if (!path || !path[0]) 2350 return -EINVAL; 2351 if (!strcmp(path, "missing")) { 2352 args->missing = true; 2353 return 0; 2354 } 2355 2356 args->uuid = kzalloc(BTRFS_UUID_SIZE, GFP_KERNEL); 2357 args->fsid = kzalloc(BTRFS_FSID_SIZE, GFP_KERNEL); 2358 if (!args->uuid || !args->fsid) { 2359 btrfs_put_dev_args_from_path(args); 2360 return -ENOMEM; 2361 } 2362 2363 ret = btrfs_get_bdev_and_sb(path, FMODE_READ, fs_info->bdev_holder, 0, 2364 &bdev, &disk_super); 2365 if (ret) 2366 return ret; 2367 args->devid = btrfs_stack_device_id(&disk_super->dev_item); 2368 memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE); 2369 if (btrfs_fs_incompat(fs_info, METADATA_UUID)) 2370 memcpy(args->fsid, disk_super->metadata_uuid, BTRFS_FSID_SIZE); 2371 else 2372 memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE); 2373 btrfs_release_disk_super(disk_super); 2374 blkdev_put(bdev, FMODE_READ); 2375 return 0; 2376 } 2377 2378 /* 2379 * Only use this jointly with btrfs_get_dev_args_from_path() because we will 2380 * allocate our ->uuid and ->fsid pointers, everybody else uses local variables 2381 * that don't need to be freed. 
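 *
 * Typical pairing, mirroring btrfs_find_device_by_devspec() below (a
 * sketch with error handling trimmed):
 *
 *	BTRFS_DEV_LOOKUP_ARGS(args);
 *
 *	ret = btrfs_get_dev_args_from_path(fs_info, &args, path);
 *	if (ret)
 *		return ERR_PTR(ret);
 *	device = btrfs_find_device(fs_info->fs_devices, &args);
 *	btrfs_put_dev_args_from_path(&args);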
2382 */ 2383 void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args) 2384 { 2385 kfree(args->uuid); 2386 kfree(args->fsid); 2387 args->uuid = NULL; 2388 args->fsid = NULL; 2389 } 2390 2391 struct btrfs_device *btrfs_find_device_by_devspec( 2392 struct btrfs_fs_info *fs_info, u64 devid, 2393 const char *device_path) 2394 { 2395 BTRFS_DEV_LOOKUP_ARGS(args); 2396 struct btrfs_device *device; 2397 int ret; 2398 2399 if (devid) { 2400 args.devid = devid; 2401 device = btrfs_find_device(fs_info->fs_devices, &args); 2402 if (!device) 2403 return ERR_PTR(-ENOENT); 2404 return device; 2405 } 2406 2407 ret = btrfs_get_dev_args_from_path(fs_info, &args, device_path); 2408 if (ret) 2409 return ERR_PTR(ret); 2410 device = btrfs_find_device(fs_info->fs_devices, &args); 2411 btrfs_put_dev_args_from_path(&args); 2412 if (!device) 2413 return ERR_PTR(-ENOENT); 2414 return device; 2415 } 2416 2417 static struct btrfs_fs_devices *btrfs_init_sprout(struct btrfs_fs_info *fs_info) 2418 { 2419 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2420 struct btrfs_fs_devices *old_devices; 2421 struct btrfs_fs_devices *seed_devices; 2422 2423 lockdep_assert_held(&uuid_mutex); 2424 if (!fs_devices->seeding) 2425 return ERR_PTR(-EINVAL); 2426 2427 /* 2428 * Private copy of the seed devices, anchored at 2429 * fs_info->fs_devices->seed_list 2430 */ 2431 seed_devices = alloc_fs_devices(NULL, NULL); 2432 if (IS_ERR(seed_devices)) 2433 return seed_devices; 2434 2435 /* 2436 * It's necessary to retain a copy of the original seed fs_devices in 2437 * fs_uuids so that filesystems which have been seeded can successfully 2438 * reference the seed device from open_seed_devices. This also supports 2439 * multiple seed filesystems. 2440 */ 2441 old_devices = clone_fs_devices(fs_devices); 2442 if (IS_ERR(old_devices)) { 2443 kfree(seed_devices); 2444 return old_devices; 2445 } 2446 2447 list_add(&old_devices->fs_list, &fs_uuids); 2448 2449 memcpy(seed_devices, fs_devices, sizeof(*seed_devices)); 2450 seed_devices->opened = 1; 2451 INIT_LIST_HEAD(&seed_devices->devices); 2452 INIT_LIST_HEAD(&seed_devices->alloc_list); 2453 mutex_init(&seed_devices->device_list_mutex); 2454 2455 return seed_devices; 2456 } 2457 2458 /* 2459 * Splice seed devices into the sprout fs_devices. 2460 * Generate a new fsid for the sprouted read-write filesystem. 2461 */ 2462 static void btrfs_setup_sprout(struct btrfs_fs_info *fs_info, 2463 struct btrfs_fs_devices *seed_devices) 2464 { 2465 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2466 struct btrfs_super_block *disk_super = fs_info->super_copy; 2467 struct btrfs_device *device; 2468 u64 super_flags; 2469 2470 /* 2471 * We are updating the fsid, the thread leading to device_list_add() 2472 * could race, so uuid_mutex is needed. 2473 */ 2474 lockdep_assert_held(&uuid_mutex); 2475 2476 /* 2477 * The threads listed below may traverse dev_list but can do that without 2478 * device_list_mutex: 2479 * - All device ops and balance - as we are in btrfs_exclop_start. 2480 * - Various dev_list readers - are using RCU. 2481 * - btrfs_ioctl_fitrim() - is using RCU.
2482 * 2483 * For-read threads as below are using device_list_mutex: 2484 * - Readonly scrub btrfs_scrub_dev() 2485 * - Readonly scrub btrfs_scrub_progress() 2486 * - btrfs_get_dev_stats() 2487 */ 2488 lockdep_assert_held(&fs_devices->device_list_mutex); 2489 2490 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices, 2491 synchronize_rcu); 2492 list_for_each_entry(device, &seed_devices->devices, dev_list) 2493 device->fs_devices = seed_devices; 2494 2495 fs_devices->seeding = false; 2496 fs_devices->num_devices = 0; 2497 fs_devices->open_devices = 0; 2498 fs_devices->missing_devices = 0; 2499 fs_devices->rotating = false; 2500 list_add(&seed_devices->seed_list, &fs_devices->seed_list); 2501 2502 generate_random_uuid(fs_devices->fsid); 2503 memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE); 2504 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); 2505 2506 super_flags = btrfs_super_flags(disk_super) & 2507 ~BTRFS_SUPER_FLAG_SEEDING; 2508 btrfs_set_super_flags(disk_super, super_flags); 2509 } 2510 2511 /* 2512 * Store the expected generation for seed devices in device items. 2513 */ 2514 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans) 2515 { 2516 BTRFS_DEV_LOOKUP_ARGS(args); 2517 struct btrfs_fs_info *fs_info = trans->fs_info; 2518 struct btrfs_root *root = fs_info->chunk_root; 2519 struct btrfs_path *path; 2520 struct extent_buffer *leaf; 2521 struct btrfs_dev_item *dev_item; 2522 struct btrfs_device *device; 2523 struct btrfs_key key; 2524 u8 fs_uuid[BTRFS_FSID_SIZE]; 2525 u8 dev_uuid[BTRFS_UUID_SIZE]; 2526 int ret; 2527 2528 path = btrfs_alloc_path(); 2529 if (!path) 2530 return -ENOMEM; 2531 2532 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2533 key.offset = 0; 2534 key.type = BTRFS_DEV_ITEM_KEY; 2535 2536 while (1) { 2537 btrfs_reserve_chunk_metadata(trans, false); 2538 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2539 btrfs_trans_release_chunk_metadata(trans); 2540 if (ret < 0) 2541 goto error; 2542 2543 leaf = path->nodes[0]; 2544 next_slot: 2545 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 2546 ret = btrfs_next_leaf(root, path); 2547 if (ret > 0) 2548 break; 2549 if (ret < 0) 2550 goto error; 2551 leaf = path->nodes[0]; 2552 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2553 btrfs_release_path(path); 2554 continue; 2555 } 2556 2557 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2558 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID || 2559 key.type != BTRFS_DEV_ITEM_KEY) 2560 break; 2561 2562 dev_item = btrfs_item_ptr(leaf, path->slots[0], 2563 struct btrfs_dev_item); 2564 args.devid = btrfs_device_id(leaf, dev_item); 2565 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 2566 BTRFS_UUID_SIZE); 2567 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 2568 BTRFS_FSID_SIZE); 2569 args.uuid = dev_uuid; 2570 args.fsid = fs_uuid; 2571 device = btrfs_find_device(fs_info->fs_devices, &args); 2572 BUG_ON(!device); /* Logic error */ 2573 2574 if (device->fs_devices->seeding) { 2575 btrfs_set_device_generation(leaf, dev_item, 2576 device->generation); 2577 btrfs_mark_buffer_dirty(leaf); 2578 } 2579 2580 path->slots[0]++; 2581 goto next_slot; 2582 } 2583 ret = 0; 2584 error: 2585 btrfs_free_path(path); 2586 return ret; 2587 } 2588 2589 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path) 2590 { 2591 struct btrfs_root *root = fs_info->dev_root; 2592 struct btrfs_trans_handle *trans; 2593 struct btrfs_device *device; 2594 struct block_device *bdev; 2595 struct 
super_block *sb = fs_info->sb; 2596 struct rcu_string *name; 2597 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2598 struct btrfs_fs_devices *seed_devices; 2599 u64 orig_super_total_bytes; 2600 u64 orig_super_num_devices; 2601 int ret = 0; 2602 bool seeding_dev = false; 2603 bool locked = false; 2604 2605 if (sb_rdonly(sb) && !fs_devices->seeding) 2606 return -EROFS; 2607 2608 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, 2609 fs_info->bdev_holder); 2610 if (IS_ERR(bdev)) 2611 return PTR_ERR(bdev); 2612 2613 if (!btrfs_check_device_zone_type(fs_info, bdev)) { 2614 ret = -EINVAL; 2615 goto error; 2616 } 2617 2618 if (fs_devices->seeding) { 2619 seeding_dev = true; 2620 down_write(&sb->s_umount); 2621 mutex_lock(&uuid_mutex); 2622 locked = true; 2623 } 2624 2625 sync_blockdev(bdev); 2626 2627 rcu_read_lock(); 2628 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) { 2629 if (device->bdev == bdev) { 2630 ret = -EEXIST; 2631 rcu_read_unlock(); 2632 goto error; 2633 } 2634 } 2635 rcu_read_unlock(); 2636 2637 device = btrfs_alloc_device(fs_info, NULL, NULL); 2638 if (IS_ERR(device)) { 2639 /* we can safely leave the fs_devices entry around */ 2640 ret = PTR_ERR(device); 2641 goto error; 2642 } 2643 2644 name = rcu_string_strdup(device_path, GFP_KERNEL); 2645 if (!name) { 2646 ret = -ENOMEM; 2647 goto error_free_device; 2648 } 2649 rcu_assign_pointer(device->name, name); 2650 2651 device->fs_info = fs_info; 2652 device->bdev = bdev; 2653 ret = lookup_bdev(device_path, &device->devt); 2654 if (ret) 2655 goto error_free_device; 2656 2657 ret = btrfs_get_dev_zone_info(device, false); 2658 if (ret) 2659 goto error_free_device; 2660 2661 trans = btrfs_start_transaction(root, 0); 2662 if (IS_ERR(trans)) { 2663 ret = PTR_ERR(trans); 2664 goto error_free_zone; 2665 } 2666 2667 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 2668 device->generation = trans->transid; 2669 device->io_width = fs_info->sectorsize; 2670 device->io_align = fs_info->sectorsize; 2671 device->sector_size = fs_info->sectorsize; 2672 device->total_bytes = 2673 round_down(bdev_nr_bytes(bdev), fs_info->sectorsize); 2674 device->disk_total_bytes = device->total_bytes; 2675 device->commit_total_bytes = device->total_bytes; 2676 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2677 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 2678 device->mode = FMODE_EXCL; 2679 device->dev_stats_valid = 1; 2680 set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE); 2681 2682 if (seeding_dev) { 2683 btrfs_clear_sb_rdonly(sb); 2684 2685 /* GFP_KERNEL allocation must not be under device_list_mutex */ 2686 seed_devices = btrfs_init_sprout(fs_info); 2687 if (IS_ERR(seed_devices)) { 2688 ret = PTR_ERR(seed_devices); 2689 btrfs_abort_transaction(trans, ret); 2690 goto error_trans; 2691 } 2692 } 2693 2694 mutex_lock(&fs_devices->device_list_mutex); 2695 if (seeding_dev) { 2696 btrfs_setup_sprout(fs_info, seed_devices); 2697 btrfs_assign_next_active_device(fs_info->fs_devices->latest_dev, 2698 device); 2699 } 2700 2701 device->fs_devices = fs_devices; 2702 2703 mutex_lock(&fs_info->chunk_mutex); 2704 list_add_rcu(&device->dev_list, &fs_devices->devices); 2705 list_add(&device->dev_alloc_list, &fs_devices->alloc_list); 2706 fs_devices->num_devices++; 2707 fs_devices->open_devices++; 2708 fs_devices->rw_devices++; 2709 fs_devices->total_devices++; 2710 fs_devices->total_rw_bytes += device->total_bytes; 2711 2712 atomic64_add(device->total_bytes, &fs_info->free_chunk_space); 2713 2714 if 
(!blk_queue_nonrot(bdev_get_queue(bdev))) 2715 fs_devices->rotating = true; 2716 2717 orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy); 2718 btrfs_set_super_total_bytes(fs_info->super_copy, 2719 round_down(orig_super_total_bytes + device->total_bytes, 2720 fs_info->sectorsize)); 2721 2722 orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy); 2723 btrfs_set_super_num_devices(fs_info->super_copy, 2724 orig_super_num_devices + 1); 2725 2726 /* 2727 * we've got more storage, clear any full flags on the space 2728 * infos 2729 */ 2730 btrfs_clear_space_info_full(fs_info); 2731 2732 mutex_unlock(&fs_info->chunk_mutex); 2733 2734 /* Add sysfs device entry */ 2735 btrfs_sysfs_add_device(device); 2736 2737 mutex_unlock(&fs_devices->device_list_mutex); 2738 2739 if (seeding_dev) { 2740 mutex_lock(&fs_info->chunk_mutex); 2741 ret = init_first_rw_device(trans); 2742 mutex_unlock(&fs_info->chunk_mutex); 2743 if (ret) { 2744 btrfs_abort_transaction(trans, ret); 2745 goto error_sysfs; 2746 } 2747 } 2748 2749 ret = btrfs_add_dev_item(trans, device); 2750 if (ret) { 2751 btrfs_abort_transaction(trans, ret); 2752 goto error_sysfs; 2753 } 2754 2755 if (seeding_dev) { 2756 ret = btrfs_finish_sprout(trans); 2757 if (ret) { 2758 btrfs_abort_transaction(trans, ret); 2759 goto error_sysfs; 2760 } 2761 2762 /* 2763 * fs_devices now represents the newly sprouted filesystem and 2764 * its fsid has been changed by btrfs_sprout_splice(). 2765 */ 2766 btrfs_sysfs_update_sprout_fsid(fs_devices); 2767 } 2768 2769 ret = btrfs_commit_transaction(trans); 2770 2771 if (seeding_dev) { 2772 mutex_unlock(&uuid_mutex); 2773 up_write(&sb->s_umount); 2774 locked = false; 2775 2776 if (ret) /* transaction commit */ 2777 return ret; 2778 2779 ret = btrfs_relocate_sys_chunks(fs_info); 2780 if (ret < 0) 2781 btrfs_handle_fs_error(fs_info, ret, 2782 "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command."); 2783 trans = btrfs_attach_transaction(root); 2784 if (IS_ERR(trans)) { 2785 if (PTR_ERR(trans) == -ENOENT) 2786 return 0; 2787 ret = PTR_ERR(trans); 2788 trans = NULL; 2789 goto error_sysfs; 2790 } 2791 ret = btrfs_commit_transaction(trans); 2792 } 2793 2794 /* 2795 * Now that we have written a new super block to this device, check all 2796 * other fs_devices list if device_path alienates any other scanned 2797 * device. 2798 * We can ignore the return value as it typically returns -EINVAL and 2799 * only succeeds if the device was an alien. 
2800 */ 2801 btrfs_forget_devices(device->devt); 2802 2803 /* Update ctime/mtime for blkid or udev */ 2804 update_dev_time(device_path); 2805 2806 return ret; 2807 2808 error_sysfs: 2809 btrfs_sysfs_remove_device(device); 2810 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2811 mutex_lock(&fs_info->chunk_mutex); 2812 list_del_rcu(&device->dev_list); 2813 list_del(&device->dev_alloc_list); 2814 fs_info->fs_devices->num_devices--; 2815 fs_info->fs_devices->open_devices--; 2816 fs_info->fs_devices->rw_devices--; 2817 fs_info->fs_devices->total_devices--; 2818 fs_info->fs_devices->total_rw_bytes -= device->total_bytes; 2819 atomic64_sub(device->total_bytes, &fs_info->free_chunk_space); 2820 btrfs_set_super_total_bytes(fs_info->super_copy, 2821 orig_super_total_bytes); 2822 btrfs_set_super_num_devices(fs_info->super_copy, 2823 orig_super_num_devices); 2824 mutex_unlock(&fs_info->chunk_mutex); 2825 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2826 error_trans: 2827 if (seeding_dev) 2828 btrfs_set_sb_rdonly(sb); 2829 if (trans) 2830 btrfs_end_transaction(trans); 2831 error_free_zone: 2832 btrfs_destroy_dev_zone_info(device); 2833 error_free_device: 2834 btrfs_free_device(device); 2835 error: 2836 blkdev_put(bdev, FMODE_EXCL); 2837 if (locked) { 2838 mutex_unlock(&uuid_mutex); 2839 up_write(&sb->s_umount); 2840 } 2841 return ret; 2842 } 2843 2844 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans, 2845 struct btrfs_device *device) 2846 { 2847 int ret; 2848 struct btrfs_path *path; 2849 struct btrfs_root *root = device->fs_info->chunk_root; 2850 struct btrfs_dev_item *dev_item; 2851 struct extent_buffer *leaf; 2852 struct btrfs_key key; 2853 2854 path = btrfs_alloc_path(); 2855 if (!path) 2856 return -ENOMEM; 2857 2858 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2859 key.type = BTRFS_DEV_ITEM_KEY; 2860 key.offset = device->devid; 2861 2862 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2863 if (ret < 0) 2864 goto out; 2865 2866 if (ret > 0) { 2867 ret = -ENOENT; 2868 goto out; 2869 } 2870 2871 leaf = path->nodes[0]; 2872 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 2873 2874 btrfs_set_device_id(leaf, dev_item, device->devid); 2875 btrfs_set_device_type(leaf, dev_item, device->type); 2876 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 2877 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 2878 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 2879 btrfs_set_device_total_bytes(leaf, dev_item, 2880 btrfs_device_get_disk_total_bytes(device)); 2881 btrfs_set_device_bytes_used(leaf, dev_item, 2882 btrfs_device_get_bytes_used(device)); 2883 btrfs_mark_buffer_dirty(leaf); 2884 2885 out: 2886 btrfs_free_path(path); 2887 return ret; 2888 } 2889 2890 int btrfs_grow_device(struct btrfs_trans_handle *trans, 2891 struct btrfs_device *device, u64 new_size) 2892 { 2893 struct btrfs_fs_info *fs_info = device->fs_info; 2894 struct btrfs_super_block *super_copy = fs_info->super_copy; 2895 u64 old_total; 2896 u64 diff; 2897 int ret; 2898 2899 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 2900 return -EACCES; 2901 2902 new_size = round_down(new_size, fs_info->sectorsize); 2903 2904 mutex_lock(&fs_info->chunk_mutex); 2905 old_total = btrfs_super_total_bytes(super_copy); 2906 diff = round_down(new_size - device->total_bytes, fs_info->sectorsize); 2907 2908 if (new_size <= device->total_bytes || 2909 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 2910 
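/* Not an actual grow, or the device is a replace target which must not be resized: reject */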
mutex_unlock(&fs_info->chunk_mutex); 2911 return -EINVAL; 2912 } 2913 2914 btrfs_set_super_total_bytes(super_copy, 2915 round_down(old_total + diff, fs_info->sectorsize)); 2916 device->fs_devices->total_rw_bytes += diff; 2917 2918 btrfs_device_set_total_bytes(device, new_size); 2919 btrfs_device_set_disk_total_bytes(device, new_size); 2920 btrfs_clear_space_info_full(device->fs_info); 2921 if (list_empty(&device->post_commit_list)) 2922 list_add_tail(&device->post_commit_list, 2923 &trans->transaction->dev_update_list); 2924 mutex_unlock(&fs_info->chunk_mutex); 2925 2926 btrfs_reserve_chunk_metadata(trans, false); 2927 ret = btrfs_update_device(trans, device); 2928 btrfs_trans_release_chunk_metadata(trans); 2929 2930 return ret; 2931 } 2932 2933 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 2934 { 2935 struct btrfs_fs_info *fs_info = trans->fs_info; 2936 struct btrfs_root *root = fs_info->chunk_root; 2937 int ret; 2938 struct btrfs_path *path; 2939 struct btrfs_key key; 2940 2941 path = btrfs_alloc_path(); 2942 if (!path) 2943 return -ENOMEM; 2944 2945 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2946 key.offset = chunk_offset; 2947 key.type = BTRFS_CHUNK_ITEM_KEY; 2948 2949 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 2950 if (ret < 0) 2951 goto out; 2952 else if (ret > 0) { /* Logic error or corruption */ 2953 btrfs_handle_fs_error(fs_info, -ENOENT, 2954 "Failed lookup while freeing chunk."); 2955 ret = -ENOENT; 2956 goto out; 2957 } 2958 2959 ret = btrfs_del_item(trans, root, path); 2960 if (ret < 0) 2961 btrfs_handle_fs_error(fs_info, ret, 2962 "Failed to delete chunk item."); 2963 out: 2964 btrfs_free_path(path); 2965 return ret; 2966 } 2967 2968 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 2969 { 2970 struct btrfs_super_block *super_copy = fs_info->super_copy; 2971 struct btrfs_disk_key *disk_key; 2972 struct btrfs_chunk *chunk; 2973 u8 *ptr; 2974 int ret = 0; 2975 u32 num_stripes; 2976 u32 array_size; 2977 u32 len = 0; 2978 u32 cur; 2979 struct btrfs_key key; 2980 2981 lockdep_assert_held(&fs_info->chunk_mutex); 2982 array_size = btrfs_super_sys_array_size(super_copy); 2983 2984 ptr = super_copy->sys_chunk_array; 2985 cur = 0; 2986 2987 while (cur < array_size) { 2988 disk_key = (struct btrfs_disk_key *)ptr; 2989 btrfs_disk_key_to_cpu(&key, disk_key); 2990 2991 len = sizeof(*disk_key); 2992 2993 if (key.type == BTRFS_CHUNK_ITEM_KEY) { 2994 chunk = (struct btrfs_chunk *)(ptr + len); 2995 num_stripes = btrfs_stack_chunk_num_stripes(chunk); 2996 len += btrfs_chunk_item_size(num_stripes); 2997 } else { 2998 ret = -EIO; 2999 break; 3000 } 3001 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID && 3002 key.offset == chunk_offset) { 3003 memmove(ptr, ptr + len, array_size - (cur + len)); 3004 array_size -= len; 3005 btrfs_set_super_sys_array_size(super_copy, array_size); 3006 } else { 3007 ptr += len; 3008 cur += len; 3009 } 3010 } 3011 return ret; 3012 } 3013 3014 /* 3015 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent. 3016 * @logical: Logical block offset in bytes. 3017 * @length: Length of extent in bytes. 3018 * 3019 * Return: Chunk mapping or ERR_PTR. 
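 *
 * The caller owns a reference on the returned map and must drop it when
 * done; a sketch of the usual pattern (mirroring btrfs_remove_chunk()):
 *
 *	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *	map = em->map_lookup;
 *	... use the mapping ...
 *	free_extent_map(em);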
3020 */ 3021 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info, 3022 u64 logical, u64 length) 3023 { 3024 struct extent_map_tree *em_tree; 3025 struct extent_map *em; 3026 3027 em_tree = &fs_info->mapping_tree; 3028 read_lock(&em_tree->lock); 3029 em = lookup_extent_mapping(em_tree, logical, length); 3030 read_unlock(&em_tree->lock); 3031 3032 if (!em) { 3033 btrfs_crit(fs_info, "unable to find logical %llu length %llu", 3034 logical, length); 3035 return ERR_PTR(-EINVAL); 3036 } 3037 3038 if (em->start > logical || em->start + em->len < logical) { 3039 btrfs_crit(fs_info, 3040 "found a bad mapping, wanted %llu-%llu, found %llu-%llu", 3041 logical, length, em->start, em->start + em->len); 3042 free_extent_map(em); 3043 return ERR_PTR(-EINVAL); 3044 } 3045 3046 /* callers are responsible for dropping em's ref. */ 3047 return em; 3048 } 3049 3050 static int remove_chunk_item(struct btrfs_trans_handle *trans, 3051 struct map_lookup *map, u64 chunk_offset) 3052 { 3053 int i; 3054 3055 /* 3056 * Removing chunk items and updating the device items in the chunks btree 3057 * requires holding the chunk_mutex. 3058 * See the comment at btrfs_chunk_alloc() for the details. 3059 */ 3060 lockdep_assert_held(&trans->fs_info->chunk_mutex); 3061 3062 for (i = 0; i < map->num_stripes; i++) { 3063 int ret; 3064 3065 ret = btrfs_update_device(trans, map->stripes[i].dev); 3066 if (ret) 3067 return ret; 3068 } 3069 3070 return btrfs_free_chunk(trans, chunk_offset); 3071 } 3072 3073 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 3074 { 3075 struct btrfs_fs_info *fs_info = trans->fs_info; 3076 struct extent_map *em; 3077 struct map_lookup *map; 3078 u64 dev_extent_len = 0; 3079 int i, ret = 0; 3080 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 3081 3082 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 3083 if (IS_ERR(em)) { 3084 /* 3085 * This is a logic error, but we don't want to just rely on the 3086 * user having built with ASSERT enabled, so if ASSERT doesn't 3087 * do anything we still error out. 3088 */ 3089 ASSERT(0); 3090 return PTR_ERR(em); 3091 } 3092 map = em->map_lookup; 3093 3094 /* 3095 * First delete the device extent items from the devices btree. 3096 * We take the device_list_mutex to avoid racing with the finishing phase 3097 * of a device replace operation. See the comment below before acquiring 3098 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex 3099 * because that can result in a deadlock when deleting the device extent 3100 * items from the devices btree - COWing an extent buffer from the btree 3101 * may result in allocating a new metadata chunk, which would attempt to 3102 * lock again fs_info->chunk_mutex. 
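 *
 * The device replace finishing phase acquires both the device_list_mutex
 * and the chunk_mutex, in that order (see the comment below, before
 * fs_info->chunk_mutex is taken), so holding either one of them is enough
 * to serialize against it; here we rely on the device_list_mutex alone.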
3103 */ 3104 mutex_lock(&fs_devices->device_list_mutex); 3105 for (i = 0; i < map->num_stripes; i++) { 3106 struct btrfs_device *device = map->stripes[i].dev; 3107 ret = btrfs_free_dev_extent(trans, device, 3108 map->stripes[i].physical, 3109 &dev_extent_len); 3110 if (ret) { 3111 mutex_unlock(&fs_devices->device_list_mutex); 3112 btrfs_abort_transaction(trans, ret); 3113 goto out; 3114 } 3115 3116 if (device->bytes_used > 0) { 3117 mutex_lock(&fs_info->chunk_mutex); 3118 btrfs_device_set_bytes_used(device, 3119 device->bytes_used - dev_extent_len); 3120 atomic64_add(dev_extent_len, &fs_info->free_chunk_space); 3121 btrfs_clear_space_info_full(fs_info); 3122 mutex_unlock(&fs_info->chunk_mutex); 3123 } 3124 } 3125 mutex_unlock(&fs_devices->device_list_mutex); 3126 3127 /* 3128 * We acquire fs_info->chunk_mutex for 2 reasons: 3129 * 3130 * 1) Just like with the first phase of the chunk allocation, we must 3131 * reserve system space, do all chunk btree updates and deletions, and 3132 * update the system chunk array in the superblock while holding this 3133 * mutex. This is for similar reasons as explained on the comment at 3134 * the top of btrfs_chunk_alloc(); 3135 * 3136 * 2) Prevent races with the final phase of a device replace operation 3137 * that replaces the device object associated with the map's stripes, 3138 * because the device object's id can change at any time during that 3139 * final phase of the device replace operation 3140 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 3141 * replaced device and then see it with an ID of 3142 * BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating 3143 * the device item, which does not exist in the chunk btree. 3144 * The finishing phase of device replace acquires both the 3145 * device_list_mutex and the chunk_mutex, in that order, so we are 3146 * safe by just acquiring the chunk_mutex. 3147 */ 3148 trans->removing_chunk = true; 3149 mutex_lock(&fs_info->chunk_mutex); 3150 3151 check_system_chunk(trans, map->type); 3152 3153 ret = remove_chunk_item(trans, map, chunk_offset); 3154 /* 3155 * Normally we should not get -ENOSPC since we reserved space before 3156 * through the call to check_system_chunk(). 3157 * 3158 * Despite our system space_info having enough free space, we may not 3159 * be able to allocate extents from its block groups, because all have 3160 * an incompatible profile, which will force us to allocate a new system 3161 * block group with the right profile, or because right after we called 3162 * check_system_chunk() above, a scrub turned the only system block group 3163 * with enough free space into RO mode. 3164 * This is explained in more detail at do_chunk_alloc(). 3165 * 3166 * So if we get -ENOSPC, allocate a new system chunk and retry once.
3167 */ 3168 if (ret == -ENOSPC) { 3169 const u64 sys_flags = btrfs_system_alloc_profile(fs_info); 3170 struct btrfs_block_group *sys_bg; 3171 3172 sys_bg = btrfs_create_chunk(trans, sys_flags); 3173 if (IS_ERR(sys_bg)) { 3174 ret = PTR_ERR(sys_bg); 3175 btrfs_abort_transaction(trans, ret); 3176 goto out; 3177 } 3178 3179 ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg); 3180 if (ret) { 3181 btrfs_abort_transaction(trans, ret); 3182 goto out; 3183 } 3184 3185 ret = remove_chunk_item(trans, map, chunk_offset); 3186 if (ret) { 3187 btrfs_abort_transaction(trans, ret); 3188 goto out; 3189 } 3190 } else if (ret) { 3191 btrfs_abort_transaction(trans, ret); 3192 goto out; 3193 } 3194 3195 trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len); 3196 3197 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 3198 ret = btrfs_del_sys_chunk(fs_info, chunk_offset); 3199 if (ret) { 3200 btrfs_abort_transaction(trans, ret); 3201 goto out; 3202 } 3203 } 3204 3205 mutex_unlock(&fs_info->chunk_mutex); 3206 trans->removing_chunk = false; 3207 3208 /* 3209 * We are done with chunk btree updates and deletions, so release the 3210 * system space we previously reserved (with check_system_chunk()). 3211 */ 3212 btrfs_trans_release_chunk_metadata(trans); 3213 3214 ret = btrfs_remove_block_group(trans, chunk_offset, em); 3215 if (ret) { 3216 btrfs_abort_transaction(trans, ret); 3217 goto out; 3218 } 3219 3220 out: 3221 if (trans->removing_chunk) { 3222 mutex_unlock(&fs_info->chunk_mutex); 3223 trans->removing_chunk = false; 3224 } 3225 /* once for us */ 3226 free_extent_map(em); 3227 return ret; 3228 } 3229 3230 int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 3231 { 3232 struct btrfs_root *root = fs_info->chunk_root; 3233 struct btrfs_trans_handle *trans; 3234 struct btrfs_block_group *block_group; 3235 u64 length; 3236 int ret; 3237 3238 /* 3239 * Prevent races with automatic removal of unused block groups. 3240 * After we relocate and before we remove the chunk with offset 3241 * chunk_offset, automatic removal of the block group can kick in, 3242 * resulting in a failure when calling btrfs_remove_chunk() below. 3243 * 3244 * Make sure to acquire this mutex before doing a tree search (dev 3245 * or chunk trees) to find chunks. Otherwise the cleaner kthread might 3246 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after 3247 * we release the path used to search the chunk/dev tree and before 3248 * the current task acquires this mutex and calls us. 3249 */ 3250 lockdep_assert_held(&fs_info->reclaim_bgs_lock); 3251 3252 /* step one, relocate all the extents inside this chunk */ 3253 btrfs_scrub_pause(fs_info); 3254 ret = btrfs_relocate_block_group(fs_info, chunk_offset); 3255 btrfs_scrub_continue(fs_info); 3256 if (ret) 3257 return ret; 3258 3259 block_group = btrfs_lookup_block_group(fs_info, chunk_offset); 3260 if (!block_group) 3261 return -ENOENT; 3262 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); 3263 length = block_group->length; 3264 btrfs_put_block_group(block_group); 3265 3266 /* 3267 * On a zoned file system, discard the whole block group, this will 3268 * trigger a REQ_OP_ZONE_RESET operation on the device zone. If 3269 * resetting the zone fails, don't treat it as a fatal problem from the 3270 * filesystem's point of view. 
3271 */ 3272 if (btrfs_is_zoned(fs_info)) { 3273 ret = btrfs_discard_extent(fs_info, chunk_offset, length, NULL); 3274 if (ret) 3275 btrfs_info(fs_info, 3276 "failed to reset zone %llu after relocation", 3277 chunk_offset); 3278 } 3279 3280 trans = btrfs_start_trans_remove_block_group(root->fs_info, 3281 chunk_offset); 3282 if (IS_ERR(trans)) { 3283 ret = PTR_ERR(trans); 3284 btrfs_handle_fs_error(root->fs_info, ret, NULL); 3285 return ret; 3286 } 3287 3288 /* 3289 * step two, delete the device extents and the 3290 * chunk tree entries 3291 */ 3292 ret = btrfs_remove_chunk(trans, chunk_offset); 3293 btrfs_end_transaction(trans); 3294 return ret; 3295 } 3296 3297 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info) 3298 { 3299 struct btrfs_root *chunk_root = fs_info->chunk_root; 3300 struct btrfs_path *path; 3301 struct extent_buffer *leaf; 3302 struct btrfs_chunk *chunk; 3303 struct btrfs_key key; 3304 struct btrfs_key found_key; 3305 u64 chunk_type; 3306 bool retried = false; 3307 int failed = 0; 3308 int ret; 3309 3310 path = btrfs_alloc_path(); 3311 if (!path) 3312 return -ENOMEM; 3313 3314 again: 3315 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3316 key.offset = (u64)-1; 3317 key.type = BTRFS_CHUNK_ITEM_KEY; 3318 3319 while (1) { 3320 mutex_lock(&fs_info->reclaim_bgs_lock); 3321 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3322 if (ret < 0) { 3323 mutex_unlock(&fs_info->reclaim_bgs_lock); 3324 goto error; 3325 } 3326 BUG_ON(ret == 0); /* Corruption */ 3327 3328 ret = btrfs_previous_item(chunk_root, path, key.objectid, 3329 key.type); 3330 if (ret) 3331 mutex_unlock(&fs_info->reclaim_bgs_lock); 3332 if (ret < 0) 3333 goto error; 3334 if (ret > 0) 3335 break; 3336 3337 leaf = path->nodes[0]; 3338 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3339 3340 chunk = btrfs_item_ptr(leaf, path->slots[0], 3341 struct btrfs_chunk); 3342 chunk_type = btrfs_chunk_type(leaf, chunk); 3343 btrfs_release_path(path); 3344 3345 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { 3346 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3347 if (ret == -ENOSPC) 3348 failed++; 3349 else 3350 BUG_ON(ret); 3351 } 3352 mutex_unlock(&fs_info->reclaim_bgs_lock); 3353 3354 if (found_key.offset == 0) 3355 break; 3356 key.offset = found_key.offset - 1; 3357 } 3358 ret = 0; 3359 if (failed && !retried) { 3360 failed = 0; 3361 retried = true; 3362 goto again; 3363 } else if (WARN_ON(failed && retried)) { 3364 ret = -ENOSPC; 3365 } 3366 error: 3367 btrfs_free_path(path); 3368 return ret; 3369 } 3370 3371 /* 3372 * return 1 : allocate a data chunk successfully, 3373 * return <0: errors during allocating a data chunk, 3374 * return 0 : no need to allocate a data chunk. 
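 *
 * The caller handles these values like this (a sketch, mirroring the use
 * in __btrfs_balance()):
 *
 *	ret = btrfs_may_alloc_data_chunk(fs_info, found_key.offset);
 *	if (ret < 0)
 *		goto error;
 *	else if (ret == 1)
 *		chunk_reserved = 1;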
3375 */ 3376 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info, 3377 u64 chunk_offset) 3378 { 3379 struct btrfs_block_group *cache; 3380 u64 bytes_used; 3381 u64 chunk_type; 3382 3383 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3384 ASSERT(cache); 3385 chunk_type = cache->flags; 3386 btrfs_put_block_group(cache); 3387 3388 if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA)) 3389 return 0; 3390 3391 spin_lock(&fs_info->data_sinfo->lock); 3392 bytes_used = fs_info->data_sinfo->bytes_used; 3393 spin_unlock(&fs_info->data_sinfo->lock); 3394 3395 if (!bytes_used) { 3396 struct btrfs_trans_handle *trans; 3397 int ret; 3398 3399 trans = btrfs_join_transaction(fs_info->tree_root); 3400 if (IS_ERR(trans)) 3401 return PTR_ERR(trans); 3402 3403 ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA); 3404 btrfs_end_transaction(trans); 3405 if (ret < 0) 3406 return ret; 3407 return 1; 3408 } 3409 3410 return 0; 3411 } 3412 3413 static int insert_balance_item(struct btrfs_fs_info *fs_info, 3414 struct btrfs_balance_control *bctl) 3415 { 3416 struct btrfs_root *root = fs_info->tree_root; 3417 struct btrfs_trans_handle *trans; 3418 struct btrfs_balance_item *item; 3419 struct btrfs_disk_balance_args disk_bargs; 3420 struct btrfs_path *path; 3421 struct extent_buffer *leaf; 3422 struct btrfs_key key; 3423 int ret, err; 3424 3425 path = btrfs_alloc_path(); 3426 if (!path) 3427 return -ENOMEM; 3428 3429 trans = btrfs_start_transaction(root, 0); 3430 if (IS_ERR(trans)) { 3431 btrfs_free_path(path); 3432 return PTR_ERR(trans); 3433 } 3434 3435 key.objectid = BTRFS_BALANCE_OBJECTID; 3436 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3437 key.offset = 0; 3438 3439 ret = btrfs_insert_empty_item(trans, root, path, &key, 3440 sizeof(*item)); 3441 if (ret) 3442 goto out; 3443 3444 leaf = path->nodes[0]; 3445 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 3446 3447 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item)); 3448 3449 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data); 3450 btrfs_set_balance_data(leaf, item, &disk_bargs); 3451 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta); 3452 btrfs_set_balance_meta(leaf, item, &disk_bargs); 3453 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys); 3454 btrfs_set_balance_sys(leaf, item, &disk_bargs); 3455 3456 btrfs_set_balance_flags(leaf, item, bctl->flags); 3457 3458 btrfs_mark_buffer_dirty(leaf); 3459 out: 3460 btrfs_free_path(path); 3461 err = btrfs_commit_transaction(trans); 3462 if (err && !ret) 3463 ret = err; 3464 return ret; 3465 } 3466 3467 static int del_balance_item(struct btrfs_fs_info *fs_info) 3468 { 3469 struct btrfs_root *root = fs_info->tree_root; 3470 struct btrfs_trans_handle *trans; 3471 struct btrfs_path *path; 3472 struct btrfs_key key; 3473 int ret, err; 3474 3475 path = btrfs_alloc_path(); 3476 if (!path) 3477 return -ENOMEM; 3478 3479 trans = btrfs_start_transaction_fallback_global_rsv(root, 0); 3480 if (IS_ERR(trans)) { 3481 btrfs_free_path(path); 3482 return PTR_ERR(trans); 3483 } 3484 3485 key.objectid = BTRFS_BALANCE_OBJECTID; 3486 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3487 key.offset = 0; 3488 3489 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 3490 if (ret < 0) 3491 goto out; 3492 if (ret > 0) { 3493 ret = -ENOENT; 3494 goto out; 3495 } 3496 3497 ret = btrfs_del_item(trans, root, path); 3498 out: 3499 btrfs_free_path(path); 3500 err = btrfs_commit_transaction(trans); 3501 if (err && !ret) 3502 ret = err; 3503 return ret; 3504 } 3505 3506 /* 3507 * This is a 
heuristic used to reduce the number of chunks balanced on 3508 * resume after balance was interrupted. 3509 */ 3510 static void update_balance_args(struct btrfs_balance_control *bctl) 3511 { 3512 /* 3513 * Turn on soft mode for chunk types that were being converted. 3514 */ 3515 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) 3516 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT; 3517 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) 3518 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT; 3519 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) 3520 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT; 3521 3522 /* 3523 * Turn on the usage filter if it is not already used. The idea is 3524 * that chunks that we have already balanced should be 3525 * reasonably full. Don't do it for chunks that are being 3526 * converted - that will keep us from relocating unconverted 3527 * (albeit full) chunks. 3528 */ 3529 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) && 3530 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3531 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3532 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE; 3533 bctl->data.usage = 90; 3534 } 3535 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) && 3536 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3537 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3538 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE; 3539 bctl->sys.usage = 90; 3540 } 3541 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) && 3542 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3543 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3544 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE; 3545 bctl->meta.usage = 90; 3546 } 3547 } 3548 3549 /* 3550 * Clear the balance status in fs_info and delete the balance item from disk. 3551 */ 3552 static void reset_balance_state(struct btrfs_fs_info *fs_info) 3553 { 3554 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3555 int ret; 3556 3557 BUG_ON(!fs_info->balance_ctl); 3558 3559 spin_lock(&fs_info->balance_lock); 3560 fs_info->balance_ctl = NULL; 3561 spin_unlock(&fs_info->balance_lock); 3562 3563 kfree(bctl); 3564 ret = del_balance_item(fs_info); 3565 if (ret) 3566 btrfs_handle_fs_error(fs_info, ret, NULL); 3567 } 3568 3569 /* 3570 * Balance filters. Return 1 if chunk should be filtered out 3571 * (should not be balanced).
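 *
 * Example (hypothetical values): for a chunk of type
 * BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1, chunk_profiles_filter()
 * reduces the type to BTRFS_BLOCK_GROUP_RAID1 and returns 0 (balance it)
 * when bargs->profiles contains that bit, 1 (filter it out) otherwise.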
3572 */ 3573 static int chunk_profiles_filter(u64 chunk_type, 3574 struct btrfs_balance_args *bargs) 3575 { 3576 chunk_type = chunk_to_extended(chunk_type) & 3577 BTRFS_EXTENDED_PROFILE_MASK; 3578 3579 if (bargs->profiles & chunk_type) 3580 return 0; 3581 3582 return 1; 3583 } 3584 3585 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, 3586 struct btrfs_balance_args *bargs) 3587 { 3588 struct btrfs_block_group *cache; 3589 u64 chunk_used; 3590 u64 user_thresh_min; 3591 u64 user_thresh_max; 3592 int ret = 1; 3593 3594 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3595 chunk_used = cache->used; 3596 3597 if (bargs->usage_min == 0) 3598 user_thresh_min = 0; 3599 else 3600 user_thresh_min = div_factor_fine(cache->length, 3601 bargs->usage_min); 3602 3603 if (bargs->usage_max == 0) 3604 user_thresh_max = 1; 3605 else if (bargs->usage_max > 100) 3606 user_thresh_max = cache->length; 3607 else 3608 user_thresh_max = div_factor_fine(cache->length, 3609 bargs->usage_max); 3610 3611 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max) 3612 ret = 0; 3613 3614 btrfs_put_block_group(cache); 3615 return ret; 3616 } 3617 3618 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, 3619 u64 chunk_offset, struct btrfs_balance_args *bargs) 3620 { 3621 struct btrfs_block_group *cache; 3622 u64 chunk_used, user_thresh; 3623 int ret = 1; 3624 3625 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3626 chunk_used = cache->used; 3627 3628 if (bargs->usage_min == 0) 3629 user_thresh = 1; 3630 else if (bargs->usage > 100) 3631 user_thresh = cache->length; 3632 else 3633 user_thresh = div_factor_fine(cache->length, bargs->usage); 3634 3635 if (chunk_used < user_thresh) 3636 ret = 0; 3637 3638 btrfs_put_block_group(cache); 3639 return ret; 3640 } 3641 3642 static int chunk_devid_filter(struct extent_buffer *leaf, 3643 struct btrfs_chunk *chunk, 3644 struct btrfs_balance_args *bargs) 3645 { 3646 struct btrfs_stripe *stripe; 3647 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3648 int i; 3649 3650 for (i = 0; i < num_stripes; i++) { 3651 stripe = btrfs_stripe_nr(chunk, i); 3652 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid) 3653 return 0; 3654 } 3655 3656 return 1; 3657 } 3658 3659 static u64 calc_data_stripes(u64 type, int num_stripes) 3660 { 3661 const int index = btrfs_bg_flags_to_raid_index(type); 3662 const int ncopies = btrfs_raid_array[index].ncopies; 3663 const int nparity = btrfs_raid_array[index].nparity; 3664 3665 return (num_stripes - nparity) / ncopies; 3666 } 3667 3668 /* [pstart, pend) */ 3669 static int chunk_drange_filter(struct extent_buffer *leaf, 3670 struct btrfs_chunk *chunk, 3671 struct btrfs_balance_args *bargs) 3672 { 3673 struct btrfs_stripe *stripe; 3674 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3675 u64 stripe_offset; 3676 u64 stripe_length; 3677 u64 type; 3678 int factor; 3679 int i; 3680 3681 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID)) 3682 return 0; 3683 3684 type = btrfs_chunk_type(leaf, chunk); 3685 factor = calc_data_stripes(type, num_stripes); 3686 3687 for (i = 0; i < num_stripes; i++) { 3688 stripe = btrfs_stripe_nr(chunk, i); 3689 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid) 3690 continue; 3691 3692 stripe_offset = btrfs_stripe_offset(leaf, stripe); 3693 stripe_length = btrfs_chunk_length(leaf, chunk); 3694 stripe_length = div_u64(stripe_length, factor); 3695 3696 if (stripe_offset < bargs->pend && 3697 stripe_offset + stripe_length > bargs->pstart) 3698 return 0; 
3699 } 3700 3701 return 1; 3702 } 3703 3704 /* [vstart, vend) */ 3705 static int chunk_vrange_filter(struct extent_buffer *leaf, 3706 struct btrfs_chunk *chunk, 3707 u64 chunk_offset, 3708 struct btrfs_balance_args *bargs) 3709 { 3710 if (chunk_offset < bargs->vend && 3711 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart) 3712 /* at least part of the chunk is inside this vrange */ 3713 return 0; 3714 3715 return 1; 3716 } 3717 3718 static int chunk_stripes_range_filter(struct extent_buffer *leaf, 3719 struct btrfs_chunk *chunk, 3720 struct btrfs_balance_args *bargs) 3721 { 3722 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3723 3724 if (bargs->stripes_min <= num_stripes 3725 && num_stripes <= bargs->stripes_max) 3726 return 0; 3727 3728 return 1; 3729 } 3730 3731 static int chunk_soft_convert_filter(u64 chunk_type, 3732 struct btrfs_balance_args *bargs) 3733 { 3734 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 3735 return 0; 3736 3737 chunk_type = chunk_to_extended(chunk_type) & 3738 BTRFS_EXTENDED_PROFILE_MASK; 3739 3740 if (bargs->target == chunk_type) 3741 return 1; 3742 3743 return 0; 3744 } 3745 3746 static int should_balance_chunk(struct extent_buffer *leaf, 3747 struct btrfs_chunk *chunk, u64 chunk_offset) 3748 { 3749 struct btrfs_fs_info *fs_info = leaf->fs_info; 3750 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3751 struct btrfs_balance_args *bargs = NULL; 3752 u64 chunk_type = btrfs_chunk_type(leaf, chunk); 3753 3754 /* type filter */ 3755 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) & 3756 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) { 3757 return 0; 3758 } 3759 3760 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3761 bargs = &bctl->data; 3762 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3763 bargs = &bctl->sys; 3764 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3765 bargs = &bctl->meta; 3766 3767 /* profiles filter */ 3768 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) && 3769 chunk_profiles_filter(chunk_type, bargs)) { 3770 return 0; 3771 } 3772 3773 /* usage filter */ 3774 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) && 3775 chunk_usage_filter(fs_info, chunk_offset, bargs)) { 3776 return 0; 3777 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3778 chunk_usage_range_filter(fs_info, chunk_offset, bargs)) { 3779 return 0; 3780 } 3781 3782 /* devid filter */ 3783 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) && 3784 chunk_devid_filter(leaf, chunk, bargs)) { 3785 return 0; 3786 } 3787 3788 /* drange filter, makes sense only with devid filter */ 3789 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) && 3790 chunk_drange_filter(leaf, chunk, bargs)) { 3791 return 0; 3792 } 3793 3794 /* vrange filter */ 3795 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) && 3796 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) { 3797 return 0; 3798 } 3799 3800 /* stripes filter */ 3801 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) && 3802 chunk_stripes_range_filter(leaf, chunk, bargs)) { 3803 return 0; 3804 } 3805 3806 /* soft profile changing mode */ 3807 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) && 3808 chunk_soft_convert_filter(chunk_type, bargs)) { 3809 return 0; 3810 } 3811 3812 /* 3813 * limited by count, must be the last filter 3814 */ 3815 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) { 3816 if (bargs->limit == 0) 3817 return 0; 3818 else 3819 bargs->limit--; 3820 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) { 3821 /* 3822 * Same logic as the 'limit' filter; the minimum cannot be 3823 * determined here 
because we do not have the global information 3824 * about the count of all chunks that satisfy the filters. 3825 */ 3826 if (bargs->limit_max == 0) 3827 return 0; 3828 else 3829 bargs->limit_max--; 3830 } 3831 3832 return 1; 3833 } 3834 3835 static int __btrfs_balance(struct btrfs_fs_info *fs_info) 3836 { 3837 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3838 struct btrfs_root *chunk_root = fs_info->chunk_root; 3839 u64 chunk_type; 3840 struct btrfs_chunk *chunk; 3841 struct btrfs_path *path = NULL; 3842 struct btrfs_key key; 3843 struct btrfs_key found_key; 3844 struct extent_buffer *leaf; 3845 int slot; 3846 int ret; 3847 int enospc_errors = 0; 3848 bool counting = true; 3849 /* The single value limit and the min/max limits use the same bytes in the balance args (a union), so save the single values */ 3850 u64 limit_data = bctl->data.limit; 3851 u64 limit_meta = bctl->meta.limit; 3852 u64 limit_sys = bctl->sys.limit; 3853 u32 count_data = 0; 3854 u32 count_meta = 0; 3855 u32 count_sys = 0; 3856 int chunk_reserved = 0; 3857 3858 path = btrfs_alloc_path(); 3859 if (!path) { 3860 ret = -ENOMEM; 3861 goto error; 3862 } 3863 3864 /* zero out stat counters */ 3865 spin_lock(&fs_info->balance_lock); 3866 memset(&bctl->stat, 0, sizeof(bctl->stat)); 3867 spin_unlock(&fs_info->balance_lock); 3868 again: 3869 if (!counting) { 3870 /* 3871 * The single value limit and min/max limits use the same bytes 3872 * in the balance args union, so restore the saved single values. 3873 */ 3874 bctl->data.limit = limit_data; 3875 bctl->meta.limit = limit_meta; 3876 bctl->sys.limit = limit_sys; 3877 } 3878 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3879 key.offset = (u64)-1; 3880 key.type = BTRFS_CHUNK_ITEM_KEY; 3881 3882 while (1) { 3883 if ((!counting && atomic_read(&fs_info->balance_pause_req)) || 3884 atomic_read(&fs_info->balance_cancel_req)) { 3885 ret = -ECANCELED; 3886 goto error; 3887 } 3888 3889 mutex_lock(&fs_info->reclaim_bgs_lock); 3890 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3891 if (ret < 0) { 3892 mutex_unlock(&fs_info->reclaim_bgs_lock); 3893 goto error; 3894 } 3895 3896 /* 3897 * This shouldn't happen; it means the last relocate 3898 * failed. 3899 */ 3900 if (ret == 0) 3901 BUG(); /* FIXME break ?
*/ 3902 3903 ret = btrfs_previous_item(chunk_root, path, 0, 3904 BTRFS_CHUNK_ITEM_KEY); 3905 if (ret) { 3906 mutex_unlock(&fs_info->reclaim_bgs_lock); 3907 ret = 0; 3908 break; 3909 } 3910 3911 leaf = path->nodes[0]; 3912 slot = path->slots[0]; 3913 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3914 3915 if (found_key.objectid != key.objectid) { 3916 mutex_unlock(&fs_info->reclaim_bgs_lock); 3917 break; 3918 } 3919 3920 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 3921 chunk_type = btrfs_chunk_type(leaf, chunk); 3922 3923 if (!counting) { 3924 spin_lock(&fs_info->balance_lock); 3925 bctl->stat.considered++; 3926 spin_unlock(&fs_info->balance_lock); 3927 } 3928 3929 ret = should_balance_chunk(leaf, chunk, found_key.offset); 3930 3931 btrfs_release_path(path); 3932 if (!ret) { 3933 mutex_unlock(&fs_info->reclaim_bgs_lock); 3934 goto loop; 3935 } 3936 3937 if (counting) { 3938 mutex_unlock(&fs_info->reclaim_bgs_lock); 3939 spin_lock(&fs_info->balance_lock); 3940 bctl->stat.expected++; 3941 spin_unlock(&fs_info->balance_lock); 3942 3943 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3944 count_data++; 3945 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3946 count_sys++; 3947 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3948 count_meta++; 3949 3950 goto loop; 3951 } 3952 3953 /* 3954 * Apply limit_min filter, no need to check if the LIMITS 3955 * filter is used, limit_min is 0 by default 3956 */ 3957 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) && 3958 count_data < bctl->data.limit_min) 3959 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) && 3960 count_meta < bctl->meta.limit_min) 3961 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) && 3962 count_sys < bctl->sys.limit_min)) { 3963 mutex_unlock(&fs_info->reclaim_bgs_lock); 3964 goto loop; 3965 } 3966 3967 if (!chunk_reserved) { 3968 /* 3969 * We may be relocating the only data chunk we have, 3970 * which could potentially end up with losing data's 3971 * raid profile, so lets allocate an empty one in 3972 * advance. 3973 */ 3974 ret = btrfs_may_alloc_data_chunk(fs_info, 3975 found_key.offset); 3976 if (ret < 0) { 3977 mutex_unlock(&fs_info->reclaim_bgs_lock); 3978 goto error; 3979 } else if (ret == 1) { 3980 chunk_reserved = 1; 3981 } 3982 } 3983 3984 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3985 mutex_unlock(&fs_info->reclaim_bgs_lock); 3986 if (ret == -ENOSPC) { 3987 enospc_errors++; 3988 } else if (ret == -ETXTBSY) { 3989 btrfs_info(fs_info, 3990 "skipping relocation of block group %llu due to active swapfile", 3991 found_key.offset); 3992 ret = 0; 3993 } else if (ret) { 3994 goto error; 3995 } else { 3996 spin_lock(&fs_info->balance_lock); 3997 bctl->stat.completed++; 3998 spin_unlock(&fs_info->balance_lock); 3999 } 4000 loop: 4001 if (found_key.offset == 0) 4002 break; 4003 key.offset = found_key.offset - 1; 4004 } 4005 4006 if (counting) { 4007 btrfs_release_path(path); 4008 counting = false; 4009 goto again; 4010 } 4011 error: 4012 btrfs_free_path(path); 4013 if (enospc_errors) { 4014 btrfs_info(fs_info, "%d enospc errors during balance", 4015 enospc_errors); 4016 if (!ret) 4017 ret = -ENOSPC; 4018 } 4019 4020 return ret; 4021 } 4022 4023 /** 4024 * alloc_profile_is_valid - see if a given profile is valid and reduced 4025 * @flags: profile to validate 4026 * @extended: if true @flags is treated as an extended profile 4027 */ 4028 static int alloc_profile_is_valid(u64 flags, int extended) 4029 { 4030 u64 mask = (extended ? 
BTRFS_EXTENDED_PROFILE_MASK : 4031 BTRFS_BLOCK_GROUP_PROFILE_MASK); 4032 4033 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK; 4034 4035 /* 1) check that all other bits are zeroed */ 4036 if (flags & ~mask) 4037 return 0; 4038 4039 /* 2) see if profile is reduced */ 4040 if (flags == 0) 4041 return !extended; /* "0" is valid for usual profiles */ 4042 4043 return has_single_bit_set(flags); 4044 } 4045 4046 static inline int balance_need_close(struct btrfs_fs_info *fs_info) 4047 { 4048 /* cancel requested || normal exit path */ 4049 return atomic_read(&fs_info->balance_cancel_req) || 4050 (atomic_read(&fs_info->balance_pause_req) == 0 && 4051 atomic_read(&fs_info->balance_cancel_req) == 0); 4052 } 4053 4054 /* 4055 * Validate target profile against allowed profiles and return true if it's OK. 4056 * Otherwise print the error message and return false. 4057 */ 4058 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info, 4059 const struct btrfs_balance_args *bargs, 4060 u64 allowed, const char *type) 4061 { 4062 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 4063 return true; 4064 4065 if (fs_info->sectorsize < PAGE_SIZE && 4066 bargs->target & BTRFS_BLOCK_GROUP_RAID56_MASK) { 4067 btrfs_err(fs_info, 4068 "RAID56 is not yet supported for sectorsize %u with page size %lu", 4069 fs_info->sectorsize, PAGE_SIZE); 4070 return false; 4071 } 4072 /* Profile is valid and does not have bits outside of the allowed set */ 4073 if (alloc_profile_is_valid(bargs->target, 1) && 4074 (bargs->target & ~allowed) == 0) 4075 return true; 4076 4077 btrfs_err(fs_info, "balance: invalid convert %s profile %s", 4078 type, btrfs_bg_type_to_raid_name(bargs->target)); 4079 return false; 4080 } 4081 4082 /* 4083 * Fill @buf with textual description of balance filter flags @bargs, up to 4084 * @size_buf including the terminating null. The output may be trimmed if it 4085 * does not fit into the provided buffer. 
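 *
 * For example, a balance with BTRFS_BALANCE_ARGS_CONVERT (target raid1)
 * and BTRFS_BALANCE_ARGS_SOFT set is rendered as "convert=raid1,soft",
 * with the trailing comma stripped at out_overflow below.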
4086 */ 4087 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf, 4088 u32 size_buf) 4089 { 4090 int ret; 4091 u32 size_bp = size_buf; 4092 char *bp = buf; 4093 u64 flags = bargs->flags; 4094 char tmp_buf[128] = {'\0'}; 4095 4096 if (!flags) 4097 return; 4098 4099 #define CHECK_APPEND_NOARG(a) \ 4100 do { \ 4101 ret = snprintf(bp, size_bp, (a)); \ 4102 if (ret < 0 || ret >= size_bp) \ 4103 goto out_overflow; \ 4104 size_bp -= ret; \ 4105 bp += ret; \ 4106 } while (0) 4107 4108 #define CHECK_APPEND_1ARG(a, v1) \ 4109 do { \ 4110 ret = snprintf(bp, size_bp, (a), (v1)); \ 4111 if (ret < 0 || ret >= size_bp) \ 4112 goto out_overflow; \ 4113 size_bp -= ret; \ 4114 bp += ret; \ 4115 } while (0) 4116 4117 #define CHECK_APPEND_2ARG(a, v1, v2) \ 4118 do { \ 4119 ret = snprintf(bp, size_bp, (a), (v1), (v2)); \ 4120 if (ret < 0 || ret >= size_bp) \ 4121 goto out_overflow; \ 4122 size_bp -= ret; \ 4123 bp += ret; \ 4124 } while (0) 4125 4126 if (flags & BTRFS_BALANCE_ARGS_CONVERT) 4127 CHECK_APPEND_1ARG("convert=%s,", 4128 btrfs_bg_type_to_raid_name(bargs->target)); 4129 4130 if (flags & BTRFS_BALANCE_ARGS_SOFT) 4131 CHECK_APPEND_NOARG("soft,"); 4132 4133 if (flags & BTRFS_BALANCE_ARGS_PROFILES) { 4134 btrfs_describe_block_groups(bargs->profiles, tmp_buf, 4135 sizeof(tmp_buf)); 4136 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf); 4137 } 4138 4139 if (flags & BTRFS_BALANCE_ARGS_USAGE) 4140 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage); 4141 4142 if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) 4143 CHECK_APPEND_2ARG("usage=%u..%u,", 4144 bargs->usage_min, bargs->usage_max); 4145 4146 if (flags & BTRFS_BALANCE_ARGS_DEVID) 4147 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid); 4148 4149 if (flags & BTRFS_BALANCE_ARGS_DRANGE) 4150 CHECK_APPEND_2ARG("drange=%llu..%llu,", 4151 bargs->pstart, bargs->pend); 4152 4153 if (flags & BTRFS_BALANCE_ARGS_VRANGE) 4154 CHECK_APPEND_2ARG("vrange=%llu..%llu,", 4155 bargs->vstart, bargs->vend); 4156 4157 if (flags & BTRFS_BALANCE_ARGS_LIMIT) 4158 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit); 4159 4160 if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE) 4161 CHECK_APPEND_2ARG("limit=%u..%u,", 4162 bargs->limit_min, bargs->limit_max); 4163 4164 if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) 4165 CHECK_APPEND_2ARG("stripes=%u..%u,", 4166 bargs->stripes_min, bargs->stripes_max); 4167 4168 #undef CHECK_APPEND_2ARG 4169 #undef CHECK_APPEND_1ARG 4170 #undef CHECK_APPEND_NOARG 4171 4172 out_overflow: 4173 4174 if (size_bp < size_buf) 4175 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */ 4176 else 4177 buf[0] = '\0'; 4178 } 4179 4180 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info) 4181 { 4182 u32 size_buf = 1024; 4183 char tmp_buf[192] = {'\0'}; 4184 char *buf; 4185 char *bp; 4186 u32 size_bp = size_buf; 4187 int ret; 4188 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 4189 4190 buf = kzalloc(size_buf, GFP_KERNEL); 4191 if (!buf) 4192 return; 4193 4194 bp = buf; 4195 4196 #define CHECK_APPEND_1ARG(a, v1) \ 4197 do { \ 4198 ret = snprintf(bp, size_bp, (a), (v1)); \ 4199 if (ret < 0 || ret >= size_bp) \ 4200 goto out_overflow; \ 4201 size_bp -= ret; \ 4202 bp += ret; \ 4203 } while (0) 4204 4205 if (bctl->flags & BTRFS_BALANCE_FORCE) 4206 CHECK_APPEND_1ARG("%s", "-f "); 4207 4208 if (bctl->flags & BTRFS_BALANCE_DATA) { 4209 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf)); 4210 CHECK_APPEND_1ARG("-d%s ", tmp_buf); 4211 } 4212 4213 if (bctl->flags & BTRFS_BALANCE_METADATA) { 4214 describe_balance_args(&bctl->meta, 
tmp_buf, sizeof(tmp_buf));
4215		CHECK_APPEND_1ARG("-m%s ", tmp_buf);
4216	}
4217
4218	if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
4219		describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
4220		CHECK_APPEND_1ARG("-s%s ", tmp_buf);
4221	}
4222
4223 #undef CHECK_APPEND_1ARG
4224
4225 out_overflow:
4226
4227	if (size_bp < size_buf)
4228		buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
4229	btrfs_info(fs_info, "balance: %s %s",
4230		   (bctl->flags & BTRFS_BALANCE_RESUME) ?
4231		   "resume" : "start", buf);
4232
4233	kfree(buf);
4234 }
4235
4236 /*
4237  * Should be called with the balance mutex held.
4238  */
4239 int btrfs_balance(struct btrfs_fs_info *fs_info,
4240		  struct btrfs_balance_control *bctl,
4241		  struct btrfs_ioctl_balance_args *bargs)
4242 {
4243	u64 meta_target, data_target;
4244	u64 allowed;
4245	int mixed = 0;
4246	int ret;
4247	u64 num_devices;
4248	unsigned seq;
4249	bool reducing_redundancy;
4250	int i;
4251
4252	if (btrfs_fs_closing(fs_info) ||
4253	    atomic_read(&fs_info->balance_pause_req) ||
4254	    btrfs_should_cancel_balance(fs_info)) {
4255		ret = -EINVAL;
4256		goto out;
4257	}
4258
4259	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
4260	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
4261		mixed = 1;
4262
4263	/*
4264	 * In case of mixed groups both data and meta should be picked,
4265	 * and identical options should be given for both of them.
4266	 */
4267	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
4268	if (mixed && (bctl->flags & allowed)) {
4269		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
4270		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
4271		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
4272			btrfs_err(fs_info,
4273	"balance: mixed groups data and metadata options must be the same");
4274			ret = -EINVAL;
4275			goto out;
4276		}
4277	}
4278
4279	/*
4280	 * rw_devices will not change at the moment, device add/delete/replace
4281	 * are exclusive
4282	 */
4283	num_devices = fs_info->fs_devices->rw_devices;
4284
4285	/*
4286	 * SINGLE profile on-disk has no profile bit, but in-memory we have a
4287	 * special bit for it, to make it easier to distinguish. Thus we need
4288	 * to set it manually, or balance would refuse the profile.
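	 *
	 * E.g. with num_devices == 2 the loop below ends up allowing single,
	 * dup, raid0, raid1, raid5 and raid10 (devs_min <= 2), while raid1c3,
	 * raid1c4 and raid6 (devs_min >= 3) stay out of the allowed mask.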
4289 */ 4290 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE; 4291 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) 4292 if (num_devices >= btrfs_raid_array[i].devs_min) 4293 allowed |= btrfs_raid_array[i].bg_flag; 4294 4295 if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") || 4296 !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") || 4297 !validate_convert_profile(fs_info, &bctl->sys, allowed, "system")) { 4298 ret = -EINVAL; 4299 goto out; 4300 } 4301 4302 /* 4303 * Allow to reduce metadata or system integrity only if force set for 4304 * profiles with redundancy (copies, parity) 4305 */ 4306 allowed = 0; 4307 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) { 4308 if (btrfs_raid_array[i].ncopies >= 2 || 4309 btrfs_raid_array[i].tolerated_failures >= 1) 4310 allowed |= btrfs_raid_array[i].bg_flag; 4311 } 4312 do { 4313 seq = read_seqbegin(&fs_info->profiles_lock); 4314 4315 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4316 (fs_info->avail_system_alloc_bits & allowed) && 4317 !(bctl->sys.target & allowed)) || 4318 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4319 (fs_info->avail_metadata_alloc_bits & allowed) && 4320 !(bctl->meta.target & allowed))) 4321 reducing_redundancy = true; 4322 else 4323 reducing_redundancy = false; 4324 4325 /* if we're not converting, the target field is uninitialized */ 4326 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4327 bctl->meta.target : fs_info->avail_metadata_alloc_bits; 4328 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4329 bctl->data.target : fs_info->avail_data_alloc_bits; 4330 } while (read_seqretry(&fs_info->profiles_lock, seq)); 4331 4332 if (reducing_redundancy) { 4333 if (bctl->flags & BTRFS_BALANCE_FORCE) { 4334 btrfs_info(fs_info, 4335 "balance: force reducing metadata redundancy"); 4336 } else { 4337 btrfs_err(fs_info, 4338 "balance: reduces metadata redundancy, use --force if you want this"); 4339 ret = -EINVAL; 4340 goto out; 4341 } 4342 } 4343 4344 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) < 4345 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) { 4346 btrfs_warn(fs_info, 4347 "balance: metadata profile %s has lower redundancy than data profile %s", 4348 btrfs_bg_type_to_raid_name(meta_target), 4349 btrfs_bg_type_to_raid_name(data_target)); 4350 } 4351 4352 ret = insert_balance_item(fs_info, bctl); 4353 if (ret && ret != -EEXIST) 4354 goto out; 4355 4356 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) { 4357 BUG_ON(ret == -EEXIST); 4358 BUG_ON(fs_info->balance_ctl); 4359 spin_lock(&fs_info->balance_lock); 4360 fs_info->balance_ctl = bctl; 4361 spin_unlock(&fs_info->balance_lock); 4362 } else { 4363 BUG_ON(ret != -EEXIST); 4364 spin_lock(&fs_info->balance_lock); 4365 update_balance_args(bctl); 4366 spin_unlock(&fs_info->balance_lock); 4367 } 4368 4369 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4370 set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4371 describe_balance_start_or_resume(fs_info); 4372 mutex_unlock(&fs_info->balance_mutex); 4373 4374 ret = __btrfs_balance(fs_info); 4375 4376 mutex_lock(&fs_info->balance_mutex); 4377 if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) { 4378 btrfs_info(fs_info, "balance: paused"); 4379 btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED); 4380 } 4381 /* 4382 * Balance can be canceled by: 4383 * 4384 * - Regular cancel request 4385 * Then ret == -ECANCELED and balance_cancel_req > 0 4386 * 4387 * - Fatal signal to "btrfs" process 4388 * 
Either the signal was caught by wait_reserve_ticket() and callers
4389	 * got -EINTR, or it was caught by btrfs_should_cancel_balance() and
4390	 * we got -ECANCELED.
4391	 * Either way, in this case balance_cancel_req = 0, and
4392	 * ret == -EINTR or ret == -ECANCELED.
4393	 *
4394	 * So here we only check the return value to catch canceled balance.
4395	 */
4396	else if (ret == -ECANCELED || ret == -EINTR)
4397		btrfs_info(fs_info, "balance: canceled");
4398	else
4399		btrfs_info(fs_info, "balance: ended with status: %d", ret);
4400
4401	clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4402
4403	if (bargs) {
4404		memset(bargs, 0, sizeof(*bargs));
4405		btrfs_update_ioctl_balance_args(fs_info, bargs);
4406	}
4407
4408	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
4409	    balance_need_close(fs_info)) {
4410		reset_balance_state(fs_info);
4411		btrfs_exclop_finish(fs_info);
4412	}
4413
4414	wake_up(&fs_info->balance_wait_q);
4415
4416	return ret;
4417 out:
4418	if (bctl->flags & BTRFS_BALANCE_RESUME)
4419		reset_balance_state(fs_info);
4420	else
4421		kfree(bctl);
4422	btrfs_exclop_finish(fs_info);
4423
4424	return ret;
4425 }
4426
4427 static int balance_kthread(void *data)
4428 {
4429	struct btrfs_fs_info *fs_info = data;
4430	int ret = 0;
4431
4432	mutex_lock(&fs_info->balance_mutex);
4433	if (fs_info->balance_ctl)
4434		ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
4435	mutex_unlock(&fs_info->balance_mutex);
4436
4437	return ret;
4438 }
4439
4440 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
4441 {
4442	struct task_struct *tsk;
4443
4444	mutex_lock(&fs_info->balance_mutex);
4445	if (!fs_info->balance_ctl) {
4446		mutex_unlock(&fs_info->balance_mutex);
4447		return 0;
4448	}
4449	mutex_unlock(&fs_info->balance_mutex);
4450
4451	if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
4452		btrfs_info(fs_info, "balance: resume skipped");
4453		return 0;
4454	}
4455
4456	spin_lock(&fs_info->super_lock);
4457	ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED);
4458	fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE;
4459	spin_unlock(&fs_info->super_lock);
4460	/*
4461	 * A ro->rw remount sequence should continue with the paused balance
4462	 * regardless of who paused it (the system or the user), so set the
4463	 * resume flag.
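	 *
	 * With BTRFS_BALANCE_RESUME set, btrfs_balance() takes the resume
	 * path: the balance item already stored on disk is reused via
	 * update_balance_args() instead of a new one being inserted.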
4464 */ 4465 spin_lock(&fs_info->balance_lock); 4466 fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME; 4467 spin_unlock(&fs_info->balance_lock); 4468 4469 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); 4470 return PTR_ERR_OR_ZERO(tsk); 4471 } 4472 4473 int btrfs_recover_balance(struct btrfs_fs_info *fs_info) 4474 { 4475 struct btrfs_balance_control *bctl; 4476 struct btrfs_balance_item *item; 4477 struct btrfs_disk_balance_args disk_bargs; 4478 struct btrfs_path *path; 4479 struct extent_buffer *leaf; 4480 struct btrfs_key key; 4481 int ret; 4482 4483 path = btrfs_alloc_path(); 4484 if (!path) 4485 return -ENOMEM; 4486 4487 key.objectid = BTRFS_BALANCE_OBJECTID; 4488 key.type = BTRFS_TEMPORARY_ITEM_KEY; 4489 key.offset = 0; 4490 4491 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4492 if (ret < 0) 4493 goto out; 4494 if (ret > 0) { /* ret = -ENOENT; */ 4495 ret = 0; 4496 goto out; 4497 } 4498 4499 bctl = kzalloc(sizeof(*bctl), GFP_NOFS); 4500 if (!bctl) { 4501 ret = -ENOMEM; 4502 goto out; 4503 } 4504 4505 leaf = path->nodes[0]; 4506 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 4507 4508 bctl->flags = btrfs_balance_flags(leaf, item); 4509 bctl->flags |= BTRFS_BALANCE_RESUME; 4510 4511 btrfs_balance_data(leaf, item, &disk_bargs); 4512 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs); 4513 btrfs_balance_meta(leaf, item, &disk_bargs); 4514 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs); 4515 btrfs_balance_sys(leaf, item, &disk_bargs); 4516 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs); 4517 4518 /* 4519 * This should never happen, as the paused balance state is recovered 4520 * during mount without any chance of other exclusive ops to collide. 4521 * 4522 * This gives the exclusive op status to balance and keeps in paused 4523 * state until user intervention (cancel or umount). If the ownership 4524 * cannot be assigned, show a message but do not fail. The balance 4525 * is in a paused state and must have fs_info::balance_ctl properly 4526 * set up. 
4527 */ 4528 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED)) 4529 btrfs_warn(fs_info, 4530 "balance: cannot set exclusive op status, resume manually"); 4531 4532 btrfs_release_path(path); 4533 4534 mutex_lock(&fs_info->balance_mutex); 4535 BUG_ON(fs_info->balance_ctl); 4536 spin_lock(&fs_info->balance_lock); 4537 fs_info->balance_ctl = bctl; 4538 spin_unlock(&fs_info->balance_lock); 4539 mutex_unlock(&fs_info->balance_mutex); 4540 out: 4541 btrfs_free_path(path); 4542 return ret; 4543 } 4544 4545 int btrfs_pause_balance(struct btrfs_fs_info *fs_info) 4546 { 4547 int ret = 0; 4548 4549 mutex_lock(&fs_info->balance_mutex); 4550 if (!fs_info->balance_ctl) { 4551 mutex_unlock(&fs_info->balance_mutex); 4552 return -ENOTCONN; 4553 } 4554 4555 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4556 atomic_inc(&fs_info->balance_pause_req); 4557 mutex_unlock(&fs_info->balance_mutex); 4558 4559 wait_event(fs_info->balance_wait_q, 4560 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4561 4562 mutex_lock(&fs_info->balance_mutex); 4563 /* we are good with balance_ctl ripped off from under us */ 4564 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4565 atomic_dec(&fs_info->balance_pause_req); 4566 } else { 4567 ret = -ENOTCONN; 4568 } 4569 4570 mutex_unlock(&fs_info->balance_mutex); 4571 return ret; 4572 } 4573 4574 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) 4575 { 4576 mutex_lock(&fs_info->balance_mutex); 4577 if (!fs_info->balance_ctl) { 4578 mutex_unlock(&fs_info->balance_mutex); 4579 return -ENOTCONN; 4580 } 4581 4582 /* 4583 * A paused balance with the item stored on disk can be resumed at 4584 * mount time if the mount is read-write. Otherwise it's still paused 4585 * and we must not allow cancelling as it deletes the item. 4586 */ 4587 if (sb_rdonly(fs_info->sb)) { 4588 mutex_unlock(&fs_info->balance_mutex); 4589 return -EROFS; 4590 } 4591 4592 atomic_inc(&fs_info->balance_cancel_req); 4593 /* 4594 * if we are running just wait and return, balance item is 4595 * deleted in btrfs_balance in this case 4596 */ 4597 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4598 mutex_unlock(&fs_info->balance_mutex); 4599 wait_event(fs_info->balance_wait_q, 4600 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4601 mutex_lock(&fs_info->balance_mutex); 4602 } else { 4603 mutex_unlock(&fs_info->balance_mutex); 4604 /* 4605 * Lock released to allow other waiters to continue, we'll 4606 * reexamine the status again. 
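		 *
		 * The balance may have been paused again or fully finished by
		 * another task in the meantime, hence the fs_info->balance_ctl
		 * check below.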
4607 */ 4608 mutex_lock(&fs_info->balance_mutex); 4609 4610 if (fs_info->balance_ctl) { 4611 reset_balance_state(fs_info); 4612 btrfs_exclop_finish(fs_info); 4613 btrfs_info(fs_info, "balance: canceled"); 4614 } 4615 } 4616 4617 BUG_ON(fs_info->balance_ctl || 4618 test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4619 atomic_dec(&fs_info->balance_cancel_req); 4620 mutex_unlock(&fs_info->balance_mutex); 4621 return 0; 4622 } 4623 4624 int btrfs_uuid_scan_kthread(void *data) 4625 { 4626 struct btrfs_fs_info *fs_info = data; 4627 struct btrfs_root *root = fs_info->tree_root; 4628 struct btrfs_key key; 4629 struct btrfs_path *path = NULL; 4630 int ret = 0; 4631 struct extent_buffer *eb; 4632 int slot; 4633 struct btrfs_root_item root_item; 4634 u32 item_size; 4635 struct btrfs_trans_handle *trans = NULL; 4636 bool closing = false; 4637 4638 path = btrfs_alloc_path(); 4639 if (!path) { 4640 ret = -ENOMEM; 4641 goto out; 4642 } 4643 4644 key.objectid = 0; 4645 key.type = BTRFS_ROOT_ITEM_KEY; 4646 key.offset = 0; 4647 4648 while (1) { 4649 if (btrfs_fs_closing(fs_info)) { 4650 closing = true; 4651 break; 4652 } 4653 ret = btrfs_search_forward(root, &key, path, 4654 BTRFS_OLDEST_GENERATION); 4655 if (ret) { 4656 if (ret > 0) 4657 ret = 0; 4658 break; 4659 } 4660 4661 if (key.type != BTRFS_ROOT_ITEM_KEY || 4662 (key.objectid < BTRFS_FIRST_FREE_OBJECTID && 4663 key.objectid != BTRFS_FS_TREE_OBJECTID) || 4664 key.objectid > BTRFS_LAST_FREE_OBJECTID) 4665 goto skip; 4666 4667 eb = path->nodes[0]; 4668 slot = path->slots[0]; 4669 item_size = btrfs_item_size(eb, slot); 4670 if (item_size < sizeof(root_item)) 4671 goto skip; 4672 4673 read_extent_buffer(eb, &root_item, 4674 btrfs_item_ptr_offset(eb, slot), 4675 (int)sizeof(root_item)); 4676 if (btrfs_root_refs(&root_item) == 0) 4677 goto skip; 4678 4679 if (!btrfs_is_empty_uuid(root_item.uuid) || 4680 !btrfs_is_empty_uuid(root_item.received_uuid)) { 4681 if (trans) 4682 goto update_tree; 4683 4684 btrfs_release_path(path); 4685 /* 4686 * 1 - subvol uuid item 4687 * 1 - received_subvol uuid item 4688 */ 4689 trans = btrfs_start_transaction(fs_info->uuid_root, 2); 4690 if (IS_ERR(trans)) { 4691 ret = PTR_ERR(trans); 4692 break; 4693 } 4694 continue; 4695 } else { 4696 goto skip; 4697 } 4698 update_tree: 4699 btrfs_release_path(path); 4700 if (!btrfs_is_empty_uuid(root_item.uuid)) { 4701 ret = btrfs_uuid_tree_add(trans, root_item.uuid, 4702 BTRFS_UUID_KEY_SUBVOL, 4703 key.objectid); 4704 if (ret < 0) { 4705 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4706 ret); 4707 break; 4708 } 4709 } 4710 4711 if (!btrfs_is_empty_uuid(root_item.received_uuid)) { 4712 ret = btrfs_uuid_tree_add(trans, 4713 root_item.received_uuid, 4714 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4715 key.objectid); 4716 if (ret < 0) { 4717 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4718 ret); 4719 break; 4720 } 4721 } 4722 4723 skip: 4724 btrfs_release_path(path); 4725 if (trans) { 4726 ret = btrfs_end_transaction(trans); 4727 trans = NULL; 4728 if (ret) 4729 break; 4730 } 4731 4732 if (key.offset < (u64)-1) { 4733 key.offset++; 4734 } else if (key.type < BTRFS_ROOT_ITEM_KEY) { 4735 key.offset = 0; 4736 key.type = BTRFS_ROOT_ITEM_KEY; 4737 } else if (key.objectid < (u64)-1) { 4738 key.offset = 0; 4739 key.type = BTRFS_ROOT_ITEM_KEY; 4740 key.objectid++; 4741 } else { 4742 break; 4743 } 4744 cond_resched(); 4745 } 4746 4747 out: 4748 btrfs_free_path(path); 4749 if (trans && !IS_ERR(trans)) 4750 btrfs_end_transaction(trans); 4751 if (ret) 4752 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread 
failed %d", ret);
4753	else if (!closing)
4754		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
4755	up(&fs_info->uuid_tree_rescan_sem);
4756	return 0;
4757 }
4758
4759 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4760 {
4761	struct btrfs_trans_handle *trans;
4762	struct btrfs_root *tree_root = fs_info->tree_root;
4763	struct btrfs_root *uuid_root;
4764	struct task_struct *task;
4765	int ret;
4766
4767	/*
4768	 * 1 - root node
4769	 * 1 - root item
4770	 */
4771	trans = btrfs_start_transaction(tree_root, 2);
4772	if (IS_ERR(trans))
4773		return PTR_ERR(trans);
4774
4775	uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
4776	if (IS_ERR(uuid_root)) {
4777		ret = PTR_ERR(uuid_root);
4778		btrfs_abort_transaction(trans, ret);
4779		btrfs_end_transaction(trans);
4780		return ret;
4781	}
4782
4783	fs_info->uuid_root = uuid_root;
4784
4785	ret = btrfs_commit_transaction(trans);
4786	if (ret)
4787		return ret;
4788
4789	down(&fs_info->uuid_tree_rescan_sem);
4790	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4791	if (IS_ERR(task)) {
4792		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4793		btrfs_warn(fs_info, "failed to start uuid_scan task");
4794		up(&fs_info->uuid_tree_rescan_sem);
4795		return PTR_ERR(task);
4796	}
4797
4798	return 0;
4799 }
4800
4801 /*
4802  * Shrinking a device means finding all of the device extents past the new
4803  * size and then following the back refs to the chunks. The chunk relocation
4804  * code actually frees the device extent.
4805  */
4806 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4807 {
4808	struct btrfs_fs_info *fs_info = device->fs_info;
4809	struct btrfs_root *root = fs_info->dev_root;
4810	struct btrfs_trans_handle *trans;
4811	struct btrfs_dev_extent *dev_extent = NULL;
4812	struct btrfs_path *path;
4813	u64 length;
4814	u64 chunk_offset;
4815	int ret;
4816	int slot;
4817	int failed = 0;
4818	bool retried = false;
4819	struct extent_buffer *l;
4820	struct btrfs_key key;
4821	struct btrfs_super_block *super_copy = fs_info->super_copy;
4822	u64 old_total = btrfs_super_total_bytes(super_copy);
4823	u64 old_size = btrfs_device_get_total_bytes(device);
4824	u64 diff;
4825	u64 start;
4826
4827	new_size = round_down(new_size, fs_info->sectorsize);
4828	start = new_size;
4829	diff = round_down(old_size - new_size, fs_info->sectorsize);
4830
4831	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4832		return -EINVAL;
4833
4834	path = btrfs_alloc_path();
4835	if (!path)
4836		return -ENOMEM;
4837
4838	path->reada = READA_BACK;
4839
4840	trans = btrfs_start_transaction(root, 0);
4841	if (IS_ERR(trans)) {
4842		btrfs_free_path(path);
4843		return PTR_ERR(trans);
4844	}
4845
4846	mutex_lock(&fs_info->chunk_mutex);
4847
4848	btrfs_device_set_total_bytes(device, new_size);
4849	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
4850		device->fs_devices->total_rw_bytes -= diff;
4851		atomic64_sub(diff, &fs_info->free_chunk_space);
4852	}
4853
4854	/*
4855	 * Once the device's size has been set to the new size, ensure all
4856	 * in-memory chunks are synced to disk so that the loop below sees them
4857	 * and relocates them accordingly.
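	 *
	 * contains_pending_extent() detects chunk allocations in the shrunk
	 * range that are not persisted yet; if there are any, the transaction
	 * is committed so the device extent search below can see and relocate
	 * them, otherwise ending the transaction is enough.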
4858 */ 4859 if (contains_pending_extent(device, &start, diff)) { 4860 mutex_unlock(&fs_info->chunk_mutex); 4861 ret = btrfs_commit_transaction(trans); 4862 if (ret) 4863 goto done; 4864 } else { 4865 mutex_unlock(&fs_info->chunk_mutex); 4866 btrfs_end_transaction(trans); 4867 } 4868 4869 again: 4870 key.objectid = device->devid; 4871 key.offset = (u64)-1; 4872 key.type = BTRFS_DEV_EXTENT_KEY; 4873 4874 do { 4875 mutex_lock(&fs_info->reclaim_bgs_lock); 4876 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4877 if (ret < 0) { 4878 mutex_unlock(&fs_info->reclaim_bgs_lock); 4879 goto done; 4880 } 4881 4882 ret = btrfs_previous_item(root, path, 0, key.type); 4883 if (ret) { 4884 mutex_unlock(&fs_info->reclaim_bgs_lock); 4885 if (ret < 0) 4886 goto done; 4887 ret = 0; 4888 btrfs_release_path(path); 4889 break; 4890 } 4891 4892 l = path->nodes[0]; 4893 slot = path->slots[0]; 4894 btrfs_item_key_to_cpu(l, &key, path->slots[0]); 4895 4896 if (key.objectid != device->devid) { 4897 mutex_unlock(&fs_info->reclaim_bgs_lock); 4898 btrfs_release_path(path); 4899 break; 4900 } 4901 4902 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 4903 length = btrfs_dev_extent_length(l, dev_extent); 4904 4905 if (key.offset + length <= new_size) { 4906 mutex_unlock(&fs_info->reclaim_bgs_lock); 4907 btrfs_release_path(path); 4908 break; 4909 } 4910 4911 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); 4912 btrfs_release_path(path); 4913 4914 /* 4915 * We may be relocating the only data chunk we have, 4916 * which could potentially end up with losing data's 4917 * raid profile, so lets allocate an empty one in 4918 * advance. 4919 */ 4920 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset); 4921 if (ret < 0) { 4922 mutex_unlock(&fs_info->reclaim_bgs_lock); 4923 goto done; 4924 } 4925 4926 ret = btrfs_relocate_chunk(fs_info, chunk_offset); 4927 mutex_unlock(&fs_info->reclaim_bgs_lock); 4928 if (ret == -ENOSPC) { 4929 failed++; 4930 } else if (ret) { 4931 if (ret == -ETXTBSY) { 4932 btrfs_warn(fs_info, 4933 "could not shrink block group %llu due to active swapfile", 4934 chunk_offset); 4935 } 4936 goto done; 4937 } 4938 } while (key.offset-- > 0); 4939 4940 if (failed && !retried) { 4941 failed = 0; 4942 retried = true; 4943 goto again; 4944 } else if (failed && retried) { 4945 ret = -ENOSPC; 4946 goto done; 4947 } 4948 4949 /* Shrinking succeeded, else we would be at "done". */ 4950 trans = btrfs_start_transaction(root, 0); 4951 if (IS_ERR(trans)) { 4952 ret = PTR_ERR(trans); 4953 goto done; 4954 } 4955 4956 mutex_lock(&fs_info->chunk_mutex); 4957 /* Clear all state bits beyond the shrunk device size */ 4958 clear_extent_bits(&device->alloc_state, new_size, (u64)-1, 4959 CHUNK_STATE_MASK); 4960 4961 btrfs_device_set_disk_total_bytes(device, new_size); 4962 if (list_empty(&device->post_commit_list)) 4963 list_add_tail(&device->post_commit_list, 4964 &trans->transaction->dev_update_list); 4965 4966 WARN_ON(diff > old_total); 4967 btrfs_set_super_total_bytes(super_copy, 4968 round_down(old_total - diff, fs_info->sectorsize)); 4969 mutex_unlock(&fs_info->chunk_mutex); 4970 4971 btrfs_reserve_chunk_metadata(trans, false); 4972 /* Now btrfs_update_device() will change the on-disk size. 
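	 * If that fails we must abort the transaction, since the in-memory
	 * size was already shrunk and committing without the matching device
	 * item update would leave the on-disk item out of sync.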
*/ 4973 ret = btrfs_update_device(trans, device); 4974 btrfs_trans_release_chunk_metadata(trans); 4975 if (ret < 0) { 4976 btrfs_abort_transaction(trans, ret); 4977 btrfs_end_transaction(trans); 4978 } else { 4979 ret = btrfs_commit_transaction(trans); 4980 } 4981 done: 4982 btrfs_free_path(path); 4983 if (ret) { 4984 mutex_lock(&fs_info->chunk_mutex); 4985 btrfs_device_set_total_bytes(device, old_size); 4986 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 4987 device->fs_devices->total_rw_bytes += diff; 4988 atomic64_add(diff, &fs_info->free_chunk_space); 4989 mutex_unlock(&fs_info->chunk_mutex); 4990 } 4991 return ret; 4992 } 4993 4994 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info, 4995 struct btrfs_key *key, 4996 struct btrfs_chunk *chunk, int item_size) 4997 { 4998 struct btrfs_super_block *super_copy = fs_info->super_copy; 4999 struct btrfs_disk_key disk_key; 5000 u32 array_size; 5001 u8 *ptr; 5002 5003 lockdep_assert_held(&fs_info->chunk_mutex); 5004 5005 array_size = btrfs_super_sys_array_size(super_copy); 5006 if (array_size + item_size + sizeof(disk_key) 5007 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) 5008 return -EFBIG; 5009 5010 ptr = super_copy->sys_chunk_array + array_size; 5011 btrfs_cpu_key_to_disk(&disk_key, key); 5012 memcpy(ptr, &disk_key, sizeof(disk_key)); 5013 ptr += sizeof(disk_key); 5014 memcpy(ptr, chunk, item_size); 5015 item_size += sizeof(disk_key); 5016 btrfs_set_super_sys_array_size(super_copy, array_size + item_size); 5017 5018 return 0; 5019 } 5020 5021 /* 5022 * sort the devices in descending order by max_avail, total_avail 5023 */ 5024 static int btrfs_cmp_device_info(const void *a, const void *b) 5025 { 5026 const struct btrfs_device_info *di_a = a; 5027 const struct btrfs_device_info *di_b = b; 5028 5029 if (di_a->max_avail > di_b->max_avail) 5030 return -1; 5031 if (di_a->max_avail < di_b->max_avail) 5032 return 1; 5033 if (di_a->total_avail > di_b->total_avail) 5034 return -1; 5035 if (di_a->total_avail < di_b->total_avail) 5036 return 1; 5037 return 0; 5038 } 5039 5040 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type) 5041 { 5042 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK)) 5043 return; 5044 5045 btrfs_set_fs_incompat(info, RAID56); 5046 } 5047 5048 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type) 5049 { 5050 if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4))) 5051 return; 5052 5053 btrfs_set_fs_incompat(info, RAID1C34); 5054 } 5055 5056 /* 5057 * Structure used internally for btrfs_create_chunk() function. 5058 * Wraps needed parameters. 
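 *
 * Typical flow: btrfs_create_chunk() initializes this via
 * init_alloc_chunk_ctl(), gather_device_info() collects the per-device
 * holes, decide_stripe_size() computes stripe_size/chunk_size and
 * create_chunk() finally builds the chunk mapping from them.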
5059 */ 5060 struct alloc_chunk_ctl { 5061 u64 start; 5062 u64 type; 5063 /* Total number of stripes to allocate */ 5064 int num_stripes; 5065 /* sub_stripes info for map */ 5066 int sub_stripes; 5067 /* Stripes per device */ 5068 int dev_stripes; 5069 /* Maximum number of devices to use */ 5070 int devs_max; 5071 /* Minimum number of devices to use */ 5072 int devs_min; 5073 /* ndevs has to be a multiple of this */ 5074 int devs_increment; 5075 /* Number of copies */ 5076 int ncopies; 5077 /* Number of stripes worth of bytes to store parity information */ 5078 int nparity; 5079 u64 max_stripe_size; 5080 u64 max_chunk_size; 5081 u64 dev_extent_min; 5082 u64 stripe_size; 5083 u64 chunk_size; 5084 int ndevs; 5085 }; 5086 5087 static void init_alloc_chunk_ctl_policy_regular( 5088 struct btrfs_fs_devices *fs_devices, 5089 struct alloc_chunk_ctl *ctl) 5090 { 5091 u64 type = ctl->type; 5092 5093 if (type & BTRFS_BLOCK_GROUP_DATA) { 5094 ctl->max_stripe_size = SZ_1G; 5095 ctl->max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE; 5096 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 5097 /* For larger filesystems, use larger metadata chunks */ 5098 if (fs_devices->total_rw_bytes > 50ULL * SZ_1G) 5099 ctl->max_stripe_size = SZ_1G; 5100 else 5101 ctl->max_stripe_size = SZ_256M; 5102 ctl->max_chunk_size = ctl->max_stripe_size; 5103 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 5104 ctl->max_stripe_size = SZ_32M; 5105 ctl->max_chunk_size = 2 * ctl->max_stripe_size; 5106 ctl->devs_max = min_t(int, ctl->devs_max, 5107 BTRFS_MAX_DEVS_SYS_CHUNK); 5108 } else { 5109 BUG(); 5110 } 5111 5112 /* We don't want a chunk larger than 10% of writable space */ 5113 ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1), 5114 ctl->max_chunk_size); 5115 ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes; 5116 } 5117 5118 static void init_alloc_chunk_ctl_policy_zoned( 5119 struct btrfs_fs_devices *fs_devices, 5120 struct alloc_chunk_ctl *ctl) 5121 { 5122 u64 zone_size = fs_devices->fs_info->zone_size; 5123 u64 limit; 5124 int min_num_stripes = ctl->devs_min * ctl->dev_stripes; 5125 int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies; 5126 u64 min_chunk_size = min_data_stripes * zone_size; 5127 u64 type = ctl->type; 5128 5129 ctl->max_stripe_size = zone_size; 5130 if (type & BTRFS_BLOCK_GROUP_DATA) { 5131 ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE, 5132 zone_size); 5133 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 5134 ctl->max_chunk_size = ctl->max_stripe_size; 5135 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 5136 ctl->max_chunk_size = 2 * ctl->max_stripe_size; 5137 ctl->devs_max = min_t(int, ctl->devs_max, 5138 BTRFS_MAX_DEVS_SYS_CHUNK); 5139 } else { 5140 BUG(); 5141 } 5142 5143 /* We don't want a chunk larger than 10% of writable space */ 5144 limit = max(round_down(div_factor(fs_devices->total_rw_bytes, 1), 5145 zone_size), 5146 min_chunk_size); 5147 ctl->max_chunk_size = min(limit, ctl->max_chunk_size); 5148 ctl->dev_extent_min = zone_size * ctl->dev_stripes; 5149 } 5150 5151 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices, 5152 struct alloc_chunk_ctl *ctl) 5153 { 5154 int index = btrfs_bg_flags_to_raid_index(ctl->type); 5155 5156 ctl->sub_stripes = btrfs_raid_array[index].sub_stripes; 5157 ctl->dev_stripes = btrfs_raid_array[index].dev_stripes; 5158 ctl->devs_max = btrfs_raid_array[index].devs_max; 5159 if (!ctl->devs_max) 5160 ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info); 5161 ctl->devs_min = btrfs_raid_array[index].devs_min; 5162 
ctl->devs_increment = btrfs_raid_array[index].devs_increment; 5163 ctl->ncopies = btrfs_raid_array[index].ncopies; 5164 ctl->nparity = btrfs_raid_array[index].nparity; 5165 ctl->ndevs = 0; 5166 5167 switch (fs_devices->chunk_alloc_policy) { 5168 case BTRFS_CHUNK_ALLOC_REGULAR: 5169 init_alloc_chunk_ctl_policy_regular(fs_devices, ctl); 5170 break; 5171 case BTRFS_CHUNK_ALLOC_ZONED: 5172 init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl); 5173 break; 5174 default: 5175 BUG(); 5176 } 5177 } 5178 5179 static int gather_device_info(struct btrfs_fs_devices *fs_devices, 5180 struct alloc_chunk_ctl *ctl, 5181 struct btrfs_device_info *devices_info) 5182 { 5183 struct btrfs_fs_info *info = fs_devices->fs_info; 5184 struct btrfs_device *device; 5185 u64 total_avail; 5186 u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes; 5187 int ret; 5188 int ndevs = 0; 5189 u64 max_avail; 5190 u64 dev_offset; 5191 5192 /* 5193 * in the first pass through the devices list, we gather information 5194 * about the available holes on each device. 5195 */ 5196 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { 5197 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 5198 WARN(1, KERN_ERR 5199 "BTRFS: read-only device in alloc_list\n"); 5200 continue; 5201 } 5202 5203 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 5204 &device->dev_state) || 5205 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 5206 continue; 5207 5208 if (device->total_bytes > device->bytes_used) 5209 total_avail = device->total_bytes - device->bytes_used; 5210 else 5211 total_avail = 0; 5212 5213 /* If there is no space on this device, skip it. */ 5214 if (total_avail < ctl->dev_extent_min) 5215 continue; 5216 5217 ret = find_free_dev_extent(device, dev_extent_want, &dev_offset, 5218 &max_avail); 5219 if (ret && ret != -ENOSPC) 5220 return ret; 5221 5222 if (ret == 0) 5223 max_avail = dev_extent_want; 5224 5225 if (max_avail < ctl->dev_extent_min) { 5226 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5227 btrfs_debug(info, 5228 "%s: devid %llu has no free space, have=%llu want=%llu", 5229 __func__, device->devid, max_avail, 5230 ctl->dev_extent_min); 5231 continue; 5232 } 5233 5234 if (ndevs == fs_devices->rw_devices) { 5235 WARN(1, "%s: found more than %llu devices\n", 5236 __func__, fs_devices->rw_devices); 5237 break; 5238 } 5239 devices_info[ndevs].dev_offset = dev_offset; 5240 devices_info[ndevs].max_avail = max_avail; 5241 devices_info[ndevs].total_avail = total_avail; 5242 devices_info[ndevs].dev = device; 5243 ++ndevs; 5244 } 5245 ctl->ndevs = ndevs; 5246 5247 /* 5248 * now sort the devices by hole size / available space 5249 */ 5250 sort(devices_info, ndevs, sizeof(struct btrfs_device_info), 5251 btrfs_cmp_device_info, NULL); 5252 5253 return 0; 5254 } 5255 5256 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl, 5257 struct btrfs_device_info *devices_info) 5258 { 5259 /* Number of stripes that count for block group size */ 5260 int data_stripes; 5261 5262 /* 5263 * The primary goal is to maximize the number of stripes, so use as 5264 * many devices as possible, even if the stripes are not maximum sized. 5265 * 5266 * The DUP profile stores more than one stripe per device, the 5267 * max_avail is the total size so we have to adjust. 
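	 *
	 * E.g. for DUP (dev_stripes == 2) with 10GiB of max_avail on the last
	 * (smallest) device, stripe_size starts out below as 5GiB, before
	 * being capped by max_chunk_size and aligned to BTRFS_STRIPE_LEN.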
5268	 */
5269	ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail,
5270				   ctl->dev_stripes);
5271	ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5272
5273	/* This will have to be fixed for RAID1 and RAID10 over more drives */
5274	data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5275
5276	/*
5277	 * Use the number of data stripes to figure out how big this chunk is
5278	 * really going to be in terms of logical address space, and compare
5279	 * that answer with the max chunk size. If it's higher, we try to
5280	 * reduce stripe_size.
5281	 */
5282	if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5283		/*
5284		 * Reduce stripe_size, round it up to a 16MB boundary again and
5285		 * then use it, unless it ends up being even bigger than the
5286		 * previous value we had already.
5287		 */
5288		ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size,
5289							data_stripes), SZ_16M),
5290				       ctl->stripe_size);
5291	}
5292
5293	/* Align to BTRFS_STRIPE_LEN */
5294	ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
5295	ctl->chunk_size = ctl->stripe_size * data_stripes;
5296
5297	return 0;
5298 }
5299
5300 static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl,
5301				    struct btrfs_device_info *devices_info)
5302 {
5303	u64 zone_size = devices_info[0].dev->zone_info->zone_size;
5304	/* Number of stripes that count for block group size */
5305	int data_stripes;
5306
5307	/*
5308	 * It should hold because:
5309	 * dev_extent_min == dev_extent_want == zone_size * dev_stripes
5310	 */
5311	ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min);
5312
5313	ctl->stripe_size = zone_size;
5314	ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5315	data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5316
5317	/* stripe_size is fixed in a zoned filesystem. Reduce ndevs instead. */
5318	if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5319		ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies,
5320					     ctl->stripe_size) + ctl->nparity,
5321				     ctl->dev_stripes);
5322		ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5323		data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5324		ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size);
5325	}
5326
5327	ctl->chunk_size = ctl->stripe_size * data_stripes;
5328
5329	return 0;
5330 }
5331
5332 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
5333			      struct alloc_chunk_ctl *ctl,
5334			      struct btrfs_device_info *devices_info)
5335 {
5336	struct btrfs_fs_info *info = fs_devices->fs_info;
5337
5338	/*
5339	 * Round down to number of usable stripes, devs_increment can be any
5340	 * number so we can't use round_down() that requires a power of 2, while
5341	 * rounddown is safe.
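	 *
	 * E.g. raid10 has devs_increment == 2, so 5 usable devices are
	 * rounded down to 4 here before the devs_min check below.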
5342 */ 5343 ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment); 5344 5345 if (ctl->ndevs < ctl->devs_min) { 5346 if (btrfs_test_opt(info, ENOSPC_DEBUG)) { 5347 btrfs_debug(info, 5348 "%s: not enough devices with free space: have=%d minimum required=%d", 5349 __func__, ctl->ndevs, ctl->devs_min); 5350 } 5351 return -ENOSPC; 5352 } 5353 5354 ctl->ndevs = min(ctl->ndevs, ctl->devs_max); 5355 5356 switch (fs_devices->chunk_alloc_policy) { 5357 case BTRFS_CHUNK_ALLOC_REGULAR: 5358 return decide_stripe_size_regular(ctl, devices_info); 5359 case BTRFS_CHUNK_ALLOC_ZONED: 5360 return decide_stripe_size_zoned(ctl, devices_info); 5361 default: 5362 BUG(); 5363 } 5364 } 5365 5366 static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans, 5367 struct alloc_chunk_ctl *ctl, 5368 struct btrfs_device_info *devices_info) 5369 { 5370 struct btrfs_fs_info *info = trans->fs_info; 5371 struct map_lookup *map = NULL; 5372 struct extent_map_tree *em_tree; 5373 struct btrfs_block_group *block_group; 5374 struct extent_map *em; 5375 u64 start = ctl->start; 5376 u64 type = ctl->type; 5377 int ret; 5378 int i; 5379 int j; 5380 5381 map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS); 5382 if (!map) 5383 return ERR_PTR(-ENOMEM); 5384 map->num_stripes = ctl->num_stripes; 5385 5386 for (i = 0; i < ctl->ndevs; ++i) { 5387 for (j = 0; j < ctl->dev_stripes; ++j) { 5388 int s = i * ctl->dev_stripes + j; 5389 map->stripes[s].dev = devices_info[i].dev; 5390 map->stripes[s].physical = devices_info[i].dev_offset + 5391 j * ctl->stripe_size; 5392 } 5393 } 5394 map->stripe_len = BTRFS_STRIPE_LEN; 5395 map->io_align = BTRFS_STRIPE_LEN; 5396 map->io_width = BTRFS_STRIPE_LEN; 5397 map->type = type; 5398 map->sub_stripes = ctl->sub_stripes; 5399 5400 trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size); 5401 5402 em = alloc_extent_map(); 5403 if (!em) { 5404 kfree(map); 5405 return ERR_PTR(-ENOMEM); 5406 } 5407 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 5408 em->map_lookup = map; 5409 em->start = start; 5410 em->len = ctl->chunk_size; 5411 em->block_start = 0; 5412 em->block_len = em->len; 5413 em->orig_block_len = ctl->stripe_size; 5414 5415 em_tree = &info->mapping_tree; 5416 write_lock(&em_tree->lock); 5417 ret = add_extent_mapping(em_tree, em, 0); 5418 if (ret) { 5419 write_unlock(&em_tree->lock); 5420 free_extent_map(em); 5421 return ERR_PTR(ret); 5422 } 5423 write_unlock(&em_tree->lock); 5424 5425 block_group = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size); 5426 if (IS_ERR(block_group)) 5427 goto error_del_extent; 5428 5429 for (i = 0; i < map->num_stripes; i++) { 5430 struct btrfs_device *dev = map->stripes[i].dev; 5431 5432 btrfs_device_set_bytes_used(dev, 5433 dev->bytes_used + ctl->stripe_size); 5434 if (list_empty(&dev->post_commit_list)) 5435 list_add_tail(&dev->post_commit_list, 5436 &trans->transaction->dev_update_list); 5437 } 5438 5439 atomic64_sub(ctl->stripe_size * map->num_stripes, 5440 &info->free_chunk_space); 5441 5442 free_extent_map(em); 5443 check_raid56_incompat_flag(info, type); 5444 check_raid1c34_incompat_flag(info, type); 5445 5446 return block_group; 5447 5448 error_del_extent: 5449 write_lock(&em_tree->lock); 5450 remove_extent_mapping(em_tree, em); 5451 write_unlock(&em_tree->lock); 5452 5453 /* One for our allocation */ 5454 free_extent_map(em); 5455 /* One for the tree reference */ 5456 free_extent_map(em); 5457 5458 return block_group; 5459 } 5460 5461 struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans, 5462 u64 
type)
5463 {
5464	struct btrfs_fs_info *info = trans->fs_info;
5465	struct btrfs_fs_devices *fs_devices = info->fs_devices;
5466	struct btrfs_device_info *devices_info = NULL;
5467	struct alloc_chunk_ctl ctl;
5468	struct btrfs_block_group *block_group;
5469	int ret;
5470
5471	lockdep_assert_held(&info->chunk_mutex);
5472
5473	if (!alloc_profile_is_valid(type, 0)) {
5474		ASSERT(0);
5475		return ERR_PTR(-EINVAL);
5476	}
5477
5478	if (list_empty(&fs_devices->alloc_list)) {
5479		if (btrfs_test_opt(info, ENOSPC_DEBUG))
5480			btrfs_debug(info, "%s: no writable device", __func__);
5481		return ERR_PTR(-ENOSPC);
5482	}
5483
5484	if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
5485		btrfs_err(info, "invalid chunk type 0x%llx requested", type);
5486		ASSERT(0);
5487		return ERR_PTR(-EINVAL);
5488	}
5489
5490	ctl.start = find_next_chunk(info);
5491	ctl.type = type;
5492	init_alloc_chunk_ctl(fs_devices, &ctl);
5493
5494	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
5495			       GFP_NOFS);
5496	if (!devices_info)
5497		return ERR_PTR(-ENOMEM);
5498
5499	ret = gather_device_info(fs_devices, &ctl, devices_info);
5500	if (ret < 0) {
5501		block_group = ERR_PTR(ret);
5502		goto out;
5503	}
5504
5505	ret = decide_stripe_size(fs_devices, &ctl, devices_info);
5506	if (ret < 0) {
5507		block_group = ERR_PTR(ret);
5508		goto out;
5509	}
5510
5511	block_group = create_chunk(trans, &ctl, devices_info);
5512
5513 out:
5514	kfree(devices_info);
5515	return block_group;
5516 }
5517
5518 /*
5519  * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to
5520  * phase 1 of chunk allocation. It belongs to phase 2 only when allocating
5521  * system chunks.
5522  *
5523  * See the comment at btrfs_chunk_alloc() for details about the chunk
5524  * allocation phases.
5525  */
5526 int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
5527				     struct btrfs_block_group *bg)
5528 {
5529	struct btrfs_fs_info *fs_info = trans->fs_info;
5530	struct btrfs_root *chunk_root = fs_info->chunk_root;
5531	struct btrfs_key key;
5532	struct btrfs_chunk *chunk;
5533	struct btrfs_stripe *stripe;
5534	struct extent_map *em;
5535	struct map_lookup *map;
5536	size_t item_size;
5537	int i;
5538	int ret;
5539
5540	/*
5541	 * We take the chunk_mutex for 2 reasons:
5542	 *
5543	 * 1) Updates and insertions in the chunk btree must be done while holding
5544	 *    the chunk_mutex, as well as updating the system chunk array in the
5545	 *    superblock. See the comment on top of btrfs_chunk_alloc() for the
5546	 *    details;
5547	 *
5548	 * 2) To prevent races with the final phase of a device replace operation
5549	 *    that replaces the device object associated with the map's stripes,
5550	 *    because the device object's id can change at any time during that
5551	 *    final phase of the device replace operation
5552	 *    (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
5553	 *    replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
5554	 *    which would cause a failure when updating the device item, which does
5555	 *    not exist, or when persisting a stripe of the chunk item with such an
5556	 *    ID. Here we can't use the device_list_mutex because our caller already
5557	 *    has locked the chunk_mutex, and the final phase of device replace
5558	 *    acquires both mutexes - first the device_list_mutex and then the
5559	 *    chunk_mutex. Using any of those two mutexes protects us from a
5560	 *    concurrent device replace.
5561 */ 5562 lockdep_assert_held(&fs_info->chunk_mutex); 5563 5564 em = btrfs_get_chunk_map(fs_info, bg->start, bg->length); 5565 if (IS_ERR(em)) { 5566 ret = PTR_ERR(em); 5567 btrfs_abort_transaction(trans, ret); 5568 return ret; 5569 } 5570 5571 map = em->map_lookup; 5572 item_size = btrfs_chunk_item_size(map->num_stripes); 5573 5574 chunk = kzalloc(item_size, GFP_NOFS); 5575 if (!chunk) { 5576 ret = -ENOMEM; 5577 btrfs_abort_transaction(trans, ret); 5578 goto out; 5579 } 5580 5581 for (i = 0; i < map->num_stripes; i++) { 5582 struct btrfs_device *device = map->stripes[i].dev; 5583 5584 ret = btrfs_update_device(trans, device); 5585 if (ret) 5586 goto out; 5587 } 5588 5589 stripe = &chunk->stripe; 5590 for (i = 0; i < map->num_stripes; i++) { 5591 struct btrfs_device *device = map->stripes[i].dev; 5592 const u64 dev_offset = map->stripes[i].physical; 5593 5594 btrfs_set_stack_stripe_devid(stripe, device->devid); 5595 btrfs_set_stack_stripe_offset(stripe, dev_offset); 5596 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE); 5597 stripe++; 5598 } 5599 5600 btrfs_set_stack_chunk_length(chunk, bg->length); 5601 btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID); 5602 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len); 5603 btrfs_set_stack_chunk_type(chunk, map->type); 5604 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes); 5605 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len); 5606 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len); 5607 btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize); 5608 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes); 5609 5610 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 5611 key.type = BTRFS_CHUNK_ITEM_KEY; 5612 key.offset = bg->start; 5613 5614 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); 5615 if (ret) 5616 goto out; 5617 5618 bg->chunk_item_inserted = 1; 5619 5620 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 5621 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size); 5622 if (ret) 5623 goto out; 5624 } 5625 5626 out: 5627 kfree(chunk); 5628 free_extent_map(em); 5629 return ret; 5630 } 5631 5632 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans) 5633 { 5634 struct btrfs_fs_info *fs_info = trans->fs_info; 5635 u64 alloc_profile; 5636 struct btrfs_block_group *meta_bg; 5637 struct btrfs_block_group *sys_bg; 5638 5639 /* 5640 * When adding a new device for sprouting, the seed device is read-only 5641 * so we must first allocate a metadata and a system chunk. But before 5642 * adding the block group items to the extent, device and chunk btrees, 5643 * we must first: 5644 * 5645 * 1) Create both chunks without doing any changes to the btrees, as 5646 * otherwise we would get -ENOSPC since the block groups from the 5647 * seed device are read-only; 5648 * 5649 * 2) Add the device item for the new sprout device - finishing the setup 5650 * of a new block group requires updating the device item in the chunk 5651 * btree, so it must exist when we attempt to do it. The previous step 5652 * ensures this does not fail with -ENOSPC. 5653 * 5654 * After that we can add the block group items to their btrees: 5655 * update existing device item in the chunk btree, add a new block group 5656 * item to the extent btree, add a new chunk item to the chunk btree and 5657 * finally add the new device extent items to the devices btree. 
5658 */ 5659 5660 alloc_profile = btrfs_metadata_alloc_profile(fs_info); 5661 meta_bg = btrfs_create_chunk(trans, alloc_profile); 5662 if (IS_ERR(meta_bg)) 5663 return PTR_ERR(meta_bg); 5664 5665 alloc_profile = btrfs_system_alloc_profile(fs_info); 5666 sys_bg = btrfs_create_chunk(trans, alloc_profile); 5667 if (IS_ERR(sys_bg)) 5668 return PTR_ERR(sys_bg); 5669 5670 return 0; 5671 } 5672 5673 static inline int btrfs_chunk_max_errors(struct map_lookup *map) 5674 { 5675 const int index = btrfs_bg_flags_to_raid_index(map->type); 5676 5677 return btrfs_raid_array[index].tolerated_failures; 5678 } 5679 5680 bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset) 5681 { 5682 struct extent_map *em; 5683 struct map_lookup *map; 5684 int miss_ndevs = 0; 5685 int i; 5686 bool ret = true; 5687 5688 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 5689 if (IS_ERR(em)) 5690 return false; 5691 5692 map = em->map_lookup; 5693 for (i = 0; i < map->num_stripes; i++) { 5694 if (test_bit(BTRFS_DEV_STATE_MISSING, 5695 &map->stripes[i].dev->dev_state)) { 5696 miss_ndevs++; 5697 continue; 5698 } 5699 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, 5700 &map->stripes[i].dev->dev_state)) { 5701 ret = false; 5702 goto end; 5703 } 5704 } 5705 5706 /* 5707 * If the number of missing devices is larger than max errors, we can 5708 * not write the data into that chunk successfully. 5709 */ 5710 if (miss_ndevs > btrfs_chunk_max_errors(map)) 5711 ret = false; 5712 end: 5713 free_extent_map(em); 5714 return ret; 5715 } 5716 5717 void btrfs_mapping_tree_free(struct extent_map_tree *tree) 5718 { 5719 struct extent_map *em; 5720 5721 while (1) { 5722 write_lock(&tree->lock); 5723 em = lookup_extent_mapping(tree, 0, (u64)-1); 5724 if (em) 5725 remove_extent_mapping(tree, em); 5726 write_unlock(&tree->lock); 5727 if (!em) 5728 break; 5729 /* once for us */ 5730 free_extent_map(em); 5731 /* once for the tree */ 5732 free_extent_map(em); 5733 } 5734 } 5735 5736 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5737 { 5738 struct extent_map *em; 5739 struct map_lookup *map; 5740 int ret; 5741 5742 em = btrfs_get_chunk_map(fs_info, logical, len); 5743 if (IS_ERR(em)) 5744 /* 5745 * We could return errors for these cases, but that could get 5746 * ugly and we'd probably do the same thing which is just not do 5747 * anything else and exit, so return 1 so the callers don't try 5748 * to use other copies. 5749 */ 5750 return 1; 5751 5752 map = em->map_lookup; 5753 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK)) 5754 ret = map->num_stripes; 5755 else if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5756 ret = map->sub_stripes; 5757 else if (map->type & BTRFS_BLOCK_GROUP_RAID5) 5758 ret = 2; 5759 else if (map->type & BTRFS_BLOCK_GROUP_RAID6) 5760 /* 5761 * There could be two corrupted data stripes, we need 5762 * to loop retry in order to rebuild the correct data. 5763 * 5764 * Fail a stripe at a time on every retry except the 5765 * stripe under reconstruction. 
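		 *
		 * E.g. a RAID6 chunk striped over 6 devices reports 6 copies
		 * here, so callers have enough mirror numbers to retry with.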
5766 */
5767 ret = map->num_stripes;
5768 else
5769 ret = 1;
5770 free_extent_map(em);
5771
5772 down_read(&fs_info->dev_replace.rwsem);
5773 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
5774 fs_info->dev_replace.tgtdev)
5775 ret++;
5776 up_read(&fs_info->dev_replace.rwsem);
5777
5778 return ret;
5779 }
5780
5781 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
5782 u64 logical)
5783 {
5784 struct extent_map *em;
5785 struct map_lookup *map;
5786 unsigned long len = fs_info->sectorsize;
5787
5788 em = btrfs_get_chunk_map(fs_info, logical, len);
5789
5790 if (!WARN_ON(IS_ERR(em))) {
5791 map = em->map_lookup;
5792 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5793 len = map->stripe_len * nr_data_stripes(map);
5794 free_extent_map(em);
5795 }
5796 return len;
5797 }
5798
5799 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5800 {
5801 struct extent_map *em;
5802 struct map_lookup *map;
5803 int ret = 0;
5804
5805 em = btrfs_get_chunk_map(fs_info, logical, len);
5806
5807 if (!WARN_ON(IS_ERR(em))) {
5808 map = em->map_lookup;
5809 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5810 ret = 1;
5811 free_extent_map(em);
5812 }
5813 return ret;
5814 }
5815
5816 static int find_live_mirror(struct btrfs_fs_info *fs_info,
5817 struct map_lookup *map, int first,
5818 int dev_replace_is_ongoing)
5819 {
5820 int i;
5821 int num_stripes;
5822 int preferred_mirror;
5823 int tolerance;
5824 struct btrfs_device *srcdev;
5825
5826 ASSERT((map->type &
5827 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));
5828
5829 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5830 num_stripes = map->sub_stripes;
5831 else
5832 num_stripes = map->num_stripes;
5833
5834 switch (fs_info->fs_devices->read_policy) {
5835 default:
5836 /* Shouldn't happen, just warn and use pid instead of failing */
5837 btrfs_warn_rl(fs_info,
5838 "unknown read_policy type %u, reset to pid",
5839 fs_info->fs_devices->read_policy);
5840 fs_info->fs_devices->read_policy = BTRFS_READ_POLICY_PID;
5841 fallthrough;
5842 case BTRFS_READ_POLICY_PID:
5843 preferred_mirror = first + (current->pid % num_stripes);
5844 break;
5845 }
5846
5847 if (dev_replace_is_ongoing &&
5848 fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5849 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5850 srcdev = fs_info->dev_replace.srcdev;
5851 else
5852 srcdev = NULL;
5853
5854 /*
5855 * Try to avoid the drive that is the source drive for a
5856 * dev-replace procedure; only choose it if no other non-missing
5857 * mirror is available.
5858 */
5859 for (tolerance = 0; tolerance < 2; tolerance++) {
5860 if (map->stripes[preferred_mirror].dev->bdev &&
5861 (tolerance || map->stripes[preferred_mirror].dev != srcdev))
5862 return preferred_mirror;
5863 for (i = first; i < first + num_stripes; i++) {
5864 if (map->stripes[i].dev->bdev &&
5865 (tolerance || map->stripes[i].dev != srcdev))
5866 return i;
5867 }
5868 }
5869
5870 /* We couldn't find one that doesn't fail.
Just return something 5871 * and the io error handling code will clean up eventually 5872 */ 5873 return preferred_mirror; 5874 } 5875 5876 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */ 5877 static void sort_parity_stripes(struct btrfs_io_context *bioc, int num_stripes) 5878 { 5879 int i; 5880 int again = 1; 5881 5882 while (again) { 5883 again = 0; 5884 for (i = 0; i < num_stripes - 1; i++) { 5885 /* Swap if parity is on a smaller index */ 5886 if (bioc->raid_map[i] > bioc->raid_map[i + 1]) { 5887 swap(bioc->stripes[i], bioc->stripes[i + 1]); 5888 swap(bioc->raid_map[i], bioc->raid_map[i + 1]); 5889 again = 1; 5890 } 5891 } 5892 } 5893 } 5894 5895 static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info, 5896 int total_stripes, 5897 int real_stripes) 5898 { 5899 struct btrfs_io_context *bioc = kzalloc( 5900 /* The size of btrfs_io_context */ 5901 sizeof(struct btrfs_io_context) + 5902 /* Plus the variable array for the stripes */ 5903 sizeof(struct btrfs_io_stripe) * (total_stripes) + 5904 /* Plus the variable array for the tgt dev */ 5905 sizeof(int) * (real_stripes) + 5906 /* 5907 * Plus the raid_map, which includes both the tgt dev 5908 * and the stripes. 5909 */ 5910 sizeof(u64) * (total_stripes), 5911 GFP_NOFS|__GFP_NOFAIL); 5912 5913 atomic_set(&bioc->error, 0); 5914 refcount_set(&bioc->refs, 1); 5915 5916 bioc->fs_info = fs_info; 5917 bioc->tgtdev_map = (int *)(bioc->stripes + total_stripes); 5918 bioc->raid_map = (u64 *)(bioc->tgtdev_map + real_stripes); 5919 5920 return bioc; 5921 } 5922 5923 void btrfs_get_bioc(struct btrfs_io_context *bioc) 5924 { 5925 WARN_ON(!refcount_read(&bioc->refs)); 5926 refcount_inc(&bioc->refs); 5927 } 5928 5929 void btrfs_put_bioc(struct btrfs_io_context *bioc) 5930 { 5931 if (!bioc) 5932 return; 5933 if (refcount_dec_and_test(&bioc->refs)) 5934 kfree(bioc); 5935 } 5936 5937 /* can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */ 5938 /* 5939 * Please note that, discard won't be sent to target device of device 5940 * replace. 5941 */ 5942 static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info, 5943 u64 logical, u64 *length_ret, 5944 struct btrfs_io_context **bioc_ret) 5945 { 5946 struct extent_map *em; 5947 struct map_lookup *map; 5948 struct btrfs_io_context *bioc; 5949 u64 length = *length_ret; 5950 u64 offset; 5951 u64 stripe_nr; 5952 u64 stripe_nr_end; 5953 u64 stripe_end_offset; 5954 u64 stripe_cnt; 5955 u64 stripe_len; 5956 u64 stripe_offset; 5957 u64 num_stripes; 5958 u32 stripe_index; 5959 u32 factor = 0; 5960 u32 sub_stripes = 0; 5961 u64 stripes_per_dev = 0; 5962 u32 remaining_stripes = 0; 5963 u32 last_stripe = 0; 5964 int ret = 0; 5965 int i; 5966 5967 /* Discard always returns a bioc. 
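 * The ASSERT right below enforces that callers pass a non-NULL bioc_ret.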
*/ 5968 ASSERT(bioc_ret); 5969 5970 em = btrfs_get_chunk_map(fs_info, logical, length); 5971 if (IS_ERR(em)) 5972 return PTR_ERR(em); 5973 5974 map = em->map_lookup; 5975 /* we don't discard raid56 yet */ 5976 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 5977 ret = -EOPNOTSUPP; 5978 goto out; 5979 } 5980 5981 offset = logical - em->start; 5982 length = min_t(u64, em->start + em->len - logical, length); 5983 *length_ret = length; 5984 5985 stripe_len = map->stripe_len; 5986 /* 5987 * stripe_nr counts the total number of stripes we have to stride 5988 * to get to this block 5989 */ 5990 stripe_nr = div64_u64(offset, stripe_len); 5991 5992 /* stripe_offset is the offset of this block in its stripe */ 5993 stripe_offset = offset - stripe_nr * stripe_len; 5994 5995 stripe_nr_end = round_up(offset + length, map->stripe_len); 5996 stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len); 5997 stripe_cnt = stripe_nr_end - stripe_nr; 5998 stripe_end_offset = stripe_nr_end * map->stripe_len - 5999 (offset + length); 6000 /* 6001 * after this, stripe_nr is the number of stripes on this 6002 * device we have to walk to find the data, and stripe_index is 6003 * the number of our device in the stripe array 6004 */ 6005 num_stripes = 1; 6006 stripe_index = 0; 6007 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 6008 BTRFS_BLOCK_GROUP_RAID10)) { 6009 if (map->type & BTRFS_BLOCK_GROUP_RAID0) 6010 sub_stripes = 1; 6011 else 6012 sub_stripes = map->sub_stripes; 6013 6014 factor = map->num_stripes / sub_stripes; 6015 num_stripes = min_t(u64, map->num_stripes, 6016 sub_stripes * stripe_cnt); 6017 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 6018 stripe_index *= sub_stripes; 6019 stripes_per_dev = div_u64_rem(stripe_cnt, factor, 6020 &remaining_stripes); 6021 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe); 6022 last_stripe *= sub_stripes; 6023 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK | 6024 BTRFS_BLOCK_GROUP_DUP)) { 6025 num_stripes = map->num_stripes; 6026 } else { 6027 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6028 &stripe_index); 6029 } 6030 6031 bioc = alloc_btrfs_io_context(fs_info, num_stripes, 0); 6032 if (!bioc) { 6033 ret = -ENOMEM; 6034 goto out; 6035 } 6036 6037 for (i = 0; i < num_stripes; i++) { 6038 bioc->stripes[i].physical = 6039 map->stripes[stripe_index].physical + 6040 stripe_offset + stripe_nr * map->stripe_len; 6041 bioc->stripes[i].dev = map->stripes[stripe_index].dev; 6042 6043 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 6044 BTRFS_BLOCK_GROUP_RAID10)) { 6045 bioc->stripes[i].length = stripes_per_dev * 6046 map->stripe_len; 6047 6048 if (i / sub_stripes < remaining_stripes) 6049 bioc->stripes[i].length += map->stripe_len; 6050 6051 /* 6052 * Special for the first stripe and 6053 * the last stripe: 6054 * 6055 * |-------|...|-------| 6056 * |----------| 6057 * off end_off 6058 */ 6059 if (i < sub_stripes) 6060 bioc->stripes[i].length -= stripe_offset; 6061 6062 if (stripe_index >= last_stripe && 6063 stripe_index <= (last_stripe + 6064 sub_stripes - 1)) 6065 bioc->stripes[i].length -= stripe_end_offset; 6066 6067 if (i == sub_stripes - 1) 6068 stripe_offset = 0; 6069 } else { 6070 bioc->stripes[i].length = length; 6071 } 6072 6073 stripe_index++; 6074 if (stripe_index == map->num_stripes) { 6075 stripe_index = 0; 6076 stripe_nr++; 6077 } 6078 } 6079 6080 *bioc_ret = bioc; 6081 bioc->map_type = map->type; 6082 bioc->num_stripes = num_stripes; 6083 out: 6084 free_extent_map(em); 6085 return ret; 6086 } 6087 6088 /* 6089 * In dev-replace case, for 
repair case (that's the only case where the mirror
6090 * is selected explicitly when calling btrfs_map_block), blocks left of the
6091 * left cursor can also be read from the target drive.
6092 *
6093 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
6094 * array of stripes.
6095 * For READ, it also needs to be supported using the same mirror number.
6096 *
6097 * If the requested block is not left of the left cursor, EIO is returned. This
6098 * can happen because btrfs_num_copies() returns one more in the dev-replace
6099 * case.
6100 */
6101 static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
6102 u64 logical, u64 length,
6103 u64 srcdev_devid, int *mirror_num,
6104 u64 *physical)
6105 {
6106 struct btrfs_io_context *bioc = NULL;
6107 int num_stripes;
6108 int index_srcdev = 0;
6109 int found = 0;
6110 u64 physical_of_found = 0;
6111 int i;
6112 int ret = 0;
6113
6114 ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
6115 logical, &length, &bioc, 0, 0);
6116 if (ret) {
6117 ASSERT(bioc == NULL);
6118 return ret;
6119 }
6120
6121 num_stripes = bioc->num_stripes;
6122 if (*mirror_num > num_stripes) {
6123 /*
6124 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
6125 * which means that the requested area is not left of the left
6126 * cursor.
6127 */
6128 btrfs_put_bioc(bioc);
6129 return -EIO;
6130 }
6131
6132 /*
6133 * Process the rest of the function using the mirror_num of the source
6134 * drive. Therefore look it up first. At the end, patch the device
6135 * pointer to the one of the target drive.
6136 */
6137 for (i = 0; i < num_stripes; i++) {
6138 if (bioc->stripes[i].dev->devid != srcdev_devid)
6139 continue;
6140
6141 /*
6142 * In case of DUP, in order to keep it simple, only add the
6143 * mirror with the lowest physical address.
6144 */
6145 if (found &&
6146 physical_of_found <= bioc->stripes[i].physical)
6147 continue;
6148
6149 index_srcdev = i;
6150 found = 1;
6151 physical_of_found = bioc->stripes[i].physical;
6152 }
6153
6154 btrfs_put_bioc(bioc);
6155
6156 ASSERT(found);
6157 if (!found)
6158 return -EIO;
6159
6160 *mirror_num = index_srcdev + 1;
6161 *physical = physical_of_found;
6162 return ret;
6163 }
6164
6165 static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical)
6166 {
6167 struct btrfs_block_group *cache;
6168 bool ret;
6169
6170 /* A non-zoned filesystem does not use the "to_copy" flag */
6171 if (!btrfs_is_zoned(fs_info))
6172 return false;
6173
6174 cache = btrfs_lookup_block_group(fs_info, logical);
6175
6176 spin_lock(&cache->lock);
6177 ret = cache->to_copy;
6178 spin_unlock(&cache->lock);
6179
6180 btrfs_put_block_group(cache);
6181 return ret;
6182 }
6183
6184 static void handle_ops_on_dev_replace(enum btrfs_map_op op,
6185 struct btrfs_io_context **bioc_ret,
6186 struct btrfs_dev_replace *dev_replace,
6187 u64 logical,
6188 int *num_stripes_ret, int *max_errors_ret)
6189 {
6190 struct btrfs_io_context *bioc = *bioc_ret;
6191 u64 srcdev_devid = dev_replace->srcdev->devid;
6192 int tgtdev_indexes = 0;
6193 int num_stripes = *num_stripes_ret;
6194 int max_errors = *max_errors_ret;
6195 int i;
6196
6197 if (op == BTRFS_MAP_WRITE) {
6198 int index_where_to_add;
6199
6200 /*
6201 * A block group which has "to_copy" set will eventually be
6202 * copied by the dev-replace process. We can avoid cloning IO here.
6203 */ 6204 if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical)) 6205 return; 6206 6207 /* 6208 * duplicate the write operations while the dev replace 6209 * procedure is running. Since the copying of the old disk to 6210 * the new disk takes place at run time while the filesystem is 6211 * mounted writable, the regular write operations to the old 6212 * disk have to be duplicated to go to the new disk as well. 6213 * 6214 * Note that device->missing is handled by the caller, and that 6215 * the write to the old disk is already set up in the stripes 6216 * array. 6217 */ 6218 index_where_to_add = num_stripes; 6219 for (i = 0; i < num_stripes; i++) { 6220 if (bioc->stripes[i].dev->devid == srcdev_devid) { 6221 /* write to new disk, too */ 6222 struct btrfs_io_stripe *new = 6223 bioc->stripes + index_where_to_add; 6224 struct btrfs_io_stripe *old = 6225 bioc->stripes + i; 6226 6227 new->physical = old->physical; 6228 new->length = old->length; 6229 new->dev = dev_replace->tgtdev; 6230 bioc->tgtdev_map[i] = index_where_to_add; 6231 index_where_to_add++; 6232 max_errors++; 6233 tgtdev_indexes++; 6234 } 6235 } 6236 num_stripes = index_where_to_add; 6237 } else if (op == BTRFS_MAP_GET_READ_MIRRORS) { 6238 int index_srcdev = 0; 6239 int found = 0; 6240 u64 physical_of_found = 0; 6241 6242 /* 6243 * During the dev-replace procedure, the target drive can also 6244 * be used to read data in case it is needed to repair a corrupt 6245 * block elsewhere. This is possible if the requested area is 6246 * left of the left cursor. In this area, the target drive is a 6247 * full copy of the source drive. 6248 */ 6249 for (i = 0; i < num_stripes; i++) { 6250 if (bioc->stripes[i].dev->devid == srcdev_devid) { 6251 /* 6252 * In case of DUP, in order to keep it simple, 6253 * only add the mirror with the lowest physical 6254 * address 6255 */ 6256 if (found && 6257 physical_of_found <= bioc->stripes[i].physical) 6258 continue; 6259 index_srcdev = i; 6260 found = 1; 6261 physical_of_found = bioc->stripes[i].physical; 6262 } 6263 } 6264 if (found) { 6265 struct btrfs_io_stripe *tgtdev_stripe = 6266 bioc->stripes + num_stripes; 6267 6268 tgtdev_stripe->physical = physical_of_found; 6269 tgtdev_stripe->length = 6270 bioc->stripes[index_srcdev].length; 6271 tgtdev_stripe->dev = dev_replace->tgtdev; 6272 bioc->tgtdev_map[index_srcdev] = num_stripes; 6273 6274 tgtdev_indexes++; 6275 num_stripes++; 6276 } 6277 } 6278 6279 *num_stripes_ret = num_stripes; 6280 *max_errors_ret = max_errors; 6281 bioc->num_tgtdevs = tgtdev_indexes; 6282 *bioc_ret = bioc; 6283 } 6284 6285 static bool need_full_stripe(enum btrfs_map_op op) 6286 { 6287 return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS); 6288 } 6289 6290 /* 6291 * Calculate the geometry of a particular (address, len) tuple. This 6292 * information is used to calculate how big a particular bio can get before it 6293 * straddles a stripe. 6294 * 6295 * @fs_info: the filesystem 6296 * @em: mapping containing the logical extent 6297 * @op: type of operation - write or read 6298 * @logical: address that we want to figure out the geometry of 6299 * @io_geom: pointer used to return values 6300 * 6301 * Returns < 0 in case a chunk for the given logical address cannot be found, 6302 * usually shouldn't happen unless @logical is corrupted, 0 otherwise. 
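 *
 * A worked example (hypothetical values): on a RAID0 chunk with
 * stripe_len = 64K, a request at chunk offset 100K yields stripe_nr = 1,
 * stripe_offset = 36K and a len capped at 28K, so the resulting bio stops
 * at the 128K stripe boundary.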
6303 */
6304 int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *em,
6305 enum btrfs_map_op op, u64 logical,
6306 struct btrfs_io_geometry *io_geom)
6307 {
6308 struct map_lookup *map;
6309 u64 len;
6310 u64 offset;
6311 u64 stripe_offset;
6312 u64 stripe_nr;
6313 u64 stripe_len;
6314 u64 raid56_full_stripe_start = (u64)-1;
6315 int data_stripes;
6316
6317 ASSERT(op != BTRFS_MAP_DISCARD);
6318
6319 map = em->map_lookup;
6320 /* Offset of this logical address in the chunk */
6321 offset = logical - em->start;
6322 /* Len of a stripe in a chunk */
6323 stripe_len = map->stripe_len;
6324 /* Stripe where this block falls in */
6325 stripe_nr = div64_u64(offset, stripe_len);
6326 /* Offset of the stripe in the chunk */
6327 stripe_offset = stripe_nr * stripe_len;
6328 if (offset < stripe_offset) {
6329 btrfs_crit(fs_info,
6330 "stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
6331 stripe_offset, offset, em->start, logical, stripe_len);
6332 return -EINVAL;
6333 }
6334
6335 /* stripe_offset is the offset of this block in its stripe */
6336 stripe_offset = offset - stripe_offset;
6337 data_stripes = nr_data_stripes(map);
6338
6339 /* Only stripe-based profiles need to check against stripe length. */
6340 if (map->type & BTRFS_BLOCK_GROUP_STRIPE_MASK) {
6341 u64 max_len = stripe_len - stripe_offset;
6342
6343 /*
6344 * In case of raid56, we need to know the stripe aligned start
6345 */
6346 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6347 unsigned long full_stripe_len = stripe_len * data_stripes;
6348 raid56_full_stripe_start = offset;
6349
6350 /*
6351 * Allow a write of a full stripe, but make sure we
6352 * don't allow straddling of stripes.
6353 */
6354 raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
6355 full_stripe_len);
6356 raid56_full_stripe_start *= full_stripe_len;
6357
6358 /*
6359 * For writes to RAID[56], allow a full stripe set across
6360 * all disks. For other RAID types and for RAID[56]
6361 * reads, just allow a single stripe (on a single disk).
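 *
 * E.g. with a 4-device RAID5 chunk (data_stripes = 3, stripe_len = 64K),
 * a write starting at the full stripe start may span up to 192K in one
 * bio, while a read is still capped at a single 64K stripe minus the
 * offset within it.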
6362 */ 6363 if (op == BTRFS_MAP_WRITE) { 6364 max_len = stripe_len * data_stripes - 6365 (offset - raid56_full_stripe_start); 6366 } 6367 } 6368 len = min_t(u64, em->len - offset, max_len); 6369 } else { 6370 len = em->len - offset; 6371 } 6372 6373 io_geom->len = len; 6374 io_geom->offset = offset; 6375 io_geom->stripe_len = stripe_len; 6376 io_geom->stripe_nr = stripe_nr; 6377 io_geom->stripe_offset = stripe_offset; 6378 io_geom->raid56_stripe_offset = raid56_full_stripe_start; 6379 6380 return 0; 6381 } 6382 6383 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, 6384 enum btrfs_map_op op, 6385 u64 logical, u64 *length, 6386 struct btrfs_io_context **bioc_ret, 6387 int mirror_num, int need_raid_map) 6388 { 6389 struct extent_map *em; 6390 struct map_lookup *map; 6391 u64 stripe_offset; 6392 u64 stripe_nr; 6393 u64 stripe_len; 6394 u32 stripe_index; 6395 int data_stripes; 6396 int i; 6397 int ret = 0; 6398 int num_stripes; 6399 int max_errors = 0; 6400 int tgtdev_indexes = 0; 6401 struct btrfs_io_context *bioc = NULL; 6402 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; 6403 int dev_replace_is_ongoing = 0; 6404 int num_alloc_stripes; 6405 int patch_the_first_stripe_for_dev_replace = 0; 6406 u64 physical_to_patch_in_first_stripe = 0; 6407 u64 raid56_full_stripe_start = (u64)-1; 6408 struct btrfs_io_geometry geom; 6409 6410 ASSERT(bioc_ret); 6411 ASSERT(op != BTRFS_MAP_DISCARD); 6412 6413 em = btrfs_get_chunk_map(fs_info, logical, *length); 6414 ASSERT(!IS_ERR(em)); 6415 6416 ret = btrfs_get_io_geometry(fs_info, em, op, logical, &geom); 6417 if (ret < 0) 6418 return ret; 6419 6420 map = em->map_lookup; 6421 6422 *length = geom.len; 6423 stripe_len = geom.stripe_len; 6424 stripe_nr = geom.stripe_nr; 6425 stripe_offset = geom.stripe_offset; 6426 raid56_full_stripe_start = geom.raid56_stripe_offset; 6427 data_stripes = nr_data_stripes(map); 6428 6429 down_read(&dev_replace->rwsem); 6430 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace); 6431 /* 6432 * Hold the semaphore for read during the whole operation, write is 6433 * requested at commit time but must wait. 
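 * If no replace is running, the semaphore is dropped right below;
 * otherwise it is held until the out label at the end of this function.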
6434 */ 6435 if (!dev_replace_is_ongoing) 6436 up_read(&dev_replace->rwsem); 6437 6438 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 && 6439 !need_full_stripe(op) && dev_replace->tgtdev != NULL) { 6440 ret = get_extra_mirror_from_replace(fs_info, logical, *length, 6441 dev_replace->srcdev->devid, 6442 &mirror_num, 6443 &physical_to_patch_in_first_stripe); 6444 if (ret) 6445 goto out; 6446 else 6447 patch_the_first_stripe_for_dev_replace = 1; 6448 } else if (mirror_num > map->num_stripes) { 6449 mirror_num = 0; 6450 } 6451 6452 num_stripes = 1; 6453 stripe_index = 0; 6454 if (map->type & BTRFS_BLOCK_GROUP_RAID0) { 6455 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6456 &stripe_index); 6457 if (!need_full_stripe(op)) 6458 mirror_num = 1; 6459 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) { 6460 if (need_full_stripe(op)) 6461 num_stripes = map->num_stripes; 6462 else if (mirror_num) 6463 stripe_index = mirror_num - 1; 6464 else { 6465 stripe_index = find_live_mirror(fs_info, map, 0, 6466 dev_replace_is_ongoing); 6467 mirror_num = stripe_index + 1; 6468 } 6469 6470 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { 6471 if (need_full_stripe(op)) { 6472 num_stripes = map->num_stripes; 6473 } else if (mirror_num) { 6474 stripe_index = mirror_num - 1; 6475 } else { 6476 mirror_num = 1; 6477 } 6478 6479 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { 6480 u32 factor = map->num_stripes / map->sub_stripes; 6481 6482 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 6483 stripe_index *= map->sub_stripes; 6484 6485 if (need_full_stripe(op)) 6486 num_stripes = map->sub_stripes; 6487 else if (mirror_num) 6488 stripe_index += mirror_num - 1; 6489 else { 6490 int old_stripe_index = stripe_index; 6491 stripe_index = find_live_mirror(fs_info, map, 6492 stripe_index, 6493 dev_replace_is_ongoing); 6494 mirror_num = stripe_index - old_stripe_index + 1; 6495 } 6496 6497 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6498 if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) { 6499 /* push stripe_nr back to the start of the full stripe */ 6500 stripe_nr = div64_u64(raid56_full_stripe_start, 6501 stripe_len * data_stripes); 6502 6503 /* RAID[56] write or recovery. Return all stripes */ 6504 num_stripes = map->num_stripes; 6505 max_errors = nr_parity_stripes(map); 6506 6507 *length = map->stripe_len; 6508 stripe_index = 0; 6509 stripe_offset = 0; 6510 } else { 6511 /* 6512 * Mirror #0 or #1 means the original data block. 6513 * Mirror #2 is RAID5 parity block. 6514 * Mirror #3 is RAID6 Q block. 
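 *
 * E.g. for mirror_num = 3, stripe_index below starts at data_stripes + 1,
 * i.e. on the Q stripe, before the parity rotation is applied.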
6515 */ 6516 stripe_nr = div_u64_rem(stripe_nr, 6517 data_stripes, &stripe_index); 6518 if (mirror_num > 1) 6519 stripe_index = data_stripes + mirror_num - 2; 6520 6521 /* We distribute the parity blocks across stripes */ 6522 div_u64_rem(stripe_nr + stripe_index, map->num_stripes, 6523 &stripe_index); 6524 if (!need_full_stripe(op) && mirror_num <= 1) 6525 mirror_num = 1; 6526 } 6527 } else { 6528 /* 6529 * after this, stripe_nr is the number of stripes on this 6530 * device we have to walk to find the data, and stripe_index is 6531 * the number of our device in the stripe array 6532 */ 6533 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6534 &stripe_index); 6535 mirror_num = stripe_index + 1; 6536 } 6537 if (stripe_index >= map->num_stripes) { 6538 btrfs_crit(fs_info, 6539 "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u", 6540 stripe_index, map->num_stripes); 6541 ret = -EINVAL; 6542 goto out; 6543 } 6544 6545 num_alloc_stripes = num_stripes; 6546 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) { 6547 if (op == BTRFS_MAP_WRITE) 6548 num_alloc_stripes <<= 1; 6549 if (op == BTRFS_MAP_GET_READ_MIRRORS) 6550 num_alloc_stripes++; 6551 tgtdev_indexes = num_stripes; 6552 } 6553 6554 bioc = alloc_btrfs_io_context(fs_info, num_alloc_stripes, tgtdev_indexes); 6555 if (!bioc) { 6556 ret = -ENOMEM; 6557 goto out; 6558 } 6559 6560 for (i = 0; i < num_stripes; i++) { 6561 bioc->stripes[i].physical = map->stripes[stripe_index].physical + 6562 stripe_offset + stripe_nr * map->stripe_len; 6563 bioc->stripes[i].dev = map->stripes[stripe_index].dev; 6564 stripe_index++; 6565 } 6566 6567 /* Build raid_map */ 6568 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map && 6569 (need_full_stripe(op) || mirror_num > 1)) { 6570 u64 tmp; 6571 unsigned rot; 6572 6573 /* Work out the disk rotation on this stripe-set */ 6574 div_u64_rem(stripe_nr, num_stripes, &rot); 6575 6576 /* Fill in the logical address of each stripe */ 6577 tmp = stripe_nr * data_stripes; 6578 for (i = 0; i < data_stripes; i++) 6579 bioc->raid_map[(i + rot) % num_stripes] = 6580 em->start + (tmp + i) * map->stripe_len; 6581 6582 bioc->raid_map[(i + rot) % map->num_stripes] = RAID5_P_STRIPE; 6583 if (map->type & BTRFS_BLOCK_GROUP_RAID6) 6584 bioc->raid_map[(i + rot + 1) % num_stripes] = 6585 RAID6_Q_STRIPE; 6586 6587 sort_parity_stripes(bioc, num_stripes); 6588 } 6589 6590 if (need_full_stripe(op)) 6591 max_errors = btrfs_chunk_max_errors(map); 6592 6593 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL && 6594 need_full_stripe(op)) { 6595 handle_ops_on_dev_replace(op, &bioc, dev_replace, logical, 6596 &num_stripes, &max_errors); 6597 } 6598 6599 *bioc_ret = bioc; 6600 bioc->map_type = map->type; 6601 bioc->num_stripes = num_stripes; 6602 bioc->max_errors = max_errors; 6603 bioc->mirror_num = mirror_num; 6604 6605 /* 6606 * this is the case that REQ_READ && dev_replace_is_ongoing && 6607 * mirror_num == num_stripes + 1 && dev_replace target drive is 6608 * available as a mirror 6609 */ 6610 if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) { 6611 WARN_ON(num_stripes > 1); 6612 bioc->stripes[0].dev = dev_replace->tgtdev; 6613 bioc->stripes[0].physical = physical_to_patch_in_first_stripe; 6614 bioc->mirror_num = map->num_stripes + 1; 6615 } 6616 out: 6617 if (dev_replace_is_ongoing) { 6618 lockdep_assert_held(&dev_replace->rwsem); 6619 /* Unlock and let waiting writers proceed */ 6620 up_read(&dev_replace->rwsem); 6621 } 6622 free_extent_map(em); 6623 return ret; 6624 } 
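/*
 * A sketch of typical usage of the mapping API below (illustrative only;
 * @logical is assumed to be a mapped address and the print is purely for
 * demonstration):
 *
 *	struct btrfs_io_context *bioc = NULL;
 *	u64 len = fs_info->sectorsize;
 *	int i;
 *
 *	if (!btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
 *			     &len, &bioc, 0)) {
 *		for (i = 0; i < bioc->num_stripes; i++)
 *			pr_info("mirror %d: devid %llu physical %llu\n",
 *				i + 1, bioc->stripes[i].dev->devid,
 *				bioc->stripes[i].physical);
 *		btrfs_put_bioc(bioc);
 *	}
 */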
6625
6626 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6627 u64 logical, u64 *length,
6628 struct btrfs_io_context **bioc_ret, int mirror_num)
6629 {
6630 if (op == BTRFS_MAP_DISCARD)
6631 return __btrfs_map_block_for_discard(fs_info, logical,
6632 length, bioc_ret);
6633
6634 return __btrfs_map_block(fs_info, op, logical, length, bioc_ret,
6635 mirror_num, 0);
6636 }
6637
6638 /* For scrub/replace */
6639 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6640 u64 logical, u64 *length,
6641 struct btrfs_io_context **bioc_ret)
6642 {
6643 return __btrfs_map_block(fs_info, op, logical, length, bioc_ret, 0, 1);
6644 }
6645
6646 static inline void btrfs_end_bioc(struct btrfs_io_context *bioc, struct bio *bio)
6647 {
6648 bio->bi_private = bioc->private;
6649 bio->bi_end_io = bioc->end_io;
6650 bio_endio(bio);
6651
6652 btrfs_put_bioc(bioc);
6653 }
6654
6655 static void btrfs_end_bio(struct bio *bio)
6656 {
6657 struct btrfs_io_context *bioc = bio->bi_private;
6658 int is_orig_bio = 0;
6659
6660 if (bio->bi_status) {
6661 atomic_inc(&bioc->error);
6662 if (bio->bi_status == BLK_STS_IOERR ||
6663 bio->bi_status == BLK_STS_TARGET) {
6664 struct btrfs_device *dev = btrfs_bio(bio)->device;
6665
6666 ASSERT(dev->bdev);
6667 if (btrfs_op(bio) == BTRFS_MAP_WRITE)
6668 btrfs_dev_stat_inc_and_print(dev,
6669 BTRFS_DEV_STAT_WRITE_ERRS);
6670 else if (!(bio->bi_opf & REQ_RAHEAD))
6671 btrfs_dev_stat_inc_and_print(dev,
6672 BTRFS_DEV_STAT_READ_ERRS);
6673 if (bio->bi_opf & REQ_PREFLUSH)
6674 btrfs_dev_stat_inc_and_print(dev,
6675 BTRFS_DEV_STAT_FLUSH_ERRS);
6676 }
6677 }
6678
6679 if (bio == bioc->orig_bio)
6680 is_orig_bio = 1;
6681
6682 btrfs_bio_counter_dec(bioc->fs_info);
6683
6684 if (atomic_dec_and_test(&bioc->stripes_pending)) {
6685 if (!is_orig_bio) {
6686 bio_put(bio);
6687 bio = bioc->orig_bio;
6688 }
6689
6690 btrfs_bio(bio)->mirror_num = bioc->mirror_num;
6691 /* Only send an error to the higher layers if it is
6692 * beyond the tolerance of the btrfs bio.
6693 */
6694 if (atomic_read(&bioc->error) > bioc->max_errors) {
6695 bio->bi_status = BLK_STS_IOERR;
6696 } else {
6697 /*
6698 * This bio is actually up to date; we didn't
6699 * go over the max number of errors.
6700 */
6701 bio->bi_status = BLK_STS_OK;
6702 }
6703
6704 btrfs_end_bioc(bioc, bio);
6705 } else if (!is_orig_bio) {
6706 bio_put(bio);
6707 }
6708 }
6709
6710 static void submit_stripe_bio(struct btrfs_io_context *bioc, struct bio *bio,
6711 u64 physical, struct btrfs_device *dev)
6712 {
6713 struct btrfs_fs_info *fs_info = bioc->fs_info;
6714
6715 bio->bi_private = bioc;
6716 btrfs_bio(bio)->device = dev;
6717 bio->bi_end_io = btrfs_end_bio;
6718 bio->bi_iter.bi_sector = physical >> 9;
6719 /*
6720 * For zone append writing, bi_sector must point to the beginning of the
6721 * zone.
6722 */
6723 if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
6724 if (btrfs_dev_is_sequential(dev, physical)) {
6725 u64 zone_start = round_down(physical, fs_info->zone_size);
6726
6727 bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT;
6728 } else {
6729 bio->bi_opf &= ~REQ_OP_ZONE_APPEND;
6730 bio->bi_opf |= REQ_OP_WRITE;
6731 }
6732 }
6733 btrfs_debug_in_rcu(fs_info,
6734 "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
6735 bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector,
6736 (unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name),
6737 dev->devid, bio->bi_iter.bi_size);
6738 bio_set_dev(bio, dev->bdev);
6739
6740 btrfs_bio_counter_inc_noblocked(fs_info);
6741
6742
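/* Hand the bio to the block layer, via the check-integrity wrapper (plain submit_bio() when integrity checking is disabled). */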
btrfsic_submit_bio(bio); 6743 } 6744 6745 static void bioc_error(struct btrfs_io_context *bioc, struct bio *bio, u64 logical) 6746 { 6747 atomic_inc(&bioc->error); 6748 if (atomic_dec_and_test(&bioc->stripes_pending)) { 6749 /* Should be the original bio. */ 6750 WARN_ON(bio != bioc->orig_bio); 6751 6752 btrfs_bio(bio)->mirror_num = bioc->mirror_num; 6753 bio->bi_iter.bi_sector = logical >> 9; 6754 if (atomic_read(&bioc->error) > bioc->max_errors) 6755 bio->bi_status = BLK_STS_IOERR; 6756 else 6757 bio->bi_status = BLK_STS_OK; 6758 btrfs_end_bioc(bioc, bio); 6759 } 6760 } 6761 6762 blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, 6763 int mirror_num) 6764 { 6765 struct btrfs_device *dev; 6766 struct bio *first_bio = bio; 6767 u64 logical = bio->bi_iter.bi_sector << 9; 6768 u64 length = 0; 6769 u64 map_length; 6770 int ret; 6771 int dev_nr; 6772 int total_devs; 6773 struct btrfs_io_context *bioc = NULL; 6774 6775 length = bio->bi_iter.bi_size; 6776 map_length = length; 6777 6778 btrfs_bio_counter_inc_blocked(fs_info); 6779 ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical, 6780 &map_length, &bioc, mirror_num, 1); 6781 if (ret) { 6782 btrfs_bio_counter_dec(fs_info); 6783 return errno_to_blk_status(ret); 6784 } 6785 6786 total_devs = bioc->num_stripes; 6787 bioc->orig_bio = first_bio; 6788 bioc->private = first_bio->bi_private; 6789 bioc->end_io = first_bio->bi_end_io; 6790 atomic_set(&bioc->stripes_pending, bioc->num_stripes); 6791 6792 if ((bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) && 6793 ((btrfs_op(bio) == BTRFS_MAP_WRITE) || (mirror_num > 1))) { 6794 /* In this case, map_length has been set to the length of 6795 a single stripe; not the whole write */ 6796 if (btrfs_op(bio) == BTRFS_MAP_WRITE) { 6797 ret = raid56_parity_write(bio, bioc, map_length); 6798 } else { 6799 ret = raid56_parity_recover(bio, bioc, map_length, 6800 mirror_num, 1); 6801 } 6802 6803 btrfs_bio_counter_dec(fs_info); 6804 return errno_to_blk_status(ret); 6805 } 6806 6807 if (map_length < length) { 6808 btrfs_crit(fs_info, 6809 "mapping failed logical %llu bio len %llu len %llu", 6810 logical, length, map_length); 6811 BUG(); 6812 } 6813 6814 for (dev_nr = 0; dev_nr < total_devs; dev_nr++) { 6815 dev = bioc->stripes[dev_nr].dev; 6816 if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING, 6817 &dev->dev_state) || 6818 (btrfs_op(first_bio) == BTRFS_MAP_WRITE && 6819 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) { 6820 bioc_error(bioc, first_bio, logical); 6821 continue; 6822 } 6823 6824 if (dev_nr < total_devs - 1) 6825 bio = btrfs_bio_clone(first_bio); 6826 else 6827 bio = first_bio; 6828 6829 submit_stripe_bio(bioc, bio, bioc->stripes[dev_nr].physical, dev); 6830 } 6831 btrfs_bio_counter_dec(fs_info); 6832 return BLK_STS_OK; 6833 } 6834 6835 static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args, 6836 const struct btrfs_fs_devices *fs_devices) 6837 { 6838 if (args->fsid == NULL) 6839 return true; 6840 if (memcmp(fs_devices->metadata_uuid, args->fsid, BTRFS_FSID_SIZE) == 0) 6841 return true; 6842 return false; 6843 } 6844 6845 static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args, 6846 const struct btrfs_device *device) 6847 { 6848 ASSERT((args->devid != (u64)-1) || args->missing); 6849 6850 if ((args->devid != (u64)-1) && device->devid != args->devid) 6851 return false; 6852 if (args->uuid && memcmp(device->uuid, args->uuid, BTRFS_UUID_SIZE) != 0) 6853 return false; 6854 if (!args->missing) 6855 return true; 6856 if 
(test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) && 6857 !device->bdev) 6858 return true; 6859 return false; 6860 } 6861 6862 /* 6863 * Find a device specified by @devid or @uuid in the list of @fs_devices, or 6864 * return NULL. 6865 * 6866 * If devid and uuid are both specified, the match must be exact, otherwise 6867 * only devid is used. 6868 */ 6869 struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices, 6870 const struct btrfs_dev_lookup_args *args) 6871 { 6872 struct btrfs_device *device; 6873 struct btrfs_fs_devices *seed_devs; 6874 6875 if (dev_args_match_fs_devices(args, fs_devices)) { 6876 list_for_each_entry(device, &fs_devices->devices, dev_list) { 6877 if (dev_args_match_device(args, device)) 6878 return device; 6879 } 6880 } 6881 6882 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 6883 if (!dev_args_match_fs_devices(args, seed_devs)) 6884 continue; 6885 list_for_each_entry(device, &seed_devs->devices, dev_list) { 6886 if (dev_args_match_device(args, device)) 6887 return device; 6888 } 6889 } 6890 6891 return NULL; 6892 } 6893 6894 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices, 6895 u64 devid, u8 *dev_uuid) 6896 { 6897 struct btrfs_device *device; 6898 unsigned int nofs_flag; 6899 6900 /* 6901 * We call this under the chunk_mutex, so we want to use NOFS for this 6902 * allocation, however we don't want to change btrfs_alloc_device() to 6903 * always do NOFS because we use it in a lot of other GFP_KERNEL safe 6904 * places. 6905 */ 6906 nofs_flag = memalloc_nofs_save(); 6907 device = btrfs_alloc_device(NULL, &devid, dev_uuid); 6908 memalloc_nofs_restore(nofs_flag); 6909 if (IS_ERR(device)) 6910 return device; 6911 6912 list_add(&device->dev_list, &fs_devices->devices); 6913 device->fs_devices = fs_devices; 6914 fs_devices->num_devices++; 6915 6916 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 6917 fs_devices->missing_devices++; 6918 6919 return device; 6920 } 6921 6922 /** 6923 * btrfs_alloc_device - allocate struct btrfs_device 6924 * @fs_info: used only for generating a new devid, can be NULL if 6925 * devid is provided (i.e. @devid != NULL). 6926 * @devid: a pointer to devid for this device. If NULL a new devid 6927 * is generated. 6928 * @uuid: a pointer to UUID for this device. If NULL a new UUID 6929 * is generated. 6930 * 6931 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR() 6932 * on error. Returned struct is not linked onto any lists and must be 6933 * destroyed with btrfs_free_device. 
6934 */
6935 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6936 const u64 *devid,
6937 const u8 *uuid)
6938 {
6939 struct btrfs_device *dev;
6940 u64 tmp;
6941
6942 if (WARN_ON(!devid && !fs_info))
6943 return ERR_PTR(-EINVAL);
6944
6945 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
6946 if (!dev)
6947 return ERR_PTR(-ENOMEM);
6948
6949 /*
6950 * Preallocate a bio that's always going to be used for flushing device
6951 * barriers and matches the device lifespan.
6952 */
6953 dev->flush_bio = bio_kmalloc(GFP_KERNEL, 0);
6954 if (!dev->flush_bio) {
6955 kfree(dev);
6956 return ERR_PTR(-ENOMEM);
6957 }
6958
6959 INIT_LIST_HEAD(&dev->dev_list);
6960 INIT_LIST_HEAD(&dev->dev_alloc_list);
6961 INIT_LIST_HEAD(&dev->post_commit_list);
6962
6963 atomic_set(&dev->dev_stats_ccnt, 0);
6964 btrfs_device_data_ordered_init(dev);
6965 extent_io_tree_init(fs_info, &dev->alloc_state,
6966 IO_TREE_DEVICE_ALLOC_STATE, NULL);
6967
6968 if (devid)
6969 tmp = *devid;
6970 else {
6971 int ret;
6972
6973 ret = find_next_devid(fs_info, &tmp);
6974 if (ret) {
6975 btrfs_free_device(dev);
6976 return ERR_PTR(ret);
6977 }
6978 }
6979 dev->devid = tmp;
6980
6981 if (uuid)
6982 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
6983 else
6984 generate_random_uuid(dev->uuid);
6985
6986 return dev;
6987 }
6988
6989 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
6990 u64 devid, u8 *uuid, bool error)
6991 {
6992 if (error)
6993 btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
6994 devid, uuid);
6995 else
6996 btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
6997 devid, uuid);
6998 }
6999
7000 static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
7001 {
7002 const int data_stripes = calc_data_stripes(type, num_stripes);
7003
7004 return div_u64(chunk_len, data_stripes);
7005 }
7006
7007 #if BITS_PER_LONG == 32
7008 /*
7009 * Due to the page cache limit, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE
7010 * can't be accessed on 32bit systems.
7011 *
7012 * This function does a mount-time check to reject the fs if it already has
7013 * a metadata chunk beyond that limit.
7014 */
7015 static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
7016 u64 logical, u64 length, u64 type)
7017 {
7018 if (!(type & BTRFS_BLOCK_GROUP_METADATA))
7019 return 0;
7020
7021 if (logical + length < MAX_LFS_FILESIZE)
7022 return 0;
7023
7024 btrfs_err_32bit_limit(fs_info);
7025 return -EOVERFLOW;
7026 }
7027
7028 /*
7029 * This is to give early warning for any metadata chunk reaching
7030 * BTRFS_32BIT_EARLY_WARN_THRESHOLD.
7031 * Although we can still access the metadata, it's not going to be possible
7032 * once the limit is reached.
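 *
 * The early-warning threshold is below the hard MAX_LFS_FILESIZE cutoff
 * enforced by check_32bit_meta_chunk() above, so the warning fires while
 * the filesystem can still be mounted.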
7033 */ 7034 static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info, 7035 u64 logical, u64 length, u64 type) 7036 { 7037 if (!(type & BTRFS_BLOCK_GROUP_METADATA)) 7038 return; 7039 7040 if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD) 7041 return; 7042 7043 btrfs_warn_32bit_limit(fs_info); 7044 } 7045 #endif 7046 7047 static struct btrfs_device *handle_missing_device(struct btrfs_fs_info *fs_info, 7048 u64 devid, u8 *uuid) 7049 { 7050 struct btrfs_device *dev; 7051 7052 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7053 btrfs_report_missing_device(fs_info, devid, uuid, true); 7054 return ERR_PTR(-ENOENT); 7055 } 7056 7057 dev = add_missing_dev(fs_info->fs_devices, devid, uuid); 7058 if (IS_ERR(dev)) { 7059 btrfs_err(fs_info, "failed to init missing device %llu: %ld", 7060 devid, PTR_ERR(dev)); 7061 return dev; 7062 } 7063 btrfs_report_missing_device(fs_info, devid, uuid, false); 7064 7065 return dev; 7066 } 7067 7068 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf, 7069 struct btrfs_chunk *chunk) 7070 { 7071 BTRFS_DEV_LOOKUP_ARGS(args); 7072 struct btrfs_fs_info *fs_info = leaf->fs_info; 7073 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 7074 struct map_lookup *map; 7075 struct extent_map *em; 7076 u64 logical; 7077 u64 length; 7078 u64 devid; 7079 u64 type; 7080 u8 uuid[BTRFS_UUID_SIZE]; 7081 int num_stripes; 7082 int ret; 7083 int i; 7084 7085 logical = key->offset; 7086 length = btrfs_chunk_length(leaf, chunk); 7087 type = btrfs_chunk_type(leaf, chunk); 7088 num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 7089 7090 #if BITS_PER_LONG == 32 7091 ret = check_32bit_meta_chunk(fs_info, logical, length, type); 7092 if (ret < 0) 7093 return ret; 7094 warn_32bit_meta_chunk(fs_info, logical, length, type); 7095 #endif 7096 7097 /* 7098 * Only need to verify chunk item if we're reading from sys chunk array, 7099 * as chunk item in tree block is already verified by tree-checker. 7100 */ 7101 if (leaf->start == BTRFS_SUPER_INFO_OFFSET) { 7102 ret = btrfs_check_chunk_valid(leaf, chunk, logical); 7103 if (ret) 7104 return ret; 7105 } 7106 7107 read_lock(&map_tree->lock); 7108 em = lookup_extent_mapping(map_tree, logical, 1); 7109 read_unlock(&map_tree->lock); 7110 7111 /* already mapped? 
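 * This can happen when the same chunk was already read from the
 * sys_chunk_array and is then seen again while scanning the chunk tree.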
*/ 7112 if (em && em->start <= logical && em->start + em->len > logical) { 7113 free_extent_map(em); 7114 return 0; 7115 } else if (em) { 7116 free_extent_map(em); 7117 } 7118 7119 em = alloc_extent_map(); 7120 if (!em) 7121 return -ENOMEM; 7122 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); 7123 if (!map) { 7124 free_extent_map(em); 7125 return -ENOMEM; 7126 } 7127 7128 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 7129 em->map_lookup = map; 7130 em->start = logical; 7131 em->len = length; 7132 em->orig_start = 0; 7133 em->block_start = 0; 7134 em->block_len = em->len; 7135 7136 map->num_stripes = num_stripes; 7137 map->io_width = btrfs_chunk_io_width(leaf, chunk); 7138 map->io_align = btrfs_chunk_io_align(leaf, chunk); 7139 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk); 7140 map->type = type; 7141 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); 7142 map->verified_stripes = 0; 7143 em->orig_block_len = calc_stripe_length(type, em->len, 7144 map->num_stripes); 7145 for (i = 0; i < num_stripes; i++) { 7146 map->stripes[i].physical = 7147 btrfs_stripe_offset_nr(leaf, chunk, i); 7148 devid = btrfs_stripe_devid_nr(leaf, chunk, i); 7149 args.devid = devid; 7150 read_extent_buffer(leaf, uuid, (unsigned long) 7151 btrfs_stripe_dev_uuid_nr(chunk, i), 7152 BTRFS_UUID_SIZE); 7153 args.uuid = uuid; 7154 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args); 7155 if (!map->stripes[i].dev) { 7156 map->stripes[i].dev = handle_missing_device(fs_info, 7157 devid, uuid); 7158 if (IS_ERR(map->stripes[i].dev)) { 7159 free_extent_map(em); 7160 return PTR_ERR(map->stripes[i].dev); 7161 } 7162 } 7163 7164 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 7165 &(map->stripes[i].dev->dev_state)); 7166 } 7167 7168 write_lock(&map_tree->lock); 7169 ret = add_extent_mapping(map_tree, em, 0); 7170 write_unlock(&map_tree->lock); 7171 if (ret < 0) { 7172 btrfs_err(fs_info, 7173 "failed to add chunk map, start=%llu len=%llu: %d", 7174 em->start, em->len, ret); 7175 } 7176 free_extent_map(em); 7177 7178 return ret; 7179 } 7180 7181 static void fill_device_from_item(struct extent_buffer *leaf, 7182 struct btrfs_dev_item *dev_item, 7183 struct btrfs_device *device) 7184 { 7185 unsigned long ptr; 7186 7187 device->devid = btrfs_device_id(leaf, dev_item); 7188 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item); 7189 device->total_bytes = device->disk_total_bytes; 7190 device->commit_total_bytes = device->disk_total_bytes; 7191 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item); 7192 device->commit_bytes_used = device->bytes_used; 7193 device->type = btrfs_device_type(leaf, dev_item); 7194 device->io_align = btrfs_device_io_align(leaf, dev_item); 7195 device->io_width = btrfs_device_io_width(leaf, dev_item); 7196 device->sector_size = btrfs_device_sector_size(leaf, dev_item); 7197 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID); 7198 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 7199 7200 ptr = btrfs_device_uuid(dev_item); 7201 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 7202 } 7203 7204 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info, 7205 u8 *fsid) 7206 { 7207 struct btrfs_fs_devices *fs_devices; 7208 int ret; 7209 7210 lockdep_assert_held(&uuid_mutex); 7211 ASSERT(fsid); 7212 7213 /* This will match only for multi-device seed fs */ 7214 list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list) 7215 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE)) 7216 return fs_devices; 7217 7218 
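/* Not a seed fs we have already cloned; look it up among all known filesystems. */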
7219 fs_devices = find_fsid(fsid, NULL); 7220 if (!fs_devices) { 7221 if (!btrfs_test_opt(fs_info, DEGRADED)) 7222 return ERR_PTR(-ENOENT); 7223 7224 fs_devices = alloc_fs_devices(fsid, NULL); 7225 if (IS_ERR(fs_devices)) 7226 return fs_devices; 7227 7228 fs_devices->seeding = true; 7229 fs_devices->opened = 1; 7230 return fs_devices; 7231 } 7232 7233 /* 7234 * Upon first call for a seed fs fsid, just create a private copy of the 7235 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list 7236 */ 7237 fs_devices = clone_fs_devices(fs_devices); 7238 if (IS_ERR(fs_devices)) 7239 return fs_devices; 7240 7241 ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder); 7242 if (ret) { 7243 free_fs_devices(fs_devices); 7244 return ERR_PTR(ret); 7245 } 7246 7247 if (!fs_devices->seeding) { 7248 close_fs_devices(fs_devices); 7249 free_fs_devices(fs_devices); 7250 return ERR_PTR(-EINVAL); 7251 } 7252 7253 list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list); 7254 7255 return fs_devices; 7256 } 7257 7258 static int read_one_dev(struct extent_buffer *leaf, 7259 struct btrfs_dev_item *dev_item) 7260 { 7261 BTRFS_DEV_LOOKUP_ARGS(args); 7262 struct btrfs_fs_info *fs_info = leaf->fs_info; 7263 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7264 struct btrfs_device *device; 7265 u64 devid; 7266 int ret; 7267 u8 fs_uuid[BTRFS_FSID_SIZE]; 7268 u8 dev_uuid[BTRFS_UUID_SIZE]; 7269 7270 devid = args.devid = btrfs_device_id(leaf, dev_item); 7271 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 7272 BTRFS_UUID_SIZE); 7273 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 7274 BTRFS_FSID_SIZE); 7275 args.uuid = dev_uuid; 7276 args.fsid = fs_uuid; 7277 7278 if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) { 7279 fs_devices = open_seed_devices(fs_info, fs_uuid); 7280 if (IS_ERR(fs_devices)) 7281 return PTR_ERR(fs_devices); 7282 } 7283 7284 device = btrfs_find_device(fs_info->fs_devices, &args); 7285 if (!device) { 7286 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7287 btrfs_report_missing_device(fs_info, devid, 7288 dev_uuid, true); 7289 return -ENOENT; 7290 } 7291 7292 device = add_missing_dev(fs_devices, devid, dev_uuid); 7293 if (IS_ERR(device)) { 7294 btrfs_err(fs_info, 7295 "failed to add missing dev %llu: %ld", 7296 devid, PTR_ERR(device)); 7297 return PTR_ERR(device); 7298 } 7299 btrfs_report_missing_device(fs_info, devid, dev_uuid, false); 7300 } else { 7301 if (!device->bdev) { 7302 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7303 btrfs_report_missing_device(fs_info, 7304 devid, dev_uuid, true); 7305 return -ENOENT; 7306 } 7307 btrfs_report_missing_device(fs_info, devid, 7308 dev_uuid, false); 7309 } 7310 7311 if (!device->bdev && 7312 !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 7313 /* 7314 * this happens when a device that was properly setup 7315 * in the device info lists suddenly goes bad. 
7316 * device->bdev is NULL, so we have to set the
7317 * BTRFS_DEV_STATE_MISSING bit here.
7318 */
7319 device->fs_devices->missing_devices++;
7320 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
7321 }
7322
7323 /* Move the device to its own fs_devices */
7324 if (device->fs_devices != fs_devices) {
7325 ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
7326 &device->dev_state));
7327
7328 list_move(&device->dev_list, &fs_devices->devices);
7329 device->fs_devices->num_devices--;
7330 fs_devices->num_devices++;
7331
7332 device->fs_devices->missing_devices--;
7333 fs_devices->missing_devices++;
7334
7335 device->fs_devices = fs_devices;
7336 }
7337 }
7338
7339 if (device->fs_devices != fs_info->fs_devices) {
7340 BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
7341 if (device->generation !=
7342 btrfs_device_generation(leaf, dev_item))
7343 return -EINVAL;
7344 }
7345
7346 fill_device_from_item(leaf, dev_item, device);
7347 if (device->bdev) {
7348 u64 max_total_bytes = bdev_nr_bytes(device->bdev);
7349
7350 if (device->total_bytes > max_total_bytes) {
7351 btrfs_err(fs_info,
7352 "device total_bytes should be at most %llu but found %llu",
7353 max_total_bytes, device->total_bytes);
7354 return -EINVAL;
7355 }
7356 }
7357 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
7358 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
7359 !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
7360 device->fs_devices->total_rw_bytes += device->total_bytes;
7361 atomic64_add(device->total_bytes - device->bytes_used,
7362 &fs_info->free_chunk_space);
7363 }
7364 ret = 0;
7365 return ret;
7366 }
7367
7368 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
7369 {
7370 struct btrfs_root *root = fs_info->tree_root;
7371 struct btrfs_super_block *super_copy = fs_info->super_copy;
7372 struct extent_buffer *sb;
7373 struct btrfs_disk_key *disk_key;
7374 struct btrfs_chunk *chunk;
7375 u8 *array_ptr;
7376 unsigned long sb_array_offset;
7377 int ret = 0;
7378 u32 num_stripes;
7379 u32 array_size;
7380 u32 len = 0;
7381 u32 cur_offset;
7382 u64 type;
7383 struct btrfs_key key;
7384
7385 ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
7386 /*
7387 * This will create an extent buffer of nodesize; the superblock size is
7388 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
7389 * overallocate but we can keep it as-is, only the first page is used.
7390 */
7391 sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET,
7392 root->root_key.objectid, 0);
7393 if (IS_ERR(sb))
7394 return PTR_ERR(sb);
7395 set_extent_buffer_uptodate(sb);
7396 /*
7397 * The sb extent buffer is artificial and just used to read the system array.
7398 * The set_extent_buffer_uptodate() call does not properly mark all its
7399 * pages up-to-date when the page is larger: the extent does not cover the
7400 * whole page and consequently check_page_uptodate does not find all
7401 * the page's extents up-to-date (the hole beyond sb), and
7402 * write_extent_buffer then triggers a WARN_ON.
7403 *
7404 * Regular short extents go through the mark_extent_buffer_dirty/writeback
7405 * cycle, but sb spans only this function. Add an explicit SetPageUptodate
7406 * call to silence the warning, e.g. on PowerPC 64.
7407 */ 7408 if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE) 7409 SetPageUptodate(sb->pages[0]); 7410 7411 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); 7412 array_size = btrfs_super_sys_array_size(super_copy); 7413 7414 array_ptr = super_copy->sys_chunk_array; 7415 sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array); 7416 cur_offset = 0; 7417 7418 while (cur_offset < array_size) { 7419 disk_key = (struct btrfs_disk_key *)array_ptr; 7420 len = sizeof(*disk_key); 7421 if (cur_offset + len > array_size) 7422 goto out_short_read; 7423 7424 btrfs_disk_key_to_cpu(&key, disk_key); 7425 7426 array_ptr += len; 7427 sb_array_offset += len; 7428 cur_offset += len; 7429 7430 if (key.type != BTRFS_CHUNK_ITEM_KEY) { 7431 btrfs_err(fs_info, 7432 "unexpected item type %u in sys_array at offset %u", 7433 (u32)key.type, cur_offset); 7434 ret = -EIO; 7435 break; 7436 } 7437 7438 chunk = (struct btrfs_chunk *)sb_array_offset; 7439 /* 7440 * At least one btrfs_chunk with one stripe must be present, 7441 * exact stripe count check comes afterwards 7442 */ 7443 len = btrfs_chunk_item_size(1); 7444 if (cur_offset + len > array_size) 7445 goto out_short_read; 7446 7447 num_stripes = btrfs_chunk_num_stripes(sb, chunk); 7448 if (!num_stripes) { 7449 btrfs_err(fs_info, 7450 "invalid number of stripes %u in sys_array at offset %u", 7451 num_stripes, cur_offset); 7452 ret = -EIO; 7453 break; 7454 } 7455 7456 type = btrfs_chunk_type(sb, chunk); 7457 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) { 7458 btrfs_err(fs_info, 7459 "invalid chunk type %llu in sys_array at offset %u", 7460 type, cur_offset); 7461 ret = -EIO; 7462 break; 7463 } 7464 7465 len = btrfs_chunk_item_size(num_stripes); 7466 if (cur_offset + len > array_size) 7467 goto out_short_read; 7468 7469 ret = read_one_chunk(&key, sb, chunk); 7470 if (ret) 7471 break; 7472 7473 array_ptr += len; 7474 sb_array_offset += len; 7475 cur_offset += len; 7476 } 7477 clear_extent_buffer_uptodate(sb); 7478 free_extent_buffer_stale(sb); 7479 return ret; 7480 7481 out_short_read: 7482 btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u", 7483 len, cur_offset); 7484 clear_extent_buffer_uptodate(sb); 7485 free_extent_buffer_stale(sb); 7486 return -EIO; 7487 } 7488 7489 /* 7490 * Check if all chunks in the fs are OK for read-write degraded mount 7491 * 7492 * If the @failing_dev is specified, it's accounted as missing. 7493 * 7494 * Return true if all chunks meet the minimal RW mount requirements. 7495 * Return false if any chunk doesn't meet the minimal RW mount requirements. 7496 */ 7497 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info, 7498 struct btrfs_device *failing_dev) 7499 { 7500 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 7501 struct extent_map *em; 7502 u64 next_start = 0; 7503 bool ret = true; 7504 7505 read_lock(&map_tree->lock); 7506 em = lookup_extent_mapping(map_tree, 0, (u64)-1); 7507 read_unlock(&map_tree->lock); 7508 /* No chunk at all? 
Return false anyway */
7509 if (!em) {
7510 ret = false;
7511 goto out;
7512 }
7513 while (em) {
7514 struct map_lookup *map;
7515 int missing = 0;
7516 int max_tolerated;
7517 int i;
7518
7519 map = em->map_lookup;
7520 max_tolerated =
7521 btrfs_get_num_tolerated_disk_barrier_failures(
7522 map->type);
7523 for (i = 0; i < map->num_stripes; i++) {
7524 struct btrfs_device *dev = map->stripes[i].dev;
7525
7526 if (!dev || !dev->bdev ||
7527 test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
7528 dev->last_flush_error)
7529 missing++;
7530 else if (failing_dev && failing_dev == dev)
7531 missing++;
7532 }
7533 if (missing > max_tolerated) {
7534 if (!failing_dev)
7535 btrfs_warn(fs_info,
7536 "chunk %llu missing %d devices, max tolerance is %d for writable mount",
7537 em->start, missing, max_tolerated);
7538 free_extent_map(em);
7539 ret = false;
7540 goto out;
7541 }
7542 next_start = extent_map_end(em);
7543 free_extent_map(em);
7544
7545 read_lock(&map_tree->lock);
7546 em = lookup_extent_mapping(map_tree, next_start,
7547 (u64)(-1) - next_start);
7548 read_unlock(&map_tree->lock);
7549 }
7550 out:
7551 return ret;
7552 }
7553
7554 static void readahead_tree_node_children(struct extent_buffer *node)
7555 {
7556 int i;
7557 const int nr_items = btrfs_header_nritems(node);
7558
7559 for (i = 0; i < nr_items; i++)
7560 btrfs_readahead_node_child(node, i);
7561 }
7562
7563 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
7564 {
7565 struct btrfs_root *root = fs_info->chunk_root;
7566 struct btrfs_path *path;
7567 struct extent_buffer *leaf;
7568 struct btrfs_key key;
7569 struct btrfs_key found_key;
7570 int ret;
7571 int slot;
7572 u64 total_dev = 0;
7573 u64 last_ra_node = 0;
7574
7575 path = btrfs_alloc_path();
7576 if (!path)
7577 return -ENOMEM;
7578
7579 /*
7580 * The uuid_mutex is needed only if we are mounting a sprout FS,
7581 * otherwise we don't need it.
7582 */
7583 mutex_lock(&uuid_mutex);
7584
7585 /*
7586 * It is possible for mount and umount to race in such a way that
7587 * we execute this code path, but open_fs_devices failed to clear
7588 * total_rw_bytes. We certainly want it cleared before reading the
7589 * device items, so clear it here.
7590 */
7591 fs_info->fs_devices->total_rw_bytes = 0;
7592
7593 /*
7594 * Lockdep complains about possible circular locking dependency between
7595 * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores
7596 * used for freeze protection of a fs (struct super_block.s_writers),
7597 * which we take when starting a transaction, and extent buffers of the
7598 * chunk tree if we call read_one_dev() while holding a lock on an
7599 * extent buffer of the chunk tree. Since we are mounting the filesystem
7600 * and at this point there can't be any concurrent task modifying the
7601 * chunk tree, to keep it simple, just skip locking on the chunk tree.
7602 */
7603 ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
7604 path->skip_locking = 1;
7605
7606 /*
7607 * Read all device items, and then all the chunk items. All
7608 * device items are found before any chunk item (their object id
7609 * is smaller than the lowest possible object id for a chunk
7610 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
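 * (BTRFS_DEV_ITEMS_OBJECTID is 1 and BTRFS_FIRST_CHUNK_TREE_OBJECTID is
 * 256, so a single forward walk starting at objectid 1 visits every
 * device item before the first chunk item.)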

static void readahead_tree_node_children(struct extent_buffer *node)
{
	int i;
	const int nr_items = btrfs_header_nritems(node);

	for (i = 0; i < nr_items; i++)
		btrfs_readahead_node_child(node, i);
}

int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;
	u64 total_dev = 0;
	u64 last_ra_node = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * uuid_mutex is needed only if we are mounting a sprout FS,
	 * otherwise we don't need it.
	 */
	mutex_lock(&uuid_mutex);

	/*
	 * It is possible for mount and umount to race in such a way that
	 * we execute this code path, but open_fs_devices failed to clear
	 * total_rw_bytes. We certainly want it cleared before reading the
	 * device items, so clear it here.
	 */
	fs_info->fs_devices->total_rw_bytes = 0;

	/*
	 * Lockdep complains about possible circular locking dependency between
	 * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores
	 * used for freeze protection of a fs (struct super_block.s_writers),
	 * which we take when starting a transaction, and extent buffers of the
	 * chunk tree if we call read_one_dev() while holding a lock on an
	 * extent buffer of the chunk tree. Since we are mounting the filesystem
	 * and at this point there can't be any concurrent task modifying the
	 * chunk tree, to keep it simple, just skip locking on the chunk tree.
	 */
	ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
	path->skip_locking = 1;

	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		struct extent_buffer *node;

		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		node = path->nodes[1];
		if (node) {
			if (last_ra_node != node->start) {
				readahead_tree_node_children(node);
				last_ra_node = node->start;
			}
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;

			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(leaf, dev_item);
			if (ret)
				goto error;
			total_dev++;
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;

			/*
			 * We are only called at mount time, so no need to take
			 * fs_info->chunk_mutex. Plus, to avoid lockdep warnings,
			 * we always lock first fs_info->chunk_mutex before
			 * acquiring any locks on the chunk tree. This is a
			 * requirement for chunk allocation, see the comment on
			 * top of btrfs_chunk_alloc() for details.
			 */
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(&found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}

	/*
	 * After loading chunk tree, we've got all device information,
	 * do another round of validation checks.
	 */
	if (total_dev != fs_info->fs_devices->total_devices) {
		btrfs_err(fs_info,
	   "super_num_devices %llu mismatch with num_devices %llu found here",
			  btrfs_super_num_devices(fs_info->super_copy),
			  total_dev);
		ret = -EINVAL;
		goto error;
	}
	if (btrfs_super_total_bytes(fs_info->super_copy) <
	    fs_info->fs_devices->total_rw_bytes) {
		btrfs_err(fs_info,
	"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
			  btrfs_super_total_bytes(fs_info->super_copy),
			  fs_info->fs_devices->total_rw_bytes);
		ret = -EINVAL;
		goto error;
	}
	ret = 0;
error:
	mutex_unlock(&uuid_mutex);

	btrfs_free_path(path);
	return ret;
}

void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	struct btrfs_device *device;

	fs_devices->fs_info = fs_info;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list)
		device->fs_info = fs_info;

	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		list_for_each_entry(device, &seed_devs->devices, dev_list)
			device->fs_info = fs_info;

		seed_devs->fs_info = fs_info;
	}
	mutex_unlock(&fs_devices->device_list_mutex);
}

static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
				 const struct btrfs_dev_stats_item *ptr,
				 int index)
{
	u64 val;

	read_extent_buffer(eb, &val,
			   offsetof(struct btrfs_dev_stats_item, values) +
			    ((unsigned long)ptr) + (index * sizeof(u64)),
			   sizeof(val));
	return val;
}

static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
				      struct btrfs_dev_stats_item *ptr,
				      int index, u64 val)
{
	write_extent_buffer(eb, &val,
			    offsetof(struct btrfs_dev_stats_item, values) +
			     ((unsigned long)ptr) + (index * sizeof(u64)),
			    sizeof(val));
}
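
/*
 * In the two helpers above, @ptr is an offset within the extent buffer (as
 * produced by btrfs_item_ptr()), not a dereferenceable pointer, so slot
 * @index of the on-disk values[] array lives at the byte offset they compute.
 * A hypothetical helper mirroring that arithmetic, for illustration only:
 */
static inline unsigned long
example_dev_stat_slot_offset(const struct btrfs_dev_stats_item *ptr, int index)
{
	return (unsigned long)ptr +
	       offsetof(struct btrfs_dev_stats_item, values) +
	       index * sizeof(u64);
}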

static int btrfs_device_init_dev_stats(struct btrfs_device *device,
				       struct btrfs_path *path)
{
	struct btrfs_dev_stats_item *ptr;
	struct extent_buffer *eb;
	struct btrfs_key key;
	int item_size;
	int i, ret, slot;

	if (!device->fs_info->dev_root)
		return 0;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;
	ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0);
	if (ret) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			btrfs_dev_stat_set(device, i, 0);
		device->dev_stats_valid = 1;
		btrfs_release_path(path);
		return ret < 0 ? ret : 0;
	}
	slot = path->slots[0];
	eb = path->nodes[0];
	item_size = btrfs_item_size(eb, slot);

	ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item);

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
		if (item_size >= (1 + i) * sizeof(__le64))
			btrfs_dev_stat_set(device, i,
					   btrfs_dev_stats_value(eb, ptr, i));
		else
			btrfs_dev_stat_set(device, i, 0);
	}

	device->dev_stats_valid = 1;
	btrfs_dev_stat_print_on_load(device);
	btrfs_release_path(path);

	return 0;
}

int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		ret = btrfs_device_init_dev_stats(device, path);
		if (ret)
			goto out;
	}
	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		list_for_each_entry(device, &seed_devs->devices, dev_list) {
			ret = btrfs_device_init_dev_stats(device, path);
			if (ret)
				goto out;
		}
	}
out:
	mutex_unlock(&fs_devices->device_list_mutex);

	btrfs_free_path(path);
	return ret;
}
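
/*
 * Illustration of the size guard in btrfs_device_init_dev_stats() above: a
 * dev_stats item written by an older kernel may carry fewer than
 * BTRFS_DEV_STAT_VALUES_MAX counters, and the missing trailing slots are then
 * treated as zero.  A hypothetical predicate for "slot @i is present in an
 * item of @item_size bytes":
 */
static inline bool example_dev_stat_slot_present(u32 item_size, int i)
{
	return item_size >= (i + 1) * sizeof(__le64);
}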

static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn_in_rcu(fs_info,
			"error %d while searching for dev_stats item for device %s",
			      ret, rcu_str_deref(device->name));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* need to delete old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn_in_rcu(fs_info,
				"delete too small dev_stats item for device %s failed %d",
				      rcu_str_deref(device->name), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn_in_rcu(fs_info,
				"insert dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Called from commit_transaction. Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		stats_cnt = atomic_read(&device->dev_stats_ccnt);
		if (!device->dev_stats_valid || stats_cnt == 0)
			continue;

		/*
		 * There is a LOAD-LOAD control dependency between the value of
		 * dev_stats_ccnt and updating the on-disk values which requires
		 * reading the in-memory counters. Such control dependencies
		 * require explicit read memory barriers.
		 *
		 * This memory barrier pairs with smp_mb__before_atomic in
		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
		 * barrier implied by atomic_xchg in
		 * btrfs_dev_stats_read_and_reset.
		 */
		smp_rmb();

		ret = update_dev_stat_item(trans, device);
		if (!ret)
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}
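
/*
 * A schematic of the publication protocol that the smp_rmb() above pairs
 * with (the writer below is a hypothetical illustration; the real one is
 * btrfs_dev_stat_inc()): the counter update must be visible before
 * dev_stats_ccnt advertises it, so a reader that observes a non-zero
 * dev_stats_ccnt and then issues a read barrier sees the updated counters.
 */
static inline void example_dev_stat_writer(struct btrfs_device *dev, int index)
{
	atomic_inc(dev->dev_stat_values + index);	/* update the data */
	smp_mb__before_atomic();			/* publish before ccnt */
	atomic_inc(&dev->dev_stats_ccnt);		/* mark stats dirty */
}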

void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);
	btrfs_dev_stat_print_on_error(dev);
}

static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
	if (!dev->dev_stats_valid)
		return;
	btrfs_err_rl_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			   rcu_str_deref(dev->name),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

	btrfs_info_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
	       rcu_str_deref(dev->name),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	BTRFS_DEV_LOOKUP_ARGS(args);
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	args.devid = stats->devid;
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_set(dev, i, 0);
		}
		btrfs_info(fs_info, "device stats zeroed by %s (%d)",
			   current->comm, task_pid_nr(current));
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}
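
/*
 * A hypothetical userspace counterpart of the handler above (sketch only,
 * error handling trimmed, compiled out here since it is not kernel code):
 * the same BTRFS_IOC_GET_DEV_STATS ioctl both reads the counters and, when
 * BTRFS_DEV_STATS_RESET is set in @flags, zeroes them afterwards.
 */
#if 0	/* userspace illustration */
#include <sys/ioctl.h>
#include <linux/btrfs.h>

static int example_read_dev_stats(int fs_fd, __u64 devid, __u64 *wr_errs)
{
	struct btrfs_ioctl_get_dev_stats args = {
		.devid = devid,
		.nr_items = BTRFS_DEV_STAT_VALUES_MAX,
		.flags = 0,	/* read without resetting */
	};

	if (ioctl(fs_fd, BTRFS_IOC_GET_DEV_STATS, &args) < 0)
		return -1;
	*wr_errs = args.values[BTRFS_DEV_STAT_WRITE_ERRS];
	return 0;
}
#endif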

/*
 * Update the size and bytes used for each device where it changed. This is
 * delayed since we would otherwise get errors while writing out the
 * superblocks.
 *
 * Must be invoked during transaction commit.
 */
void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
{
	struct btrfs_device *curr, *next;

	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);

	if (list_empty(&trans->dev_update_list))
		return;

	/*
	 * We don't need the device_list_mutex here. This list is owned by the
	 * transaction and the transaction must complete before the device is
	 * released.
	 */
	mutex_lock(&trans->fs_info->chunk_mutex);
	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
				 post_commit_list) {
		list_del_init(&curr->post_commit_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
		curr->commit_bytes_used = curr->bytes_used;
	}
	mutex_unlock(&trans->fs_info->chunk_mutex);
}

/*
 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
 */
int btrfs_bg_type_to_factor(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	return btrfs_raid_array[index].ncopies;
}

static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
				 u64 chunk_offset, u64 devid,
				 u64 physical_offset, u64 physical_len)
{
	struct btrfs_dev_lookup_args args = { .devid = devid };
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *dev;
	u64 stripe_len;
	bool found = false;
	int ret = 0;
	int i;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
		goto out;
	}

	map = em->map_lookup;
	stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
	if (physical_len != stripe_len) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
			  physical_offset, devid, em->start, physical_len,
			  stripe_len);
		ret = -EUCLEAN;
		goto out;
	}

	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->devid == devid &&
		    map->stripes[i].physical == physical_offset) {
			found = true;
			if (map->verified_stripes >= map->num_stripes) {
				btrfs_err(fs_info,
					"too many dev extents for chunk %llu found",
					  em->start);
				ret = -EUCLEAN;
				goto out;
			}
			map->verified_stripes++;
			break;
		}
	}
	if (!found) {
		btrfs_err(fs_info,
	"dev extent physical offset %llu devid %llu has no corresponding chunk",
			physical_offset, devid);
		ret = -EUCLEAN;
	}

	/* Make sure no dev extent is beyond device boundary */
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	if (!dev) {
		btrfs_err(fs_info, "failed to find devid %llu", devid);
		ret = -EUCLEAN;
		goto out;
	}

	if (physical_offset + physical_len > dev->disk_total_bytes) {
		btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
			  devid, physical_offset, physical_len,
			  dev->disk_total_bytes);
		ret = -EUCLEAN;
		goto out;
	}

	if (dev->zone_info) {
		u64 zone_size = dev->zone_info->zone_size;

		if (!IS_ALIGNED(physical_offset, zone_size) ||
		    !IS_ALIGNED(physical_len, zone_size)) {
			btrfs_err(fs_info,
"zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
				  devid, physical_offset, physical_len);
			ret = -EUCLEAN;
			goto out;
		}
	}

out:
	free_extent_map(em);
	return ret;
}
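
/*
 * Worked example for the length check above, assuming calc_stripe_length()
 * divides the chunk's logical length by the profile's number of data
 * stripes: a RAID0 chunk with em->len = 1 GiB striped over num_stripes = 4
 * devices must be backed by four 256 MiB dev extents, while a RAID1 chunk of
 * the same logical size (two mirrored stripes, one data stripe) needs two
 * 1 GiB dev extents.  Any physical_len deviating from that is rejected with
 * -EUCLEAN.
 */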

static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct rb_node *node;
	int ret = 0;

	read_lock(&em_tree->lock);
	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
		em = rb_entry(node, struct extent_map, rb_node);
		if (em->map_lookup->num_stripes !=
		    em->map_lookup->verified_stripes) {
			btrfs_err(fs_info,
			"chunk %llu has missing dev extent, have %d expect %d",
				  em->start,
				  em->map_lookup->verified_stripes,
				  em->map_lookup->num_stripes);
			ret = -EUCLEAN;
			goto out;
		}
	}
out:
	read_unlock(&em_tree->lock);
	return ret;
}

/*
 * Ensure that all dev extents are mapped to the correct chunk, otherwise
 * later chunk allocation/free would cause unexpected behavior.
 *
 * NOTE: This will iterate through the whole device tree, which should be of
 * the same size level as the chunk tree. This slightly increases mount time.
 */
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	u64 prev_devid = 0;
	u64 prev_dev_ext_end = 0;
	int ret = 0;

	/*
	 * We don't have a dev_root because we mounted with ignorebadroots and
	 * failed to load the root, so we want to skip the verification in this
	 * case for sure.
	 *
	 * However if the dev root is fine, but the tree itself is corrupted
	 * we'd still fail to mount. This verification is only to make sure
	 * writes can happen safely, so instead just bypass this check
	 * completely in the case of IGNOREBADROOTS.
	 */
	if (btrfs_test_opt(fs_info, IGNOREBADROOTS))
		return 0;

	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	while (1) {
		struct extent_buffer *leaf = path->nodes[0];
		struct btrfs_dev_extent *dext;
		int slot = path->slots[0];
		u64 chunk_offset;
		u64 physical_offset;
		u64 physical_len;
		u64 devid;

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			break;
		devid = key.objectid;
		physical_offset = key.offset;

		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
		physical_len = btrfs_dev_extent_length(leaf, dext);

		/* Check if this dev extent overlaps with the previous one */
		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
			btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
				  devid, physical_offset, prev_dev_ext_end);
			ret = -EUCLEAN;
			goto out;
		}

		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
					    physical_offset, physical_len);
		if (ret < 0)
			goto out;
		prev_devid = devid;
		prev_dev_ext_end = physical_offset + physical_len;

		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}

	/* Ensure all chunks have corresponding dev extents */
	ret = verify_chunk_dev_extent_mapping(fs_info);
out:
	btrfs_free_path(path);
	return ret;
}
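
/*
 * The overlap check in btrfs_verify_dev_extents() relies on the device tree
 * key order: dev extent items are keyed (devid, BTRFS_DEV_EXTENT_KEY,
 * physical_offset), so a forward walk visits each device's extents in
 * ascending physical order.  A hypothetical predicate equivalent to the
 * rejected condition:
 */
static inline bool example_dev_extents_overlap(u64 devid, u64 physical_offset,
					       u64 prev_devid,
					       u64 prev_dev_ext_end)
{
	return devid == prev_devid && physical_offset < prev_dev_ext_end;
}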

/*
 * Check whether the given block group or device is pinned by any inode being
 * used as a swapfile.
 */
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
{
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = fs_info->swapfile_pins.rb_node;
	while (node) {
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (ptr < sp->ptr)
			node = node->rb_left;
		else if (ptr > sp->ptr)
			node = node->rb_right;
		else
			break;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
	return node != NULL;
}

static int relocating_repair_kthread(void *data)
{
	struct btrfs_block_group *cache = (struct btrfs_block_group *)data;
	struct btrfs_fs_info *fs_info = cache->fs_info;
	u64 target;
	int ret = 0;

	target = cache->start;
	btrfs_put_block_group(cache);

	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
		btrfs_info(fs_info,
			   "zoned: skip relocating block group %llu to repair: EBUSY",
			   target);
		return -EBUSY;
	}

	mutex_lock(&fs_info->reclaim_bgs_lock);

	/* Ensure block group still exists */
	cache = btrfs_lookup_block_group(fs_info, target);
	if (!cache)
		goto out;

	if (!cache->relocating_repair)
		goto out;

	ret = btrfs_may_alloc_data_chunk(fs_info, target);
	if (ret < 0)
		goto out;

	btrfs_info(fs_info,
		   "zoned: relocating block group %llu to repair IO failure",
		   target);
	ret = btrfs_relocate_chunk(fs_info, target);

out:
	if (cache)
		btrfs_put_block_group(cache);
	mutex_unlock(&fs_info->reclaim_bgs_lock);
	btrfs_exclop_finish(fs_info);

	return ret;
}

bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
{
	struct btrfs_block_group *cache;

	if (!btrfs_is_zoned(fs_info))
		return false;

	/* Do not attempt to repair in degraded state */
	if (btrfs_test_opt(fs_info, DEGRADED))
		return true;

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return true;

	spin_lock(&cache->lock);
	if (cache->relocating_repair) {
		spin_unlock(&cache->lock);
		btrfs_put_block_group(cache);
		return true;
	}
	cache->relocating_repair = 1;
	spin_unlock(&cache->lock);

	kthread_run(relocating_repair_kthread, cache,
		    "btrfs-relocating-repair");

	return true;
}
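
/*
 * A hypothetical I/O-error-path caller of btrfs_repair_one_zone() (sketch
 * only, not part of this file): on zoned filesystems a failed write at
 * @logical triggers relocation of the whole block group instead of an
 * in-place rewrite, since sequential zones cannot be overwritten.
 */
static inline void example_handle_zoned_write_error(struct btrfs_fs_info *fs_info,
						    u64 logical)
{
	/* Returns false on non-zoned filesystems ... */
	if (btrfs_repair_one_zone(fs_info, logical))
		return;
	/* ... in which case a conventional rewrite-based repair would run. */
}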