// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include <linux/namei.h>
#include "misc.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"
#include "discard.h"
#include "zoned.h"

#define BTRFS_BLOCK_GROUP_STRIPE_MASK	(BTRFS_BLOCK_GROUP_RAID0 | \
					 BTRFS_BLOCK_GROUP_RAID10 | \
					 BTRFS_BLOCK_GROUP_RAID56_MASK)

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes	= 2,
		.dev_stripes	= 1,
		.devs_max	= 0,	/* 0 == as many as possible */
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "raid10",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 2,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "raid1",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C3] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 3,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 3,
		.ncopies	= 3,
		.nparity	= 0,
		.raid_name	= "raid1c3",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C3,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C4] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 4,
		.devs_min	= 4,
		.tolerated_failures = 3,
		.devs_increment	= 4,
		.ncopies	= 4,
		.nparity	= 0,
		.raid_name	= "raid1c4",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C4,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes	= 1,
		.dev_stripes	= 2,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "dup",
		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 0,
		.raid_name	= "raid0",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 0,
		.raid_name	= "single",
		.bg_flag	= 0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 1,
		.raid_name	= "raid5",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 2,
		.raid_name	= "raid6",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};
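
/*
 * Illustrative sketch (not part of the driver, compiled out): how the table
 * above is typically consulted via btrfs_bg_flags_to_raid_index(), defined
 * below. The flag value and the pr_info() call are arbitrary examples.
 */
#if 0
static void example_raid_table_lookup(void)
{
	const u64 flags = BTRFS_BLOCK_GROUP_RAID1;
	const int index = btrfs_bg_flags_to_raid_index(flags);

	/* Prints: raid1 needs at least 2 devices, tolerates 1 failure(s) */
	pr_info("%s needs at least %d devices, tolerates %d failure(s)\n",
		btrfs_raid_array[index].raid_name,
		btrfs_raid_array[index].devs_min,
		btrfs_raid_array[index].tolerated_failures);
}
#endif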

/*
 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
 * can be used as index to access btrfs_raid_array[].
 */
enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags)
{
	if (flags & BTRFS_BLOCK_GROUP_RAID10)
		return BTRFS_RAID_RAID10;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
		return BTRFS_RAID_RAID1;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1C3)
		return BTRFS_RAID_RAID1C3;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1C4)
		return BTRFS_RAID_RAID1C4;
	else if (flags & BTRFS_BLOCK_GROUP_DUP)
		return BTRFS_RAID_DUP;
	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
		return BTRFS_RAID_RAID0;
	else if (flags & BTRFS_BLOCK_GROUP_RAID5)
		return BTRFS_RAID_RAID5;
	else if (flags & BTRFS_BLOCK_GROUP_RAID6)
		return BTRFS_RAID_RAID6;

	return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
}

const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}

/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including the terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

#define DESCRIBE_FLAG(flag, desc)					\
	do {								\
		if (flags & (flag)) {					\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)			\
				goto out_overflow;			\
			size_bp -= ret;					\
			bp += ret;					\
			flags &= ~(flag);				\
		}							\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide a
	 * sufficiently large buffer.
	 */
out_overflow:;
}
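
/*
 * Illustrative sketch (compiled out): formatting a set of block group flags
 * with btrfs_describe_block_groups(). The flag combination is an arbitrary
 * example.
 */
#if 0
static void example_describe_block_groups(void)
{
	char buf[64];

	btrfs_describe_block_groups(BTRFS_BLOCK_GROUP_METADATA |
				    BTRFS_BLOCK_GROUP_RAID1, buf, sizeof(buf));
	/* buf now contains "metadata|raid1" */
	pr_info("bg flags: %s\n", buf);
}
#endif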

static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_io_context **bioc_ret,
			     int mirror_num, int need_raid_map);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files.
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list in general
 * but in mount context it could be used to exclude list modifications by e.g.
 * the scan ioctl
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, ie. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * Is not required at mount and close times, because our device list is
 * protected by the uuid_mutex at that point.
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   device_list_mutex
 *     chunk_mutex
 *   balance_mutex
 *
 *
 * Exclusive operations
 * ====================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * The status of exclusive operation is set and cleared atomically.
 * During the course of Paused state, fs_info::exclusive_operation remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * The exclusive status is cleared when the device operation is canceled or
 * completed.
 */
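
/*
 * Illustrative sketch (compiled out): taking the locks above in their
 * documented nesting order. The function is hypothetical and only shows the
 * ordering, not a real operation.
 */
#if 0
static void example_lock_nesting(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&uuid_mutex);
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	mutex_lock(&fs_info->chunk_mutex);
	mutex_lock(&fs_info->balance_mutex);

	/* ... work on devices, chunks and balance state ... */

	mutex_unlock(&fs_info->balance_mutex);
	mutex_unlock(&fs_info->chunk_mutex);
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	mutex_unlock(&uuid_mutex);
}
#endif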

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:		if not NULL, copy the UUID to fs_devices::fsid
 * @metadata_fsid:	if not NULL, copy the UUID to fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
						 const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	INIT_LIST_HEAD(&fs_devs->seed_list);
	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);

	if (metadata_fsid)
		memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
	else if (fsid)
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);

	return fs_devs;
}

void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	extent_io_tree_release(&device->alloc_state);
	bio_put(device->flush_bio);
	btrfs_destroy_dev_zone_info(device);
	kfree(device);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}

void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}

static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (metadata_fsid) {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
			    && memcmp(metadata_fsid, fs_devices->metadata_uuid,
				      BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		} else {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		}
	}
	return NULL;
}

static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
				struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by first scanning
	 * a device which didn't have its fsid/metadata_uuid changed
	 * at all and the CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(disk_super->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}
	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by a device that
	 * has an outdated pair of fsid/metadata_uuid and
	 * CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(fs_devices->metadata_uuid,
			   fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
		    memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
}

static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct btrfs_super_block **disk_super)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		sync_blockdev(*bdev);
	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*disk_super = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*disk_super)) {
		ret = PTR_ERR(*disk_super);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	return ret;
}
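
/*
 * Illustrative sketch (compiled out): typical use of btrfs_get_bdev_and_sb()
 * to open a device, read its primary super block and release both again. The
 * path and holder are arbitrary examples.
 */
#if 0
static int example_read_super(const char *path, void *holder)
{
	struct block_device *bdev;
	struct btrfs_super_block *disk_super;
	int ret;

	ret = btrfs_get_bdev_and_sb(path, FMODE_READ | FMODE_EXCL, holder,
				    1, &bdev, &disk_super);
	if (ret)
		return ret;

	pr_info("found btrfs with generation %llu\n",
		btrfs_super_generation(disk_super));

	btrfs_release_disk_super(disk_super);
	blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
	return 0;
}
#endif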

/**
 * btrfs_free_stale_devices - search and remove all stale devices
 * @devt:	 Optional. When provided, release only the unmounted devices
 *		 matching this devt.
 * @skip_device: Optional. Skip this device when searching for stale devices.
 *
 * Search and remove all stale devices (which are not mounted). When both
 * inputs are NULL, it will search and release all stale devices.
 *
 * Return:	0 for success or if @devt is 0.
 *		-EBUSY if @devt is a mounted device.
 *		-ENOENT if @devt does not match any device in the list.
 */
static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	if (devt)
		ret = -ENOENT;

	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {
		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (devt && devt != device->devt)
				continue;
			if (fs_devices->opened) {
				/* for an already deleted device return 0 */
				if (devt && ret != 0)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			ret = 0;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	return ret;
}

/*
 * This is only used on mount, and we are protected from competing things
 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 * fs_devices->device_list_mutex here.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
			struct btrfs_device *device, fmode_t flags,
			void *holder)
{
	struct block_device *bdev;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev, &disk_super);
	if (ret)
		return ret;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_free_page;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_free_page;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_free_page;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = true;
	} else {
		if (bdev_read_only(bdev))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
		fs_devices->rotating = true;

	device->bdev = bdev;
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	device->mode = flags;

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	btrfs_release_disk_super(disk_super);

	return 0;

error_free_page:
	btrfs_release_disk_super(disk_super);
	blkdev_put(bdev, flags);

	return -EINVAL;
}

/*
 * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
 * being created with a disk that has already completed its fsid change. Such
 * disk can belong to an fs which has its FSID changed or to one which doesn't.
 * Handle both cases here.
 */
static struct btrfs_fs_devices *find_fsid_inprogress(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, NULL);
}

static struct btrfs_fs_devices *find_fsid_changed(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle the case where the scanned device is part of an fs that had
	 * multiple successful changes of FSID but the currently scanned
	 * device didn't observe it yet, meaning its fsid will be different
	 * than the filesystem's. We need to handle two subcases:
	 * 1 - The fs still continues to have different METADATA/FSID uuids.
	 * 2 - The fs is switched back to its original FSID (METADATA/FSID
	 *     are equal).
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		/* Changed UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->fsid,
			   BTRFS_FSID_SIZE) != 0)
			return fs_devices;

		/* Unchanged UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}

	return NULL;
}

static struct btrfs_fs_devices *find_fsid_reverted_metadata(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle the case where the scanned device is part of an fs whose last
	 * metadata UUID change reverted it to the original FSID. At the same
	 * time fs_devices was first created by another constituent device
	 * which didn't fully observe the operation. This results in a
	 * btrfs_fs_devices created with metadata/fsid different AND
	 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
	 * fs_devices equal to the FSID of the disk.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    fs_devices->fsid_change)
			return fs_devices;
	}

	return NULL;
}

/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_t path_devt;
	int error;
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
					BTRFS_SUPER_FLAG_CHANGING_FSID_V2);

	error = lookup_bdev(path, &path_devt);
	if (error)
		return ERR_PTR(error);

	if (fsid_change_in_progress) {
		if (!has_metadata_uuid)
			fs_devices = find_fsid_inprogress(disk_super);
		else
			fs_devices = find_fsid_changed(disk_super);
	} else if (has_metadata_uuid) {
		fs_devices = find_fsid_with_metadata_uuid(disk_super);
	} else {
		fs_devices = find_fsid_reverted_metadata(disk_super);
		if (!fs_devices)
			fs_devices = find_fsid(disk_super->fsid, NULL);
	}

	if (!fs_devices) {
		if (has_metadata_uuid)
			fs_devices = alloc_fs_devices(disk_super->fsid,
						      disk_super->metadata_uuid);
		else
			fs_devices = alloc_fs_devices(disk_super->fsid, NULL);

		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		fs_devices->fsid_change = fsid_change_in_progress;

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		struct btrfs_dev_lookup_args args = {
			.devid = devid,
			.uuid = disk_super->dev_item.uuid,
		};

		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, &args);

		/*
		 * If this disk has been pulled into an fs devices created by
		 * a device which had the CHANGING_FSID_V2 flag then replace
		 * the metadata_uuid/fsid values of the fs_devices.
		 */
		if (fs_devices->fsid_change &&
		    found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
					BTRFS_FSID_SIZE);

			if (has_metadata_uuid)
				memcpy(fs_devices->metadata_uuid,
				       disk_super->metadata_uuid,
				       BTRFS_FSID_SIZE);
			else
				memcpy(fs_devices->metadata_uuid,
				       disk_super->fsid, BTRFS_FSID_SIZE);

			fs_devices->fsid_change = false;
		}
	}

	if (!device) {
		if (fs_devices->opened) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			btrfs_free_device(device);
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_assign_pointer(device->name, name);
		device->devt = path_devt;

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info(
	"BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->label, devid, found_transid, path,
				current->comm, task_pid_nr(current));
		else
			pr_info(
	"BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->fsid, devid, found_transid, path,
				current->comm, task_pid_nr(current));

	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *	a. The same device disappeared and reappeared with
		 *	   different name, or
		 *	b. The missing-disk-which-was-replaced has
		 *	   reappeared now.
		 *
		 * We must allow 1 and 2a above, but 2b would be spurious
		 * and unintentional.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transactions when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted. We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with the same uuid and devid. We keep the one
			 * with the larger generation number or the last-in if
			 * generations are equal.
			 */
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EEXIST);
		}

		/*
		 * We are going to replace the device path for a given devid,
		 * make sure it's the same device if the device is mounted
		 */
		if (device->bdev) {
			if (device->devt != path_devt) {
				mutex_unlock(&fs_devices->device_list_mutex);
				/*
				 * device->fs_info may not be reliable here, so
				 * pass in a NULL instead. This avoids a
				 * possible use-after-free when the fs_info and
				 * fs_info->sb are already torn down.
				 */
				btrfs_warn_in_rcu(NULL,
	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
						  path, devid, found_transid,
						  current->comm,
						  task_pid_nr(current));
				return ERR_PTR(-EEXIST);
			}
			btrfs_info_in_rcu(device->fs_info,
	"devid %llu device path %s changed to %s scanned by %s (%d)",
					  devid, rcu_str_deref(device->name),
					  path, current->comm,
					  task_pid_nr(current));
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
		device->devt = path_devt;
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with the largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);
	return device;
}

static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	fs_devices = alloc_fs_devices(orig->fsid, NULL);
	if (IS_ERR(fs_devices))
		return fs_devices;

	fs_devices->total_devices = orig->total_devices;

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device)) {
			ret = PTR_ERR(device);
			goto error;
		}

		/*
		 * This is ok to do without the RCU read lock held because we
		 * hold the uuid_mutex, so nothing we touch in here is going
		 * to disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str,
					GFP_KERNEL);
			if (!name) {
				btrfs_free_device(device);
				ret = -ENOMEM;
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(ret);
}

static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
				      struct btrfs_device **latest_dev)
{
	struct btrfs_device *device, *next;

	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			    !test_bit(BTRFS_DEV_STATE_MISSING,
				      &device->dev_state) &&
			    (!*latest_dev ||
			     device->generation > (*latest_dev)->generation)) {
				*latest_dev = device;
			}
			continue;
		}

		/*
		 * We have already validated the presence of
		 * BTRFS_DEV_REPLACE_DEVID in btrfs_init_dev_replace(), so
		 * just continue.
		 */
		if (device->devid == BTRFS_DEV_REPLACE_DEVID)
			continue;

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}
}

/*
 * After we have read the system tree and know devids belonging to this
 * filesystem, remove any device which does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_fs_devices *seed_dev;

	mutex_lock(&uuid_mutex);
	__btrfs_free_extra_devids(fs_devices, &latest_dev);

	list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
		__btrfs_free_extra_devids(seed_dev, &latest_dev);

	fs_devices->latest_dev = latest_dev;

	mutex_unlock(&uuid_mutex);
}

static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	blkdev_put(device->bdev, device->mode);
}

static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->devid == BTRFS_DEV_REPLACE_DEVID)
		clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
		clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		fs_devices->missing_devices--;
	}

	btrfs_close_bdev(device);
	if (device->bdev) {
		fs_devices->open_devices--;
		device->bdev = NULL;
	}
	clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	btrfs_destroy_dev_zone_info(device);

	device->fs_info = NULL;
	atomic_set(&device->dev_stats_ccnt, 0);
	extent_io_tree_release(&device->alloc_state);

	/*
	 * Reset the flush error record. We might have a transient flush error
	 * in this mount, and if so we aborted the current transaction and set
	 * the fs to an error state, guaranteeing no super blocks can be
	 * further committed. However that error might be transient and if we
	 * unmount the filesystem and mount it again, we should allow the
	 * mount to succeed (btrfs_check_rw_degradable() should not fail) - if
	 * after mounting the filesystem again we still get flush errors, then
	 * we will again abort any transaction and set the error state,
	 * guaranteeing no commits of unsafe super blocks.
	 */
	device->last_flush_error = 0;

	/* Verify the device is back in a pristine state */
	ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
	ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	ASSERT(list_empty(&device->dev_alloc_list));
	ASSERT(list_empty(&device->post_commit_list));
}

static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	lockdep_assert_held(&uuid_mutex);

	if (--fs_devices->opened > 0)
		return;

	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
		btrfs_close_one_device(device);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = false;
	fs_devices->fs_info = NULL;
}

void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	LIST_HEAD(list);
	struct btrfs_fs_devices *tmp;

	mutex_lock(&uuid_mutex);
	close_fs_devices(fs_devices);
	if (!fs_devices->opened)
		list_splice_init(&fs_devices->seed_list, &list);

	list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
		close_fs_devices(fs_devices);
		list_del(&fs_devices->seed_list);
		free_fs_devices(fs_devices);
	}
	mutex_unlock(&uuid_mutex);
}

static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_device *tmp_device;

	flags |= FMODE_EXCL;

	list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
				 dev_list) {
		int ret;

		ret = btrfs_open_one_device(fs_devices, device, flags, holder);
		if (ret == 0 &&
		    (!latest_dev || device->generation > latest_dev->generation)) {
			latest_dev = device;
		} else if (ret == -ENODATA) {
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);
		}
	}
	if (fs_devices->open_devices == 0)
		return -EINVAL;

	fs_devices->opened = 1;
	fs_devices->latest_dev = latest_dev;
	fs_devices->total_rw_bytes = 0;
	fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
	fs_devices->read_policy = BTRFS_READ_POLICY_PID;

	return 0;
}

static int devid_cmp(void *priv, const struct list_head *a,
		     const struct list_head *b)
{
	const struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);
	/*
	 * The device_list_mutex cannot be taken here in case opening the
	 * underlying device takes further locks like open_mutex.
	 *
	 * We also don't need the lock here as this is called during mount and
	 * exclusion is provided by uuid_mutex.
	 */

	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}

	return ret;
}
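
/*
 * Illustrative sketch (compiled out): btrfs_open_devices() must be called
 * with uuid_mutex held, as its lockdep assertion documents, while
 * btrfs_close_devices() takes the mutex by itself. Error handling is
 * reduced to the minimum.
 */
#if 0
static int example_open_close(struct btrfs_fs_devices *fs_devices,
			      void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_open_devices(fs_devices, FMODE_READ, holder);
	mutex_unlock(&uuid_mutex);
	if (ret)
		return ret;

	/* ... use the opened devices ... */

	btrfs_close_devices(fs_devices);
	return 0;
}
#endif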

void btrfs_release_disk_super(struct btrfs_super_block *super)
{
	struct page *page = virt_to_page(super);

	put_page(page);
}

static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
						       u64 bytenr, u64 bytenr_orig)
{
	struct btrfs_super_block *disk_super;
	struct page *page;
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= bdev_nr_bytes(bdev))
		return ERR_PTR(-EINVAL);

	/* make sure our super fits in the page */
	if (sizeof(*disk_super) > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
		return ERR_PTR(-EINVAL);

	/* pull in the page with our super */
	page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);
	if (IS_ERR(page))
		return ERR_CAST(page);

	p = page_address(page);

	/* align our pointer to the offset of the super block */
	disk_super = p + offset_in_page(bytenr);

	if (btrfs_super_bytenr(disk_super) != bytenr_orig ||
	    btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(p);
		return ERR_PTR(-EINVAL);
	}

	if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
		disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;

	return disk_super;
}

int btrfs_forget_devices(dev_t devt)
{
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_free_stale_devices(devt, NULL);
	mutex_unlock(&uuid_mutex);

	return ret;
}

/*
 * Look for a btrfs signature on a device. This may be called out of the mount
 * path and we are not allowed to call set_blocksize during the scan. The
 * superblock is read via the pagecache.
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
					   void *holder)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct block_device *bdev;
	u64 bytenr, bytenr_orig;
	int ret;

	lockdep_assert_held(&uuid_mutex);

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	flags |= FMODE_EXCL;

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	bytenr_orig = btrfs_sb_offset(0);
	ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr);
	if (ret) {
		device = ERR_PTR(ret);
		goto error_bdev_put;
	}

	disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig);
	if (IS_ERR(disk_super)) {
		device = ERR_CAST(disk_super);
		goto error_bdev_put;
	}

	device = device_list_add(path, disk_super, &new_device_added);
	if (!IS_ERR(device) && new_device_added)
		btrfs_free_stale_devices(device->devt, device);

	btrfs_release_disk_super(disk_super);

error_bdev_put:
	blkdev_put(bdev, flags);

	return device;
}
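
/*
 * Illustrative sketch (compiled out): registering a device the way the
 * device scan ioctl path does, by calling btrfs_scan_one_device() under
 * uuid_mutex. The path is an arbitrary example.
 */
#if 0
static int example_scan(const char *path, void *holder)
{
	struct btrfs_device *device;

	mutex_lock(&uuid_mutex);
	device = btrfs_scan_one_device(path, FMODE_READ, holder);
	mutex_unlock(&uuid_mutex);

	return PTR_ERR_OR_ZERO(device);
}
#endif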

/*
 * Try to find a chunk that intersects the [start, start + len] range and when
 * one such is found, record the end of it in *start
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{
	u64 physical_start, physical_end;

	lockdep_assert_held(&device->fs_info->chunk_mutex);

	if (!find_first_extent_bit(&device->alloc_state, *start,
				   &physical_start, &physical_end,
				   CHUNK_ALLOCATED, NULL)) {

		if (in_range(physical_start, *start, len) ||
		    in_range(*start, physical_start,
			     physical_end - physical_start)) {
			*start = physical_end + 1;
			return true;
		}
	}
	return false;
}

static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
{
	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		/*
		 * We don't want to overwrite the superblock on the drive nor
		 * any area used by the boot loader (grub for example), so we
		 * make sure to start at an offset of at least 1MB.
		 */
		return max_t(u64, start, SZ_1M);
	case BTRFS_CHUNK_ALLOC_ZONED:
		/*
		 * We don't care about the starting region like the regular
		 * allocator does, because we anyway use/reserve the first two
		 * zones for superblock logging.
		 */
		return ALIGN(start, device->zone_info->zone_size);
	default:
		BUG();
	}
}

static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
					u64 *hole_start, u64 *hole_size,
					u64 num_bytes)
{
	u64 zone_size = device->zone_info->zone_size;
	u64 pos;
	int ret;
	bool changed = false;

	ASSERT(IS_ALIGNED(*hole_start, zone_size));

	while (*hole_size > 0) {
		pos = btrfs_find_allocatable_zones(device, *hole_start,
						   *hole_start + *hole_size,
						   num_bytes);
		if (pos != *hole_start) {
			*hole_size = *hole_start + *hole_size - pos;
			*hole_start = pos;
			changed = true;
			if (*hole_size < num_bytes)
				break;
		}

		ret = btrfs_ensure_empty_zones(device, pos, num_bytes);

		/* Range is ensured to be empty */
		if (!ret)
			return changed;

		/* Given hole range was invalid (outside of device) */
		if (ret == -ERANGE) {
			*hole_start += *hole_size;
			*hole_size = 0;
			return true;
		}

		*hole_start += zone_size;
		*hole_size -= zone_size;
		changed = true;
	}

	return changed;
}

/**
 * dev_extent_hole_check - check if specified hole is suitable for allocation
 * @device:	the device which we have the hole
 * @hole_start: starting position of the hole
 * @hole_size:	the size of the hole
 * @num_bytes:	the size of the free space that we need
 *
 * This function may modify @hole_start and @hole_size to reflect the suitable
 * position for allocation. Returns true if the hole position was updated,
 * false otherwise.
 */
static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
				  u64 *hole_size, u64 num_bytes)
{
	bool changed = false;
	u64 hole_end = *hole_start + *hole_size;

	for (;;) {
		/*
		 * Check before we set max_hole_start, otherwise we could end up
		 * sending back this offset anyway.
		 */
		if (contains_pending_extent(device, hole_start, *hole_size)) {
			if (hole_end >= *hole_start)
				*hole_size = hole_end - *hole_start;
			else
				*hole_size = 0;
			changed = true;
		}

		switch (device->fs_devices->chunk_alloc_policy) {
		case BTRFS_CHUNK_ALLOC_REGULAR:
			/* No extra check */
			break;
		case BTRFS_CHUNK_ALLOC_ZONED:
			if (dev_extent_hole_check_zoned(device, hole_start,
							hole_size, num_bytes)) {
				changed = true;
				/*
				 * The changed hole can contain pending extent.
				 * Loop again to check that.
				 */
				continue;
			}
			break;
		default:
			BUG();
		}

		break;
	}

	return changed;
}

/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find it. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 *
 * NOTE: This function will search *commit* root of device tree, and does extra
 * check to ensure dev extents are not double allocated.
 * This makes the function safe to allocate dev extents but may not report
 * correct usable device space, as device extent freed in current transaction
 * is not reported as available.
 */
static int find_free_dev_extent_start(struct btrfs_device *device,
				      u64 num_bytes, u64 search_start,
				      u64 *start, u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	search_start = dev_extent_search_start(device, search_start);

	WARN_ON(device->zone_info &&
		!IS_ALIGNED(num_bytes, device->zone_info->zone_size));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_backwards(root, &key, path);
	if (ret < 0)
		goto out;

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;
			dev_extent_hole_check(device, &search_start, &hole_size,
					      num_bytes);

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;
		if (dev_extent_hole_check(device, &search_start, &hole_size,
					  num_bytes)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(device, num_bytes, 0, start, len);
}
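
/*
 * Illustrative sketch (compiled out): asking for a 1GiB hole on a device
 * with find_free_dev_extent(). The chunk_mutex must be held, as the helpers
 * above assert. On -ENOSPC the out parameters still describe the largest
 * hole that was found.
 */
#if 0
static int example_find_hole(struct btrfs_device *device)
{
	u64 start;
	u64 len;
	int ret;

	ret = find_free_dev_extent(device, SZ_1G, &start, &len);
	if (ret == -ENOSPC)
		pr_info("largest hole is only %llu bytes\n", len);
	else if (!ret)
		pr_info("found hole at %llu, %llu bytes\n", start, len);

	return ret;
}
#endif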

static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_device *device,
			  u64 start, u64 *dev_extent_len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret == 0)
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
out:
	btrfs_free_path(path);
	return ret;
}

static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;

	em_tree = &fs_info->mapping_tree;
	read_lock(&em_tree->lock);
	n = rb_last(&em_tree->map.rb_root);
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
	}
	read_unlock(&em_tree->lock);

	return ret;
}

static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	if (ret == 0) {
		/* Corruption */
		btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
		ret = -EUCLEAN;
		goto error;
	}

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
matched"); 1799 ret = -EUCLEAN; 1800 goto error; 1801 } 1802 1803 ret = btrfs_previous_item(fs_info->chunk_root, path, 1804 BTRFS_DEV_ITEMS_OBJECTID, 1805 BTRFS_DEV_ITEM_KEY); 1806 if (ret) { 1807 *devid_ret = 1; 1808 } else { 1809 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 1810 path->slots[0]); 1811 *devid_ret = found_key.offset + 1; 1812 } 1813 ret = 0; 1814 error: 1815 btrfs_free_path(path); 1816 return ret; 1817 } 1818 1819 /* 1820 * the device information is stored in the chunk root 1821 * the btrfs_device struct should be fully filled in 1822 */ 1823 static int btrfs_add_dev_item(struct btrfs_trans_handle *trans, 1824 struct btrfs_device *device) 1825 { 1826 int ret; 1827 struct btrfs_path *path; 1828 struct btrfs_dev_item *dev_item; 1829 struct extent_buffer *leaf; 1830 struct btrfs_key key; 1831 unsigned long ptr; 1832 1833 path = btrfs_alloc_path(); 1834 if (!path) 1835 return -ENOMEM; 1836 1837 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1838 key.type = BTRFS_DEV_ITEM_KEY; 1839 key.offset = device->devid; 1840 1841 btrfs_reserve_chunk_metadata(trans, true); 1842 ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path, 1843 &key, sizeof(*dev_item)); 1844 btrfs_trans_release_chunk_metadata(trans); 1845 if (ret) 1846 goto out; 1847 1848 leaf = path->nodes[0]; 1849 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 1850 1851 btrfs_set_device_id(leaf, dev_item, device->devid); 1852 btrfs_set_device_generation(leaf, dev_item, 0); 1853 btrfs_set_device_type(leaf, dev_item, device->type); 1854 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 1855 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 1856 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 1857 btrfs_set_device_total_bytes(leaf, dev_item, 1858 btrfs_device_get_disk_total_bytes(device)); 1859 btrfs_set_device_bytes_used(leaf, dev_item, 1860 btrfs_device_get_bytes_used(device)); 1861 btrfs_set_device_group(leaf, dev_item, 0); 1862 btrfs_set_device_seek_speed(leaf, dev_item, 0); 1863 btrfs_set_device_bandwidth(leaf, dev_item, 0); 1864 btrfs_set_device_start_offset(leaf, dev_item, 0); 1865 1866 ptr = btrfs_device_uuid(dev_item); 1867 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 1868 ptr = btrfs_device_fsid(dev_item); 1869 write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid, 1870 ptr, BTRFS_FSID_SIZE); 1871 btrfs_mark_buffer_dirty(leaf); 1872 1873 ret = 0; 1874 out: 1875 btrfs_free_path(path); 1876 return ret; 1877 } 1878 1879 /* 1880 * Function to update ctime/mtime for a given device path. 1881 * Mainly used for ctime/mtime based probe like libblkid. 1882 * 1883 * We don't care about errors here, this is just to be kind to userspace. 

static int btrfs_rm_dev_item(struct btrfs_device *device)
{
	struct btrfs_root *root = device->fs_info->chunk_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	btrfs_reserve_chunk_metadata(trans, false);
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	btrfs_trans_release_chunk_metadata(trans);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	}

out:
	btrfs_free_path(path);
	if (!ret)
		ret = btrfs_commit_transaction(trans);
	return ret;
}

/*
 * Verify that @num_devices satisfies the RAID profile constraints in the
 * whole filesystem. It's up to the caller to adjust that number regarding
 * e.g. device replace.
 */
static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
		u64 num_devices)
{
	u64 all_avail;
	unsigned seq;
	int i;

	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		all_avail = fs_info->avail_data_alloc_bits |
			    fs_info->avail_system_alloc_bits |
			    fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
		if (!(all_avail & btrfs_raid_array[i].bg_flag))
			continue;

		if (num_devices < btrfs_raid_array[i].devs_min)
			return btrfs_raid_array[i].mindev_error;
	}

	return 0;
}
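
/*
 * Illustrative example of the check above: a filesystem using raid1
 * (devs_min = 2 in btrfs_raid_array) mounted on exactly two devices cannot
 * lose one, so btrfs_check_raid_min_devices(fs_info, 1) returns
 * BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET.
 */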
/*
 * Helper function to check if the given device is part of s_bdev / latest_dev
 * and replace it with the provided or the next active device. In the context
 * where this function is called, there should always be another device (or
 * this_dev) which is active.
 */
void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
					    struct btrfs_device *next_device)
{
	struct btrfs_fs_info *fs_info = device->fs_info;

	if (!next_device)
		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
							    device);
	ASSERT(next_device);

	if (fs_info->sb->s_bdev &&
	    (fs_info->sb->s_bdev == device->bdev))
		fs_info->sb->s_bdev = next_device->bdev;

	if (fs_info->fs_devices->latest_dev->bdev == device->bdev)
		fs_info->fs_devices->latest_dev = next_device;
}

/*
 * Return btrfs_fs_devices::num_devices excluding the device that's being
 * currently replaced.
 */
static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
{
	u64 num_devices = fs_info->fs_devices->num_devices;

	down_read(&fs_info->dev_replace.rwsem);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
		ASSERT(num_devices > 1);
		num_devices--;
	}
	up_read(&fs_info->dev_replace.rwsem);

	return num_devices;
}

void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
			       struct block_device *bdev,
			       const char *device_path)
{
	struct btrfs_super_block *disk_super;
	int copy_num;

	if (!bdev)
		return;

	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
		struct page *page;
		int ret;

		disk_super = btrfs_read_dev_one_super(bdev, copy_num);
		if (IS_ERR(disk_super))
			continue;

		if (bdev_is_zoned(bdev)) {
			btrfs_reset_sb_log_zones(bdev, copy_num);
			continue;
		}

		memset(&disk_super->magic, 0, sizeof(disk_super->magic));

		page = virt_to_page(disk_super);
		set_page_dirty(page);
		lock_page(page);
		/* write_one_page() unlocks the page */
		ret = write_one_page(page);
		if (ret)
			btrfs_warn(fs_info,
				   "error clearing superblock number %d (%d)",
				   copy_num, ret);
		btrfs_release_disk_super(disk_super);
	}

	/* Notify udev that device has changed */
	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);

	/* Update ctime/mtime for device path for libblkid */
	update_dev_time(device_path);
}
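/*
 * Typical use (sketch): once a device has been removed from the filesystem,
 * its superblocks are scratched so that a later scan does not resurrect it
 * as a member of the old fsid, e.g. as done by btrfs_rm_device() below:
 *
 *	btrfs_scratch_superblocks(fs_info, device->bdev, device->name->str);
 *
 * With the magic cleared on every copy, a device scan ignores the device,
 * and udev/libblkid re-probe it via the uevent and the timestamp update.
 */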
int btrfs_rm_device(struct btrfs_fs_info *fs_info,
		    struct btrfs_dev_lookup_args *args,
		    struct block_device **bdev, fmode_t *mode)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 num_devices;
	int ret = 0;

	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
		btrfs_err(fs_info, "device remove not supported on extent tree v2 yet");
		return -EINVAL;
	}

	/*
	 * The device list in fs_devices is accessed without locks (neither
	 * uuid_mutex nor device_list_mutex) as it won't change on a mounted
	 * filesystem and another device rm cannot run.
	 */
	num_devices = btrfs_num_devices(fs_info);

	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
	if (ret)
		goto out;

	device = btrfs_find_device(fs_info->fs_devices, args);
	if (!device) {
		if (args->missing)
			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
		else
			ret = -ENOENT;
		goto out;
	}

	if (btrfs_pinned_by_swapfile(fs_info, device)) {
		btrfs_warn_in_rcu(fs_info,
		  "cannot remove device %s (devid %llu) due to active swapfile",
				  rcu_str_deref(device->name), device->devid);
		ret = -ETXTBSY;
		goto out;
	}

	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
		goto out;
	}

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    fs_info->fs_devices->rw_devices == 1) {
		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
		goto out;
	}

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		mutex_lock(&fs_info->chunk_mutex);
		list_del_init(&device->dev_alloc_list);
		device->fs_devices->rw_devices--;
		mutex_unlock(&fs_info->chunk_mutex);
	}

	ret = btrfs_shrink_device(device, 0);
	if (ret)
		goto error_undo;

	/*
	 * TODO: the superblock still includes this device in its num_devices
	 * counter although write_all_supers() is not locked out. This
	 * could give a filesystem state which requires a degraded mount.
	 */
	ret = btrfs_rm_dev_item(device);
	if (ret)
		goto error_undo;

	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	btrfs_scrub_cancel_dev(device);

	/*
	 * The device list mutex makes sure that we don't change the device
	 * list while someone else is writing out all the device supers.
	 * Whoever is writing all supers should lock the device list mutex
	 * before getting the number of devices in the super block
	 * (super_copy). Conversely, whoever updates the number of devices in
	 * the super block (super_copy) should hold the device list mutex.
	 */

	/*
	 * In normal cases cur_devices == fs_devices. But in case of deleting
	 * a seed device, cur_devices should point to its own fs_devices
	 * listed under fs_devices->seed_list.
	 */
	cur_devices = device->fs_devices;
	mutex_lock(&fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	cur_devices->num_devices--;
	cur_devices->total_devices--;
	/* Update total_devices of the parent fs_devices if it's seed */
	if (cur_devices != fs_devices)
		fs_devices->total_devices--;

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
		cur_devices->missing_devices--;

	btrfs_assign_next_active_device(device, NULL);

	if (device->bdev) {
		cur_devices->open_devices--;
		/* remove sysfs entry */
		btrfs_sysfs_remove_device(device);
	}

	num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
	mutex_unlock(&fs_devices->device_list_mutex);

	/*
	 * At this point, the device is zero sized and detached from the
	 * devices list. All that's left is to zero out the old supers and
	 * free the device.
	 *
	 * We cannot call btrfs_close_bdev() here because we're holding the sb
	 * write lock, and blkdev_put() will pull in the ->open_mutex on the
	 * block device and its dependencies. Instead just flush the device
	 * and let the caller do the final blkdev_put.
	 */
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		btrfs_scratch_superblocks(fs_info, device->bdev,
					  device->name->str);
		if (device->bdev) {
			sync_blockdev(device->bdev);
			invalidate_bdev(device->bdev);
		}
	}

	*bdev = device->bdev;
	*mode = device->mode;
	synchronize_rcu();
	btrfs_free_device(device);

	/*
	 * This can happen if cur_devices is the private seed devices list. We
	 * cannot call close_fs_devices() here because it expects the uuid_mutex
	 * to be held, but in fact we don't need that for the private
	 * seed_devices: we can simply decrement cur_devices->opened and then
	 * remove it from our list and free the fs_devices.
	 */
	if (cur_devices->num_devices == 0) {
		list_del_init(&cur_devices->seed_list);
		ASSERT(cur_devices->opened == 1);
		cur_devices->opened--;
		free_fs_devices(cur_devices);
	}

out:
	return ret;

error_undo:
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		mutex_lock(&fs_info->chunk_mutex);
		list_add(&device->dev_alloc_list,
			 &fs_devices->alloc_list);
		device->fs_devices->rw_devices++;
		mutex_unlock(&fs_info->chunk_mutex);
	}
	goto out;
}

void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices;

	lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);

	/*
	 * In case of an fs with no seed, srcdev->fs_devices will point to the
	 * fs_devices of fs_info. However, when the dev being replaced is a
	 * seed dev it will point to the seed's local fs_devices. In short,
	 * srcdev will have its correct fs_devices in both cases.
	 */
	fs_devices = srcdev->fs_devices;

	list_del_rcu(&srcdev->dev_list);
	list_del(&srcdev->dev_alloc_list);
	fs_devices->num_devices--;
	if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
		fs_devices->missing_devices--;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
		fs_devices->rw_devices--;

	if (srcdev->bdev)
		fs_devices->open_devices--;
}

void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;

	mutex_lock(&uuid_mutex);

	btrfs_close_bdev(srcdev);
	synchronize_rcu();
	btrfs_free_device(srcdev);

	/* If there are no more devs we'd rather delete the fs_devices */
	if (!fs_devices->num_devices) {
		/*
		 * On a mounted FS, num_devices can't be zero unless it's a
		 * seed. In case of a seed device being replaced, the replace
		 * target is added to the sprout FS, so there will be no
		 * device left under the seed FS.
		 */
		ASSERT(fs_devices->seeding);

		list_del_init(&fs_devices->seed_list);
		close_fs_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	mutex_unlock(&uuid_mutex);
}

void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
{
	struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;

	mutex_lock(&fs_devices->device_list_mutex);

	btrfs_sysfs_remove_device(tgtdev);

	if (tgtdev->bdev)
		fs_devices->open_devices--;

	fs_devices->num_devices--;

	btrfs_assign_next_active_device(tgtdev, NULL);

	list_del_rcu(&tgtdev->dev_list);

	mutex_unlock(&fs_devices->device_list_mutex);

	btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev,
				  tgtdev->name->str);

	btrfs_close_bdev(tgtdev);
	synchronize_rcu();
	btrfs_free_device(tgtdev);
}

/**
 * Populate args from device at path
 *
 * @fs_info:	the filesystem
 * @args:	the args to populate
 * @path:	the path to the device
 *
 * This will read the super block of the device at @path and populate @args
 * with the devid, fsid, and uuid. This is meant to be used for ioctls that
 * need to lookup a device to operate on, but need to do it before we take any
 * locks. This properly handles the special case of "missing" that a user may
 * pass in, and does some basic sanity checks. The caller must make sure that
 * @path is properly NUL terminated before calling in, and must call
 * btrfs_put_dev_args_from_path() in order to free up the temporary fsid and
 * uuid buffers.
 *
 * Return: 0 for success, -errno for failure
 */
int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
				 struct btrfs_dev_lookup_args *args,
				 const char *path)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	int ret;

	if (!path || !path[0])
		return -EINVAL;
	if (!strcmp(path, "missing")) {
		args->missing = true;
		return 0;
	}

	args->uuid = kzalloc(BTRFS_UUID_SIZE, GFP_KERNEL);
	args->fsid = kzalloc(BTRFS_FSID_SIZE, GFP_KERNEL);
	if (!args->uuid || !args->fsid) {
		btrfs_put_dev_args_from_path(args);
		return -ENOMEM;
	}

	ret = btrfs_get_bdev_and_sb(path, FMODE_READ, fs_info->bdev_holder, 0,
				    &bdev, &disk_super);
	if (ret)
		return ret;
	args->devid = btrfs_stack_device_id(&disk_super->dev_item);
	memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE);
	if (btrfs_fs_incompat(fs_info, METADATA_UUID))
		memcpy(args->fsid, disk_super->metadata_uuid, BTRFS_FSID_SIZE);
	else
		memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
	btrfs_release_disk_super(disk_super);
	blkdev_put(bdev, FMODE_READ);
	return 0;
}
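/*
 * Example pairing (a sketch mirroring btrfs_find_device_by_devspec() below):
 * the args carry kzalloc'ed uuid/fsid buffers, so a successful
 * btrfs_get_dev_args_from_path() must be followed by
 * btrfs_put_dev_args_from_path() once the lookup is done:
 *
 *	BTRFS_DEV_LOOKUP_ARGS(args);
 *
 *	ret = btrfs_get_dev_args_from_path(fs_info, &args, path);
 *	if (ret)
 *		return ERR_PTR(ret);
 *	device = btrfs_find_device(fs_info->fs_devices, &args);
 *	btrfs_put_dev_args_from_path(&args);
 */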
/*
 * Only use this jointly with btrfs_get_dev_args_from_path() because we will
 * allocate our ->uuid and ->fsid pointers; everybody else uses local
 * variables that don't need to be freed.
 */
void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args)
{
	kfree(args->uuid);
	kfree(args->fsid);
	args->uuid = NULL;
	args->fsid = NULL;
}

struct btrfs_device *btrfs_find_device_by_devspec(
		struct btrfs_fs_info *fs_info, u64 devid,
		const char *device_path)
{
	BTRFS_DEV_LOOKUP_ARGS(args);
	struct btrfs_device *device;
	int ret;

	if (devid) {
		args.devid = devid;
		device = btrfs_find_device(fs_info->fs_devices, &args);
		if (!device)
			return ERR_PTR(-ENOENT);
		return device;
	}

	ret = btrfs_get_dev_args_from_path(fs_info, &args, device_path);
	if (ret)
		return ERR_PTR(ret);
	device = btrfs_find_device(fs_info->fs_devices, &args);
	btrfs_put_dev_args_from_path(&args);
	if (!device)
		return ERR_PTR(-ENOENT);
	return device;
}

static struct btrfs_fs_devices *btrfs_init_sprout(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;

	lockdep_assert_held(&uuid_mutex);
	if (!fs_devices->seeding)
		return ERR_PTR(-EINVAL);

	/*
	 * Private copy of the seed devices, anchored at
	 * fs_info->fs_devices->seed_list
	 */
	seed_devices = alloc_fs_devices(NULL, NULL);
	if (IS_ERR(seed_devices))
		return seed_devices;

	/*
	 * It's necessary to retain a copy of the original seed fs_devices in
	 * fs_uuids so that filesystems which have been seeded can successfully
	 * reference the seed device from open_seed_devices. This also supports
	 * multiple seed filesystems.
	 */
	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return old_devices;
	}

	list_add(&old_devices->fs_list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);

	return seed_devices;
}
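/*
 * Resulting topology (illustrative diagram, not from the original source):
 * after btrfs_init_sprout() and the splice in btrfs_setup_sprout() below,
 * the mounted fs_devices becomes the writable sprout and the seed devices
 * hang off its seed_list, while fs_uuids keeps a clone of the original seed
 * fs_devices so other mounts can still find it:
 *
 *	fs_info->fs_devices              (new fsid, rw)
 *	    `-> seed_list -> seed_devices (old fsid, opened == 1)
 *	fs_uuids -> old_devices           (clone of the original seed)
 */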
/*
 * Splice seed devices into the sprout fs_devices.
 * Generate a new fsid for the sprouted read-write filesystem.
 */
static void btrfs_setup_sprout(struct btrfs_fs_info *fs_info,
			       struct btrfs_fs_devices *seed_devices)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_super_block *disk_super = fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	/*
	 * We are updating the fsid, the thread leading to device_list_add()
	 * could race, so uuid_mutex is needed.
	 */
	lockdep_assert_held(&uuid_mutex);

	/*
	 * The threads listed below may traverse dev_list but can do that
	 * without device_list_mutex:
	 * - All device ops and balance - as we are in btrfs_exclop_start.
	 * - Various dev_list readers - are using RCU.
	 * - btrfs_ioctl_fitrim() - is using RCU.
	 *
	 * The following read-only threads use device_list_mutex:
	 * - Readonly scrub btrfs_scrub_dev()
	 * - Readonly scrub btrfs_scrub_progress()
	 * - btrfs_get_dev_stats()
	 */
	lockdep_assert_held(&fs_devices->device_list_mutex);

	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
			     synchronize_rcu);
	list_for_each_entry(device, &seed_devices->devices, dev_list)
		device->fs_devices = seed_devices;

	fs_devices->seeding = false;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->missing_devices = 0;
	fs_devices->rotating = false;
	list_add(&seed_devices->seed_list, &fs_devices->seed_list);

	generate_random_uuid(fs_devices->fsid);
	memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);

	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);
}

/*
 * Store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
{
	BTRFS_DEV_LOOKUP_ARGS(args);
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_FSID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		btrfs_reserve_chunk_metadata(trans, false);
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		btrfs_trans_release_chunk_metadata(trans);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		args.devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
				   BTRFS_FSID_SIZE);
		args.uuid = dev_uuid;
		args.fsid = fs_uuid;
		device = btrfs_find_device(fs_info->fs_devices, &args);
		BUG_ON(!device); /* Logic error */

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
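/*
 * Userspace view of the sprout path implemented in btrfs_init_new_device()
 * below (a sketch; the commands are the usual btrfs-progs tools and are not
 * defined in this file):
 *
 *	# btrfstune -S 1 /dev/seed	<- mark the filesystem as seed
 *	# mount /dev/seed /mnt		<- mounts read-only, seeding stays set
 *	# btrfs device add /dev/sprout /mnt
 *
 * The device add detects fs_devices->seeding and runs btrfs_init_sprout(),
 * btrfs_setup_sprout() and btrfs_finish_sprout() under a fresh fsid.
 */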
int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
{
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct super_block *sb = fs_info->sb;
	struct rcu_string *name;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_fs_devices *seed_devices;
	u64 orig_super_total_bytes;
	u64 orig_super_num_devices;
	int ret = 0;
	bool seeding_dev = false;
	bool locked = false;

	if (sb_rdonly(sb) && !fs_devices->seeding)
		return -EROFS;

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (!btrfs_check_device_zone_type(fs_info, bdev)) {
		ret = -EINVAL;
		goto error;
	}

	if (fs_devices->seeding) {
		seeding_dev = true;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
		locked = true;
	}

	sync_blockdev(bdev);

	rcu_read_lock();
	list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			rcu_read_unlock();
			goto error;
		}
	}
	rcu_read_unlock();

	device = btrfs_alloc_device(fs_info, NULL, NULL);
	if (IS_ERR(device)) {
		/* we can safely leave the fs_devices entry around */
		ret = PTR_ERR(device);
		goto error;
	}

	name = rcu_string_strdup(device_path, GFP_KERNEL);
	if (!name) {
		ret = -ENOMEM;
		goto error_free_device;
	}
	rcu_assign_pointer(device->name, name);

	device->fs_info = fs_info;
	device->bdev = bdev;
	ret = lookup_bdev(device_path, &device->devt);
	if (ret)
		goto error_free_device;

	ret = btrfs_get_dev_zone_info(device, false);
	if (ret)
		goto error_free_device;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto error_free_zone;
	}

	set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	device->generation = trans->transid;
	device->io_width = fs_info->sectorsize;
	device->io_align = fs_info->sectorsize;
	device->sector_size = fs_info->sectorsize;
	device->total_bytes =
		round_down(bdev_nr_bytes(bdev), fs_info->sectorsize);
	device->disk_total_bytes = device->total_bytes;
	device->commit_total_bytes = device->total_bytes;
	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
	device->mode = FMODE_EXCL;
	device->dev_stats_valid = 1;
	set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);

	if (seeding_dev) {
		btrfs_clear_sb_rdonly(sb);

		/* GFP_KERNEL allocation must not be under device_list_mutex */
		seed_devices = btrfs_init_sprout(fs_info);
		if (IS_ERR(seed_devices)) {
			ret = PTR_ERR(seed_devices);
			btrfs_abort_transaction(trans, ret);
			goto error_trans;
		}
	}

	mutex_lock(&fs_devices->device_list_mutex);
	if (seeding_dev) {
		btrfs_setup_sprout(fs_info, seed_devices);
		btrfs_assign_next_active_device(fs_info->fs_devices->latest_dev,
						device);
	}

	device->fs_devices = fs_devices;

	mutex_lock(&fs_info->chunk_mutex);
	list_add_rcu(&device->dev_list, &fs_devices->devices);
	list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
	fs_devices->num_devices++;
	fs_devices->open_devices++;
	fs_devices->rw_devices++;
	fs_devices->total_devices++;
	fs_devices->total_rw_bytes += device->total_bytes;

	atomic64_add(device->total_bytes, &fs_info->free_chunk_space);

	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
		fs_devices->rotating = true;

	orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
	btrfs_set_super_total_bytes(fs_info->super_copy,
		round_down(orig_super_total_bytes + device->total_bytes,
			   fs_info->sectorsize));

	orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
	btrfs_set_super_num_devices(fs_info->super_copy,
				    orig_super_num_devices + 1);

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(fs_info);

	mutex_unlock(&fs_info->chunk_mutex);

	/* Add sysfs device entry */
	btrfs_sysfs_add_device(device);

	mutex_unlock(&fs_devices->device_list_mutex);

	if (seeding_dev) {
		mutex_lock(&fs_info->chunk_mutex);
		ret = init_first_rw_device(trans);
		mutex_unlock(&fs_info->chunk_mutex);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto error_sysfs;
		}
	}

	ret = btrfs_add_dev_item(trans, device);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto error_sysfs;
	}

	if (seeding_dev) {
		ret = btrfs_finish_sprout(trans);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto error_sysfs;
		}

		/*
		 * fs_devices now represents the newly sprouted filesystem and
		 * its fsid has been changed by btrfs_setup_sprout().
		 */
		btrfs_sysfs_update_sprout_fsid(fs_devices);
	}

	ret = btrfs_commit_transaction(trans);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
		locked = false;

		if (ret) /* transaction commit */
			return ret;

		ret = btrfs_relocate_sys_chunks(fs_info);
		if (ret < 0)
			btrfs_handle_fs_error(fs_info, ret,
				    "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) == -ENOENT)
				return 0;
			ret = PTR_ERR(trans);
			trans = NULL;
			goto error_sysfs;
		}
		ret = btrfs_commit_transaction(trans);
	}

	/*
	 * Now that we have written a new super block to this device, check all
	 * other fs_devices lists to see if device_path alienates any other
	 * scanned device. We can ignore the return value as it typically
	 * returns -EINVAL and only succeeds if the device was an alien.
	 */
	btrfs_forget_devices(device->devt);

	/* Update ctime/mtime for blkid or udev */
	update_dev_time(device_path);

	return ret;

error_sysfs:
	btrfs_sysfs_remove_device(device);
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	mutex_lock(&fs_info->chunk_mutex);
	list_del_rcu(&device->dev_list);
	list_del(&device->dev_alloc_list);
	fs_info->fs_devices->num_devices--;
	fs_info->fs_devices->open_devices--;
	fs_info->fs_devices->rw_devices--;
	fs_info->fs_devices->total_devices--;
	fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
	atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
	btrfs_set_super_total_bytes(fs_info->super_copy,
				    orig_super_total_bytes);
	btrfs_set_super_num_devices(fs_info->super_copy,
				    orig_super_num_devices);
	mutex_unlock(&fs_info->chunk_mutex);
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
error_trans:
	if (seeding_dev)
		btrfs_set_sb_rdonly(sb);
	if (trans)
		btrfs_end_transaction(trans);
error_free_zone:
	btrfs_destroy_dev_zone_info(device);
error_free_device:
	btrfs_free_device(device);
error:
	blkdev_put(bdev, FMODE_EXCL);
	if (locked) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	return ret;
}

static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->fs_info->chunk_root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}
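/*
 * Size arithmetic sketch for btrfs_grow_device() below: every size that ends
 * up on disk stays sector aligned. E.g. with a 4K sectorsize and
 * new_size = 10 GiB + 1000 bytes:
 *
 *	new_size = round_down(new_size, fs_info->sectorsize);	<- 10 GiB
 *	diff = round_down(new_size - device->total_bytes, sectorsize);
 *	super total_bytes = round_down(old_total + diff, sectorsize);
 */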
int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	u64 old_total;
	u64 diff;
	int ret;

	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
		return -EACCES;

	new_size = round_down(new_size, fs_info->sectorsize);

	mutex_lock(&fs_info->chunk_mutex);
	old_total = btrfs_super_total_bytes(super_copy);
	diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);

	if (new_size <= device->total_bytes ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		mutex_unlock(&fs_info->chunk_mutex);
		return -EINVAL;
	}

	btrfs_set_super_total_bytes(super_copy,
			round_down(old_total + diff, fs_info->sectorsize));
	device->fs_devices->total_rw_bytes += diff;

	btrfs_device_set_total_bytes(device, new_size);
	btrfs_device_set_disk_total_bytes(device, new_size);
	btrfs_clear_space_info_full(device->fs_info);
	if (list_empty(&device->post_commit_list))
		list_add_tail(&device->post_commit_list,
			      &trans->transaction->dev_update_list);
	mutex_unlock(&fs_info->chunk_mutex);

	btrfs_reserve_chunk_metadata(trans, false);
	ret = btrfs_update_device(trans, device);
	btrfs_trans_release_chunk_metadata(trans);

	return ret;
}

static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = fs_info->chunk_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	else if (ret > 0) { /* Logic error or corruption */
		btrfs_handle_fs_error(fs_info, -ENOENT,
				      "Failed lookup while freeing chunk.");
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret < 0)
		btrfs_handle_fs_error(fs_info, ret,
				      "Failed to delete chunk item.");
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	lockdep_assert_held(&fs_info->chunk_mutex);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	return ret;
}
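/*
 * On-disk layout walked by btrfs_del_sys_chunk() above (sketch): the
 * superblock's sys_chunk_array is a packed sequence of key/chunk pairs, so
 * each iteration advances by sizeof(struct btrfs_disk_key) plus
 * btrfs_chunk_item_size(num_stripes):
 *
 *	| disk_key | chunk + stripes[0..N-1] | disk_key | chunk + ... |
 *
 * Deleting an entry memmove()s the tail down over it and shrinks the
 * sys_array_size recorded in the super block.
 */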
/*
 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
 *
 * @fs_info: the filesystem
 * @logical: Logical block offset in bytes.
 * @length:  Length of extent in bytes.
 *
 * Return: Chunk mapping or ERR_PTR.
 */
struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
				       u64 logical, u64 length)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;

	em_tree = &fs_info->mapping_tree;
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, length);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_crit(fs_info, "unable to find logical %llu length %llu",
			   logical, length);
		return ERR_PTR(-EINVAL);
	}

	if (em->start > logical || em->start + em->len < logical) {
		btrfs_crit(fs_info,
			   "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
			   logical, length, em->start, em->start + em->len);
		free_extent_map(em);
		return ERR_PTR(-EINVAL);
	}

	/* Callers are responsible for dropping em's ref. */
	return em;
}

static int remove_chunk_item(struct btrfs_trans_handle *trans,
			     struct map_lookup *map, u64 chunk_offset)
{
	int i;

	/*
	 * Removing chunk items and updating the device items in the chunks
	 * btree requires holding the chunk_mutex.
	 * See the comment at btrfs_chunk_alloc() for the details.
	 */
	lockdep_assert_held(&trans->fs_info->chunk_mutex);

	for (i = 0; i < map->num_stripes; i++) {
		int ret;

		ret = btrfs_update_device(trans, map->stripes[i].dev);
		if (ret)
			return ret;
	}

	return btrfs_free_chunk(trans, chunk_offset);
}
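/*
 * Reference pattern for btrfs_get_chunk_map() (a sketch of the caller
 * contract noted above - the returned extent_map carries a reference the
 * caller must drop):
 *
 *	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *	map = em->map_lookup;
 *	... use map->stripes[], map->num_stripes ...
 *	free_extent_map(em);	<- once for us
 *
 * btrfs_remove_chunk() below follows exactly this shape.
 */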
int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct extent_map *em;
	struct map_lookup *map;
	u64 dev_extent_len = 0;
	int i, ret = 0;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;

	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
	if (IS_ERR(em)) {
		/*
		 * This is a logic error, but we don't want to just rely on the
		 * user having built with ASSERT enabled, so if ASSERT doesn't
		 * do anything we still error out.
		 */
		ASSERT(0);
		return PTR_ERR(em);
	}
	map = em->map_lookup;

	/*
	 * First delete the device extent items from the devices btree.
	 * We take the device_list_mutex to avoid racing with the finishing phase
	 * of a device replace operation. See the comment below before acquiring
	 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex
	 * because that can result in a deadlock when deleting the device extent
	 * items from the devices btree - COWing an extent buffer from the btree
	 * may result in allocating a new metadata chunk, which would attempt to
	 * lock again fs_info->chunk_mutex.
	 */
	mutex_lock(&fs_devices->device_list_mutex);
	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_device *device = map->stripes[i].dev;
		ret = btrfs_free_dev_extent(trans, device,
					    map->stripes[i].physical,
					    &dev_extent_len);
		if (ret) {
			mutex_unlock(&fs_devices->device_list_mutex);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		if (device->bytes_used > 0) {
			mutex_lock(&fs_info->chunk_mutex);
			btrfs_device_set_bytes_used(device,
					device->bytes_used - dev_extent_len);
			atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
			btrfs_clear_space_info_full(fs_info);
			mutex_unlock(&fs_info->chunk_mutex);
		}
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	/*
	 * We acquire fs_info->chunk_mutex for 2 reasons:
	 *
	 * 1) Just like with the first phase of the chunk allocation, we must
	 *    reserve system space, do all chunk btree updates and deletions, and
	 *    update the system chunk array in the superblock while holding this
	 *    mutex. This is for similar reasons as explained on the comment at
	 *    the top of btrfs_chunk_alloc();
	 *
	 * 2) Prevent races with the final phase of a device replace operation
	 *    that replaces the device object associated with the map's stripes,
	 *    because the device object's id can change at any time during that
	 *    final phase of the device replace operation
	 *    (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
	 *    replaced device and then see it with an ID of
	 *    BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating
	 *    the device item, which does not exist on the chunk btree.
	 *    The finishing phase of device replace acquires both the
	 *    device_list_mutex and the chunk_mutex, in that order, so we are
	 *    safe by just acquiring the chunk_mutex.
	 */
	trans->removing_chunk = true;
	mutex_lock(&fs_info->chunk_mutex);

	check_system_chunk(trans, map->type);

	ret = remove_chunk_item(trans, map, chunk_offset);
	/*
	 * Normally we should not get -ENOSPC since we reserved space before
	 * through the call to check_system_chunk().
	 *
	 * Despite our system space_info having enough free space, we may not
	 * be able to allocate extents from its block groups, because all have
	 * an incompatible profile, which will force us to allocate a new system
	 * block group with the right profile, or right after we called
	 * check_system_chunk() above, a scrub turned the only system block
	 * group with enough free space into RO mode.
	 * This is explained with more detail at do_chunk_alloc().
	 *
	 * So if we get -ENOSPC, allocate a new system chunk and retry once.
	 */
	if (ret == -ENOSPC) {
		const u64 sys_flags = btrfs_system_alloc_profile(fs_info);
		struct btrfs_block_group *sys_bg;

		sys_bg = btrfs_create_chunk(trans, sys_flags);
		if (IS_ERR(sys_bg)) {
			ret = PTR_ERR(sys_bg);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		ret = remove_chunk_item(trans, map, chunk_offset);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	} else if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}

	mutex_unlock(&fs_info->chunk_mutex);
	trans->removing_chunk = false;

	/*
	 * We are done with chunk btree updates and deletions, so release the
	 * system space we previously reserved (with check_system_chunk()).
	 */
	btrfs_trans_release_chunk_metadata(trans);

	ret = btrfs_remove_block_group(trans, chunk_offset, em);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

out:
	if (trans->removing_chunk) {
		mutex_unlock(&fs_info->chunk_mutex);
		trans->removing_chunk = false;
	}
	/* once for us */
	free_extent_map(em);
	return ret;
}

int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_block_group *block_group;
	u64 length;
	int ret;

	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
		btrfs_err(fs_info,
			  "relocate: not supported on extent tree v2 yet");
		return -EINVAL;
	}

	/*
	 * Prevent races with automatic removal of unused block groups.
	 * After we relocate and before we remove the chunk with offset
	 * chunk_offset, automatic removal of the block group can kick in,
	 * resulting in a failure when calling btrfs_remove_chunk() below.
	 *
	 * Make sure to acquire this mutex before doing a tree search (dev
	 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
	 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
	 * we release the path used to search the chunk/dev tree and before
	 * the current task acquires this mutex and calls us.
	 */
	lockdep_assert_held(&fs_info->reclaim_bgs_lock);

	/* step one, relocate all the extents inside this chunk */
	btrfs_scrub_pause(fs_info);
	ret = btrfs_relocate_block_group(fs_info, chunk_offset);
	btrfs_scrub_continue(fs_info);
	if (ret)
		return ret;

	block_group = btrfs_lookup_block_group(fs_info, chunk_offset);
	if (!block_group)
		return -ENOENT;
	btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
	length = block_group->length;
	btrfs_put_block_group(block_group);

	/*
	 * On a zoned file system, discard the whole block group, this will
	 * trigger a REQ_OP_ZONE_RESET operation on the device zone. If
	 * resetting the zone fails, don't treat it as a fatal problem from the
	 * filesystem's point of view.
	 */
	if (btrfs_is_zoned(fs_info)) {
		ret = btrfs_discard_extent(fs_info, chunk_offset, length, NULL);
		if (ret)
			btrfs_info(fs_info,
				   "failed to reset zone %llu after relocation",
				   chunk_offset);
	}

	trans = btrfs_start_trans_remove_block_group(root->fs_info,
						     chunk_offset);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		btrfs_handle_fs_error(root->fs_info, ret, NULL);
		return ret;
	}

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	ret = btrfs_remove_chunk(trans, chunk_offset);
	btrfs_end_transaction(trans);
	return ret;
}

static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_type;
	bool retried = false;
	int failed = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		mutex_lock(&fs_info->reclaim_bgs_lock);
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&fs_info->reclaim_bgs_lock);
			goto error;
		}
		BUG_ON(ret == 0); /* Corruption */

		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);
		if (ret)
			mutex_unlock(&fs_info->reclaim_bgs_lock);
		if (ret < 0)
			goto error;
		if (ret > 0)
			break;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		btrfs_release_path(path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(fs_info, found_key.offset);
			if (ret == -ENOSPC)
				failed++;
			else
				BUG_ON(ret);
		}
		mutex_unlock(&fs_info->reclaim_bgs_lock);

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (WARN_ON(failed && retried)) {
		ret = -ENOSPC;
	}
error:
	btrfs_free_path(path);
	return ret;
}
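/*
 * Iteration sketch for btrfs_relocate_sys_chunks() above: the search key
 * starts past the highest possible chunk offset and the loop walks the
 * chunk tree backwards, so system chunks are visited from highest to lowest
 * offset, with one full retry pass if any relocation hit -ENOSPC:
 *
 *	key.offset = (u64)-1;			<- start past the end
 *	...
 *	key.offset = found_key.offset - 1;	<- step to the next lower chunk
 */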
/*
 * Return 1  if a data chunk was allocated successfully,
 * return <0 on error while allocating a data chunk,
 * return 0  if there was no need to allocate a data chunk.
 */
static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
				      u64 chunk_offset)
{
	struct btrfs_block_group *cache;
	u64 bytes_used;
	u64 chunk_type;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	ASSERT(cache);
	chunk_type = cache->flags;
	btrfs_put_block_group(cache);

	if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA))
		return 0;

	spin_lock(&fs_info->data_sinfo->lock);
	bytes_used = fs_info->data_sinfo->bytes_used;
	spin_unlock(&fs_info->data_sinfo->lock);

	if (!bytes_used) {
		struct btrfs_trans_handle *trans;
		int ret;

		trans = btrfs_join_transaction(fs_info->tree_root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA);
		btrfs_end_transaction(trans);
		if (ret < 0)
			return ret;
		return 1;
	}

	return 0;
}

static int insert_balance_item(struct btrfs_fs_info *fs_info,
			       struct btrfs_balance_control *bctl)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));

	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
	btrfs_set_balance_data(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
	btrfs_set_balance_meta(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
	btrfs_set_balance_sys(leaf, item, &disk_bargs);

	btrfs_set_balance_flags(leaf, item, bctl->flags);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans);
	if (err && !ret)
		ret = err;
	return ret;
}

static int del_balance_item(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction_fallback_global_rsv(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans);
	if (err && !ret)
		ret = err;
	return ret;
}

/*
 * This is a heuristic used to reduce the number of chunks balanced on
 * resume after balance was interrupted.
 */
static void update_balance_args(struct btrfs_balance_control *bctl)
{
	/*
	 * Turn on soft mode for chunk types that were being converted.
	 */
	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;

	/*
	 * Turn on usage filter if it is not already used. The idea is
	 * that chunks that we have already balanced should be
	 * reasonably full. Don't do it for chunks that are being
	 * converted - that will keep us from relocating unconverted
	 * (albeit full) chunks.
	 */
	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->data.usage = 90;
	}
	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->sys.usage = 90;
	}
	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->meta.usage = 90;
	}
}

/*
 * Clear the balance status in fs_info and delete the balance item from disk.
 */
static void reset_balance_state(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	int ret;

	BUG_ON(!fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = NULL;
	spin_unlock(&fs_info->balance_lock);

	kfree(bctl);
	ret = del_balance_item(fs_info);
	if (ret)
		btrfs_handle_fs_error(fs_info, ret, NULL);
}
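/*
 * Example of the resume heuristic above (a sketch): a balance started as
 *
 *	btrfs balance start -dconvert=raid1 -musage=50 /mnt
 *
 * and interrupted will resume with BTRFS_BALANCE_ARGS_SOFT added to the
 * data filter (already-converted chunks are skipped), while the system
 * chunks, which had neither a usage nor a convert filter, get usage=90 so
 * that chunks balanced before the interruption are not shuffled again.
 */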
/*
 * Balance filters. Return 1 if chunk should be filtered out
 * (should not be balanced).
 */
static int chunk_profiles_filter(u64 chunk_type,
				 struct btrfs_balance_args *bargs)
{
	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->profiles & chunk_type)
		return 0;

	return 1;
}

static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
				    struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group *cache;
	u64 chunk_used;
	u64 user_thresh_min;
	u64 user_thresh_max;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = cache->used;

	if (bargs->usage_min == 0)
		user_thresh_min = 0;
	else
		user_thresh_min = div_factor_fine(cache->length,
						  bargs->usage_min);

	if (bargs->usage_max == 0)
		user_thresh_max = 1;
	else if (bargs->usage_max > 100)
		user_thresh_max = cache->length;
	else
		user_thresh_max = div_factor_fine(cache->length,
						  bargs->usage_max);

	if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}

static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
			      u64 chunk_offset, struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group *cache;
	u64 chunk_used, user_thresh;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = cache->used;

	if (bargs->usage_min == 0)
		user_thresh = 1;
	else if (bargs->usage > 100)
		user_thresh = cache->length;
	else
		user_thresh = div_factor_fine(cache->length, bargs->usage);

	if (chunk_used < user_thresh)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}

static int chunk_devid_filter(struct extent_buffer *leaf,
			      struct btrfs_chunk *chunk,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	int i;

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
			return 0;
	}

	return 1;
}

static u64 calc_data_stripes(u64 type, int num_stripes)
{
	const int index = btrfs_bg_flags_to_raid_index(type);
	const int ncopies = btrfs_raid_array[index].ncopies;
	const int nparity = btrfs_raid_array[index].nparity;

	return (num_stripes - nparity) / ncopies;
}
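/*
 * Worked examples for calc_data_stripes() (a sketch, using the ncopies and
 * nparity values from btrfs_raid_array):
 *
 *	RAID0,  4 stripes: (4 - 0) / 1 = 4 data stripes
 *	RAID10, 4 stripes: (4 - 0) / 2 = 2 data stripes
 *	RAID5,  4 stripes: (4 - 1) / 1 = 3 data stripes
 *	RAID6,  6 stripes: (6 - 2) / 1 = 4 data stripes
 *
 * chunk_drange_filter() below divides the chunk length by this factor to
 * get the length of one stripe on a device.
 */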
/* [pstart, pend) */
static int chunk_drange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	u64 stripe_offset;
	u64 stripe_length;
	u64 type;
	int factor;
	int i;

	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
		return 0;

	type = btrfs_chunk_type(leaf, chunk);
	factor = calc_data_stripes(type, num_stripes);

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
			continue;

		stripe_offset = btrfs_stripe_offset(leaf, stripe);
		stripe_length = btrfs_chunk_length(leaf, chunk);
		stripe_length = div_u64(stripe_length, factor);

		if (stripe_offset < bargs->pend &&
		    stripe_offset + stripe_length > bargs->pstart)
			return 0;
	}

	return 1;
}

/* [vstart, vend) */
static int chunk_vrange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	if (chunk_offset < bargs->vend &&
	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
		/* at least part of the chunk is inside this vrange */
		return 0;

	return 1;
}

static int chunk_stripes_range_filter(struct extent_buffer *leaf,
				      struct btrfs_chunk *chunk,
				      struct btrfs_balance_args *bargs)
{
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);

	if (bargs->stripes_min <= num_stripes &&
	    num_stripes <= bargs->stripes_max)
		return 0;

	return 1;
}

static int chunk_soft_convert_filter(u64 chunk_type,
				     struct btrfs_balance_args *bargs)
{
	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
		return 0;

	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->target == chunk_type)
		return 1;

	return 0;
}
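/*
 * Filter semantics recap (a sketch): each chunk_*_filter() returns 1 to
 * filter the chunk *out*, so should_balance_chunk() below returns 0 (skip)
 * as soon as any requested filter rejects the chunk. For instance
 *
 *	btrfs balance start -ddevid=2,drange=0..1073741824 /mnt
 *
 * balances only data chunks that have a stripe on devid 2 intersecting the
 * first 1 GiB of that device (drange only makes sense together with the
 * devid filter).
 */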
static int should_balance_chunk(struct extent_buffer *leaf,
				struct btrfs_chunk *chunk, u64 chunk_offset)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	struct btrfs_balance_args *bargs = NULL;
	u64 chunk_type = btrfs_chunk_type(leaf, chunk);

	/* type filter */
	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
		return 0;
	}

	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
		bargs = &bctl->data;
	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
		bargs = &bctl->sys;
	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
		bargs = &bctl->meta;

	/* profiles filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
	    chunk_profiles_filter(chunk_type, bargs)) {
		return 0;
	}

	/* usage filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    chunk_usage_filter(fs_info, chunk_offset, bargs)) {
		return 0;
	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
		   chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
		return 0;
	}

	/* devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
	    chunk_devid_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* drange filter, makes sense only with devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
	    chunk_drange_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* vrange filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
	}

	/* stripes filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
	    chunk_stripes_range_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* soft profile changing mode */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
	    chunk_soft_convert_filter(chunk_type, bargs)) {
		return 0;
	}

	/*
	 * limited by count, must be the last filter
	 */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
		if (bargs->limit == 0)
			return 0;
		else
			bargs->limit--;
	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
		/*
		 * Same logic as the 'limit' filter; the minimum cannot be
		 * determined here because we do not have the global
		 * information about the count of all chunks that satisfy
		 * the filters.
		 */
		if (bargs->limit_max == 0)
			return 0;
		else
			bargs->limit_max--;
	}

	return 1;
}

static int __btrfs_balance(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	u64 chunk_type;
	struct btrfs_chunk *chunk;
	struct btrfs_path *path = NULL;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int slot;
	int ret;
	int enospc_errors = 0;
	bool counting = true;
	/*
	 * The single value limit and min/max limits use the same bytes in the
	 * union, so save the single values here before the counting pass
	 * decrements them.
	 */
	u64 limit_data = bctl->data.limit;
	u64 limit_meta = bctl->meta.limit;
	u64 limit_sys = bctl->sys.limit;
	u32 count_data = 0;
	u32 count_meta = 0;
	u32 count_sys = 0;
	int chunk_reserved = 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}

	/* zero out stat counters */
	spin_lock(&fs_info->balance_lock);
	memset(&bctl->stat, 0, sizeof(bctl->stat));
	spin_unlock(&fs_info->balance_lock);
again:
	if (!counting) {
		/*
		 * The single value limit and min/max limits use the same bytes
		 * in the union, so restore the values saved above before the
		 * balancing pass.
		 */
		bctl->data.limit = limit_data;
		bctl->meta.limit = limit_meta;
		bctl->sys.limit = limit_sys;
	}
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
		    atomic_read(&fs_info->balance_cancel_req)) {
			ret = -ECANCELED;
			goto error;
		}

		mutex_lock(&fs_info->reclaim_bgs_lock);
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&fs_info->reclaim_bgs_lock);
			goto error;
		}

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			BUG(); /* FIXME break ? */

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret) {
			mutex_unlock(&fs_info->reclaim_bgs_lock);
			ret = 0;
			break;
		}

		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid != key.objectid) {
			mutex_unlock(&fs_info->reclaim_bgs_lock);
			break;
		}

		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);

		if (!counting) {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.considered++;
			spin_unlock(&fs_info->balance_lock);
		}

		ret = should_balance_chunk(leaf, chunk, found_key.offset);

		btrfs_release_path(path);
		if (!ret) {
			mutex_unlock(&fs_info->reclaim_bgs_lock);
			goto loop;
		}

		if (counting) {
			mutex_unlock(&fs_info->reclaim_bgs_lock);
			spin_lock(&fs_info->balance_lock);
			bctl->stat.expected++;
			spin_unlock(&fs_info->balance_lock);

			if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
				count_data++;
			else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
				count_sys++;
			else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
				count_meta++;

			goto loop;
		}

		/*
		 * Apply limit_min filter, no need to check if the LIMITS
		 * filter is used, limit_min is 0 by default
		 */
		if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
		     count_data < bctl->data.limit_min) ||
		    ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
		     count_meta < bctl->meta.limit_min) ||
		    ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
		     count_sys < bctl->sys.limit_min)) {
			mutex_unlock(&fs_info->reclaim_bgs_lock);
			goto loop;
		}

		if (!chunk_reserved) {
			/*
			 * We may be relocating the only data chunk we have,
			 * which could potentially end up with losing data's
			 * raid profile, so let's allocate an empty one in
			 * advance.
			 */
			ret = btrfs_may_alloc_data_chunk(fs_info,
							 found_key.offset);
			if (ret < 0) {
				mutex_unlock(&fs_info->reclaim_bgs_lock);
				goto error;
			} else if (ret == 1) {
				chunk_reserved = 1;
			}
		}

		ret = btrfs_relocate_chunk(fs_info, found_key.offset);
		mutex_unlock(&fs_info->reclaim_bgs_lock);
		if (ret == -ENOSPC) {
			enospc_errors++;
		} else if (ret == -ETXTBSY) {
			btrfs_info(fs_info,
		   "skipping relocation of block group %llu due to active swapfile",
				   found_key.offset);
			ret = 0;
		} else if (ret) {
			goto error;
		} else {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.completed++;
			spin_unlock(&fs_info->balance_lock);
		}
loop:
		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}

	if (counting) {
		btrfs_release_path(path);
		counting = false;
		goto again;
	}
error:
	btrfs_free_path(path);
	if (enospc_errors) {
		btrfs_info(fs_info, "%d enospc errors during balance",
			   enospc_errors);
		if (!ret)
			ret = -ENOSPC;
	}

	return ret;
}
BTRFS_EXTENDED_PROFILE_MASK : 4042 BTRFS_BLOCK_GROUP_PROFILE_MASK); 4043 4044 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK; 4045 4046 /* 1) check that all other bits are zeroed */ 4047 if (flags & ~mask) 4048 return 0; 4049 4050 /* 2) see if profile is reduced */ 4051 if (flags == 0) 4052 return !extended; /* "0" is valid for usual profiles */ 4053 4054 return has_single_bit_set(flags); 4055 } 4056 4057 static inline int balance_need_close(struct btrfs_fs_info *fs_info) 4058 { 4059 /* cancel requested || normal exit path */ 4060 return atomic_read(&fs_info->balance_cancel_req) || 4061 (atomic_read(&fs_info->balance_pause_req) == 0 && 4062 atomic_read(&fs_info->balance_cancel_req) == 0); 4063 } 4064 4065 /* 4066 * Validate target profile against allowed profiles and return true if it's OK. 4067 * Otherwise print the error message and return false. 4068 */ 4069 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info, 4070 const struct btrfs_balance_args *bargs, 4071 u64 allowed, const char *type) 4072 { 4073 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 4074 return true; 4075 4076 if (fs_info->sectorsize < PAGE_SIZE && 4077 bargs->target & BTRFS_BLOCK_GROUP_RAID56_MASK) { 4078 btrfs_err(fs_info, 4079 "RAID56 is not yet supported for sectorsize %u with page size %lu", 4080 fs_info->sectorsize, PAGE_SIZE); 4081 return false; 4082 } 4083 /* Profile is valid and does not have bits outside of the allowed set */ 4084 if (alloc_profile_is_valid(bargs->target, 1) && 4085 (bargs->target & ~allowed) == 0) 4086 return true; 4087 4088 btrfs_err(fs_info, "balance: invalid convert %s profile %s", 4089 type, btrfs_bg_type_to_raid_name(bargs->target)); 4090 return false; 4091 } 4092 4093 /* 4094 * Fill @buf with textual description of balance filter flags @bargs, up to 4095 * @size_buf including the terminating null. The output may be trimmed if it 4096 * does not fit into the provided buffer. 
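 *
 * As an illustrative example, a data filter with convert, soft and a
 * usage limit set would be rendered roughly as:
 *
 *   "convert=raid1,soft,usage=90"
 *
 * with the trailing comma of the last appended item replaced by the
 * terminating null at the end of this function.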
4097 */ 4098 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf, 4099 u32 size_buf) 4100 { 4101 int ret; 4102 u32 size_bp = size_buf; 4103 char *bp = buf; 4104 u64 flags = bargs->flags; 4105 char tmp_buf[128] = {'\0'}; 4106 4107 if (!flags) 4108 return; 4109 4110 #define CHECK_APPEND_NOARG(a) \ 4111 do { \ 4112 ret = snprintf(bp, size_bp, (a)); \ 4113 if (ret < 0 || ret >= size_bp) \ 4114 goto out_overflow; \ 4115 size_bp -= ret; \ 4116 bp += ret; \ 4117 } while (0) 4118 4119 #define CHECK_APPEND_1ARG(a, v1) \ 4120 do { \ 4121 ret = snprintf(bp, size_bp, (a), (v1)); \ 4122 if (ret < 0 || ret >= size_bp) \ 4123 goto out_overflow; \ 4124 size_bp -= ret; \ 4125 bp += ret; \ 4126 } while (0) 4127 4128 #define CHECK_APPEND_2ARG(a, v1, v2) \ 4129 do { \ 4130 ret = snprintf(bp, size_bp, (a), (v1), (v2)); \ 4131 if (ret < 0 || ret >= size_bp) \ 4132 goto out_overflow; \ 4133 size_bp -= ret; \ 4134 bp += ret; \ 4135 } while (0) 4136 4137 if (flags & BTRFS_BALANCE_ARGS_CONVERT) 4138 CHECK_APPEND_1ARG("convert=%s,", 4139 btrfs_bg_type_to_raid_name(bargs->target)); 4140 4141 if (flags & BTRFS_BALANCE_ARGS_SOFT) 4142 CHECK_APPEND_NOARG("soft,"); 4143 4144 if (flags & BTRFS_BALANCE_ARGS_PROFILES) { 4145 btrfs_describe_block_groups(bargs->profiles, tmp_buf, 4146 sizeof(tmp_buf)); 4147 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf); 4148 } 4149 4150 if (flags & BTRFS_BALANCE_ARGS_USAGE) 4151 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage); 4152 4153 if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) 4154 CHECK_APPEND_2ARG("usage=%u..%u,", 4155 bargs->usage_min, bargs->usage_max); 4156 4157 if (flags & BTRFS_BALANCE_ARGS_DEVID) 4158 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid); 4159 4160 if (flags & BTRFS_BALANCE_ARGS_DRANGE) 4161 CHECK_APPEND_2ARG("drange=%llu..%llu,", 4162 bargs->pstart, bargs->pend); 4163 4164 if (flags & BTRFS_BALANCE_ARGS_VRANGE) 4165 CHECK_APPEND_2ARG("vrange=%llu..%llu,", 4166 bargs->vstart, bargs->vend); 4167 4168 if (flags & BTRFS_BALANCE_ARGS_LIMIT) 4169 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit); 4170 4171 if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE) 4172 CHECK_APPEND_2ARG("limit=%u..%u,", 4173 bargs->limit_min, bargs->limit_max); 4174 4175 if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) 4176 CHECK_APPEND_2ARG("stripes=%u..%u,", 4177 bargs->stripes_min, bargs->stripes_max); 4178 4179 #undef CHECK_APPEND_2ARG 4180 #undef CHECK_APPEND_1ARG 4181 #undef CHECK_APPEND_NOARG 4182 4183 out_overflow: 4184 4185 if (size_bp < size_buf) 4186 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */ 4187 else 4188 buf[0] = '\0'; 4189 } 4190 4191 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info) 4192 { 4193 u32 size_buf = 1024; 4194 char tmp_buf[192] = {'\0'}; 4195 char *buf; 4196 char *bp; 4197 u32 size_bp = size_buf; 4198 int ret; 4199 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 4200 4201 buf = kzalloc(size_buf, GFP_KERNEL); 4202 if (!buf) 4203 return; 4204 4205 bp = buf; 4206 4207 #define CHECK_APPEND_1ARG(a, v1) \ 4208 do { \ 4209 ret = snprintf(bp, size_bp, (a), (v1)); \ 4210 if (ret < 0 || ret >= size_bp) \ 4211 goto out_overflow; \ 4212 size_bp -= ret; \ 4213 bp += ret; \ 4214 } while (0) 4215 4216 if (bctl->flags & BTRFS_BALANCE_FORCE) 4217 CHECK_APPEND_1ARG("%s", "-f "); 4218 4219 if (bctl->flags & BTRFS_BALANCE_DATA) { 4220 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf)); 4221 CHECK_APPEND_1ARG("-d%s ", tmp_buf); 4222 } 4223 4224 if (bctl->flags & BTRFS_BALANCE_METADATA) { 4225 describe_balance_args(&bctl->meta, 
tmp_buf, sizeof(tmp_buf));
		CHECK_APPEND_1ARG("-m%s ", tmp_buf);
	}

	if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
		describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
		CHECK_APPEND_1ARG("-s%s ", tmp_buf);
	}

#undef CHECK_APPEND_1ARG

out_overflow:

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
	btrfs_info(fs_info, "balance: %s %s",
		   (bctl->flags & BTRFS_BALANCE_RESUME) ?
		   "resume" : "start", buf);

	kfree(buf);
}

/*
 * Should be called with the balance mutex held.
 */
int btrfs_balance(struct btrfs_fs_info *fs_info,
		  struct btrfs_balance_control *bctl,
		  struct btrfs_ioctl_balance_args *bargs)
{
	u64 meta_target, data_target;
	u64 allowed;
	int mixed = 0;
	int ret;
	u64 num_devices;
	unsigned seq;
	bool reducing_redundancy;
	int i;

	if (btrfs_fs_closing(fs_info) ||
	    atomic_read(&fs_info->balance_pause_req) ||
	    btrfs_should_cancel_balance(fs_info)) {
		ret = -EINVAL;
		goto out;
	}

	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	/*
	 * In case of mixed groups both data and metadata should be picked,
	 * and identical options should be given for both of them.
	 */
	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
	if (mixed && (bctl->flags & allowed)) {
		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
			btrfs_err(fs_info,
	"balance: mixed groups data and metadata options must be the same");
			ret = -EINVAL;
			goto out;
		}
	}

	/*
	 * rw_devices will not change at the moment, device add/delete/replace
	 * are exclusive operations.
	 */
	num_devices = fs_info->fs_devices->rw_devices;

	/*
	 * SINGLE profile on-disk has no profile bit, but in-memory we have a
	 * special bit for it, to make it easier to distinguish. Thus we need
	 * to set it manually, or balance would refuse the profile.
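	 *
	 * Illustrative example: with a single rw device only profiles with
	 * devs_min == 1 (single, dup, raid0 in btrfs_raid_array) make it into
	 * @allowed below, so e.g. a convert target of raid1 would be rejected
	 * by validate_convert_profile().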
4300 */ 4301 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE; 4302 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) 4303 if (num_devices >= btrfs_raid_array[i].devs_min) 4304 allowed |= btrfs_raid_array[i].bg_flag; 4305 4306 if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") || 4307 !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") || 4308 !validate_convert_profile(fs_info, &bctl->sys, allowed, "system")) { 4309 ret = -EINVAL; 4310 goto out; 4311 } 4312 4313 /* 4314 * Allow to reduce metadata or system integrity only if force set for 4315 * profiles with redundancy (copies, parity) 4316 */ 4317 allowed = 0; 4318 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) { 4319 if (btrfs_raid_array[i].ncopies >= 2 || 4320 btrfs_raid_array[i].tolerated_failures >= 1) 4321 allowed |= btrfs_raid_array[i].bg_flag; 4322 } 4323 do { 4324 seq = read_seqbegin(&fs_info->profiles_lock); 4325 4326 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4327 (fs_info->avail_system_alloc_bits & allowed) && 4328 !(bctl->sys.target & allowed)) || 4329 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4330 (fs_info->avail_metadata_alloc_bits & allowed) && 4331 !(bctl->meta.target & allowed))) 4332 reducing_redundancy = true; 4333 else 4334 reducing_redundancy = false; 4335 4336 /* if we're not converting, the target field is uninitialized */ 4337 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4338 bctl->meta.target : fs_info->avail_metadata_alloc_bits; 4339 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4340 bctl->data.target : fs_info->avail_data_alloc_bits; 4341 } while (read_seqretry(&fs_info->profiles_lock, seq)); 4342 4343 if (reducing_redundancy) { 4344 if (bctl->flags & BTRFS_BALANCE_FORCE) { 4345 btrfs_info(fs_info, 4346 "balance: force reducing metadata redundancy"); 4347 } else { 4348 btrfs_err(fs_info, 4349 "balance: reduces metadata redundancy, use --force if you want this"); 4350 ret = -EINVAL; 4351 goto out; 4352 } 4353 } 4354 4355 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) < 4356 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) { 4357 btrfs_warn(fs_info, 4358 "balance: metadata profile %s has lower redundancy than data profile %s", 4359 btrfs_bg_type_to_raid_name(meta_target), 4360 btrfs_bg_type_to_raid_name(data_target)); 4361 } 4362 4363 ret = insert_balance_item(fs_info, bctl); 4364 if (ret && ret != -EEXIST) 4365 goto out; 4366 4367 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) { 4368 BUG_ON(ret == -EEXIST); 4369 BUG_ON(fs_info->balance_ctl); 4370 spin_lock(&fs_info->balance_lock); 4371 fs_info->balance_ctl = bctl; 4372 spin_unlock(&fs_info->balance_lock); 4373 } else { 4374 BUG_ON(ret != -EEXIST); 4375 spin_lock(&fs_info->balance_lock); 4376 update_balance_args(bctl); 4377 spin_unlock(&fs_info->balance_lock); 4378 } 4379 4380 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4381 set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4382 describe_balance_start_or_resume(fs_info); 4383 mutex_unlock(&fs_info->balance_mutex); 4384 4385 ret = __btrfs_balance(fs_info); 4386 4387 mutex_lock(&fs_info->balance_mutex); 4388 if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) { 4389 btrfs_info(fs_info, "balance: paused"); 4390 btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED); 4391 } 4392 /* 4393 * Balance can be canceled by: 4394 * 4395 * - Regular cancel request 4396 * Then ret == -ECANCELED and balance_cancel_req > 0 4397 * 4398 * - Fatal signal to "btrfs" process 4399 * 
Either the signal caught by wait_reserve_ticket() and callers 4400 * got -EINTR, or caught by btrfs_should_cancel_balance() and 4401 * got -ECANCELED. 4402 * Either way, in this case balance_cancel_req = 0, and 4403 * ret == -EINTR or ret == -ECANCELED. 4404 * 4405 * So here we only check the return value to catch canceled balance. 4406 */ 4407 else if (ret == -ECANCELED || ret == -EINTR) 4408 btrfs_info(fs_info, "balance: canceled"); 4409 else 4410 btrfs_info(fs_info, "balance: ended with status: %d", ret); 4411 4412 clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4413 4414 if (bargs) { 4415 memset(bargs, 0, sizeof(*bargs)); 4416 btrfs_update_ioctl_balance_args(fs_info, bargs); 4417 } 4418 4419 if ((ret && ret != -ECANCELED && ret != -ENOSPC) || 4420 balance_need_close(fs_info)) { 4421 reset_balance_state(fs_info); 4422 btrfs_exclop_finish(fs_info); 4423 } 4424 4425 wake_up(&fs_info->balance_wait_q); 4426 4427 return ret; 4428 out: 4429 if (bctl->flags & BTRFS_BALANCE_RESUME) 4430 reset_balance_state(fs_info); 4431 else 4432 kfree(bctl); 4433 btrfs_exclop_finish(fs_info); 4434 4435 return ret; 4436 } 4437 4438 static int balance_kthread(void *data) 4439 { 4440 struct btrfs_fs_info *fs_info = data; 4441 int ret = 0; 4442 4443 mutex_lock(&fs_info->balance_mutex); 4444 if (fs_info->balance_ctl) 4445 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL); 4446 mutex_unlock(&fs_info->balance_mutex); 4447 4448 return ret; 4449 } 4450 4451 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info) 4452 { 4453 struct task_struct *tsk; 4454 4455 mutex_lock(&fs_info->balance_mutex); 4456 if (!fs_info->balance_ctl) { 4457 mutex_unlock(&fs_info->balance_mutex); 4458 return 0; 4459 } 4460 mutex_unlock(&fs_info->balance_mutex); 4461 4462 if (btrfs_test_opt(fs_info, SKIP_BALANCE)) { 4463 btrfs_info(fs_info, "balance: resume skipped"); 4464 return 0; 4465 } 4466 4467 spin_lock(&fs_info->super_lock); 4468 ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED); 4469 fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE; 4470 spin_unlock(&fs_info->super_lock); 4471 /* 4472 * A ro->rw remount sequence should continue with the paused balance 4473 * regardless of who pauses it, system or the user as of now, so set 4474 * the resume flag. 
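	 *
	 * E.g. after "mount -o remount,rw <mnt>" a previously paused balance
	 * continues just as if the user had run "btrfs balance resume".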
4475 */ 4476 spin_lock(&fs_info->balance_lock); 4477 fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME; 4478 spin_unlock(&fs_info->balance_lock); 4479 4480 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); 4481 return PTR_ERR_OR_ZERO(tsk); 4482 } 4483 4484 int btrfs_recover_balance(struct btrfs_fs_info *fs_info) 4485 { 4486 struct btrfs_balance_control *bctl; 4487 struct btrfs_balance_item *item; 4488 struct btrfs_disk_balance_args disk_bargs; 4489 struct btrfs_path *path; 4490 struct extent_buffer *leaf; 4491 struct btrfs_key key; 4492 int ret; 4493 4494 path = btrfs_alloc_path(); 4495 if (!path) 4496 return -ENOMEM; 4497 4498 key.objectid = BTRFS_BALANCE_OBJECTID; 4499 key.type = BTRFS_TEMPORARY_ITEM_KEY; 4500 key.offset = 0; 4501 4502 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4503 if (ret < 0) 4504 goto out; 4505 if (ret > 0) { /* ret = -ENOENT; */ 4506 ret = 0; 4507 goto out; 4508 } 4509 4510 bctl = kzalloc(sizeof(*bctl), GFP_NOFS); 4511 if (!bctl) { 4512 ret = -ENOMEM; 4513 goto out; 4514 } 4515 4516 leaf = path->nodes[0]; 4517 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 4518 4519 bctl->flags = btrfs_balance_flags(leaf, item); 4520 bctl->flags |= BTRFS_BALANCE_RESUME; 4521 4522 btrfs_balance_data(leaf, item, &disk_bargs); 4523 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs); 4524 btrfs_balance_meta(leaf, item, &disk_bargs); 4525 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs); 4526 btrfs_balance_sys(leaf, item, &disk_bargs); 4527 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs); 4528 4529 /* 4530 * This should never happen, as the paused balance state is recovered 4531 * during mount without any chance of other exclusive ops to collide. 4532 * 4533 * This gives the exclusive op status to balance and keeps in paused 4534 * state until user intervention (cancel or umount). If the ownership 4535 * cannot be assigned, show a message but do not fail. The balance 4536 * is in a paused state and must have fs_info::balance_ctl properly 4537 * set up. 
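	 *
	 * Note: the balance item looked up above lives in the tree root at
	 * key (BTRFS_BALANCE_OBJECTID, BTRFS_TEMPORARY_ITEM_KEY, 0).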
4538 */ 4539 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED)) 4540 btrfs_warn(fs_info, 4541 "balance: cannot set exclusive op status, resume manually"); 4542 4543 btrfs_release_path(path); 4544 4545 mutex_lock(&fs_info->balance_mutex); 4546 BUG_ON(fs_info->balance_ctl); 4547 spin_lock(&fs_info->balance_lock); 4548 fs_info->balance_ctl = bctl; 4549 spin_unlock(&fs_info->balance_lock); 4550 mutex_unlock(&fs_info->balance_mutex); 4551 out: 4552 btrfs_free_path(path); 4553 return ret; 4554 } 4555 4556 int btrfs_pause_balance(struct btrfs_fs_info *fs_info) 4557 { 4558 int ret = 0; 4559 4560 mutex_lock(&fs_info->balance_mutex); 4561 if (!fs_info->balance_ctl) { 4562 mutex_unlock(&fs_info->balance_mutex); 4563 return -ENOTCONN; 4564 } 4565 4566 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4567 atomic_inc(&fs_info->balance_pause_req); 4568 mutex_unlock(&fs_info->balance_mutex); 4569 4570 wait_event(fs_info->balance_wait_q, 4571 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4572 4573 mutex_lock(&fs_info->balance_mutex); 4574 /* we are good with balance_ctl ripped off from under us */ 4575 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4576 atomic_dec(&fs_info->balance_pause_req); 4577 } else { 4578 ret = -ENOTCONN; 4579 } 4580 4581 mutex_unlock(&fs_info->balance_mutex); 4582 return ret; 4583 } 4584 4585 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) 4586 { 4587 mutex_lock(&fs_info->balance_mutex); 4588 if (!fs_info->balance_ctl) { 4589 mutex_unlock(&fs_info->balance_mutex); 4590 return -ENOTCONN; 4591 } 4592 4593 /* 4594 * A paused balance with the item stored on disk can be resumed at 4595 * mount time if the mount is read-write. Otherwise it's still paused 4596 * and we must not allow cancelling as it deletes the item. 4597 */ 4598 if (sb_rdonly(fs_info->sb)) { 4599 mutex_unlock(&fs_info->balance_mutex); 4600 return -EROFS; 4601 } 4602 4603 atomic_inc(&fs_info->balance_cancel_req); 4604 /* 4605 * if we are running just wait and return, balance item is 4606 * deleted in btrfs_balance in this case 4607 */ 4608 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4609 mutex_unlock(&fs_info->balance_mutex); 4610 wait_event(fs_info->balance_wait_q, 4611 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4612 mutex_lock(&fs_info->balance_mutex); 4613 } else { 4614 mutex_unlock(&fs_info->balance_mutex); 4615 /* 4616 * Lock released to allow other waiters to continue, we'll 4617 * reexamine the status again. 
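	 *
	 * Because the mutex was dropped, another task may have reset the
	 * balance state already, hence the fs_info->balance_ctl check below
	 * before touching it.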
4618 */ 4619 mutex_lock(&fs_info->balance_mutex); 4620 4621 if (fs_info->balance_ctl) { 4622 reset_balance_state(fs_info); 4623 btrfs_exclop_finish(fs_info); 4624 btrfs_info(fs_info, "balance: canceled"); 4625 } 4626 } 4627 4628 BUG_ON(fs_info->balance_ctl || 4629 test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4630 atomic_dec(&fs_info->balance_cancel_req); 4631 mutex_unlock(&fs_info->balance_mutex); 4632 return 0; 4633 } 4634 4635 int btrfs_uuid_scan_kthread(void *data) 4636 { 4637 struct btrfs_fs_info *fs_info = data; 4638 struct btrfs_root *root = fs_info->tree_root; 4639 struct btrfs_key key; 4640 struct btrfs_path *path = NULL; 4641 int ret = 0; 4642 struct extent_buffer *eb; 4643 int slot; 4644 struct btrfs_root_item root_item; 4645 u32 item_size; 4646 struct btrfs_trans_handle *trans = NULL; 4647 bool closing = false; 4648 4649 path = btrfs_alloc_path(); 4650 if (!path) { 4651 ret = -ENOMEM; 4652 goto out; 4653 } 4654 4655 key.objectid = 0; 4656 key.type = BTRFS_ROOT_ITEM_KEY; 4657 key.offset = 0; 4658 4659 while (1) { 4660 if (btrfs_fs_closing(fs_info)) { 4661 closing = true; 4662 break; 4663 } 4664 ret = btrfs_search_forward(root, &key, path, 4665 BTRFS_OLDEST_GENERATION); 4666 if (ret) { 4667 if (ret > 0) 4668 ret = 0; 4669 break; 4670 } 4671 4672 if (key.type != BTRFS_ROOT_ITEM_KEY || 4673 (key.objectid < BTRFS_FIRST_FREE_OBJECTID && 4674 key.objectid != BTRFS_FS_TREE_OBJECTID) || 4675 key.objectid > BTRFS_LAST_FREE_OBJECTID) 4676 goto skip; 4677 4678 eb = path->nodes[0]; 4679 slot = path->slots[0]; 4680 item_size = btrfs_item_size(eb, slot); 4681 if (item_size < sizeof(root_item)) 4682 goto skip; 4683 4684 read_extent_buffer(eb, &root_item, 4685 btrfs_item_ptr_offset(eb, slot), 4686 (int)sizeof(root_item)); 4687 if (btrfs_root_refs(&root_item) == 0) 4688 goto skip; 4689 4690 if (!btrfs_is_empty_uuid(root_item.uuid) || 4691 !btrfs_is_empty_uuid(root_item.received_uuid)) { 4692 if (trans) 4693 goto update_tree; 4694 4695 btrfs_release_path(path); 4696 /* 4697 * 1 - subvol uuid item 4698 * 1 - received_subvol uuid item 4699 */ 4700 trans = btrfs_start_transaction(fs_info->uuid_root, 2); 4701 if (IS_ERR(trans)) { 4702 ret = PTR_ERR(trans); 4703 break; 4704 } 4705 continue; 4706 } else { 4707 goto skip; 4708 } 4709 update_tree: 4710 btrfs_release_path(path); 4711 if (!btrfs_is_empty_uuid(root_item.uuid)) { 4712 ret = btrfs_uuid_tree_add(trans, root_item.uuid, 4713 BTRFS_UUID_KEY_SUBVOL, 4714 key.objectid); 4715 if (ret < 0) { 4716 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4717 ret); 4718 break; 4719 } 4720 } 4721 4722 if (!btrfs_is_empty_uuid(root_item.received_uuid)) { 4723 ret = btrfs_uuid_tree_add(trans, 4724 root_item.received_uuid, 4725 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4726 key.objectid); 4727 if (ret < 0) { 4728 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4729 ret); 4730 break; 4731 } 4732 } 4733 4734 skip: 4735 btrfs_release_path(path); 4736 if (trans) { 4737 ret = btrfs_end_transaction(trans); 4738 trans = NULL; 4739 if (ret) 4740 break; 4741 } 4742 4743 if (key.offset < (u64)-1) { 4744 key.offset++; 4745 } else if (key.type < BTRFS_ROOT_ITEM_KEY) { 4746 key.offset = 0; 4747 key.type = BTRFS_ROOT_ITEM_KEY; 4748 } else if (key.objectid < (u64)-1) { 4749 key.offset = 0; 4750 key.type = BTRFS_ROOT_ITEM_KEY; 4751 key.objectid++; 4752 } else { 4753 break; 4754 } 4755 cond_resched(); 4756 } 4757 4758 out: 4759 btrfs_free_path(path); 4760 if (trans && !IS_ERR(trans)) 4761 btrfs_end_transaction(trans); 4762 if (ret) 4763 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread 
failed %d", ret);
	else if (!closing)
		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
	up(&fs_info->uuid_tree_rescan_sem);
	return 0;
}

int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *uuid_root;
	struct task_struct *task;
	int ret;

	/*
	 * 1 - root node
	 * 1 - root item
	 */
	trans = btrfs_start_transaction(tree_root, 2);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
	if (IS_ERR(uuid_root)) {
		ret = PTR_ERR(uuid_root);
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		return ret;
	}

	fs_info->uuid_root = uuid_root;

	ret = btrfs_commit_transaction(trans);
	if (ret)
		return ret;

	down(&fs_info->uuid_tree_rescan_sem);
	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
	if (IS_ERR(task)) {
		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
		btrfs_warn(fs_info, "failed to start uuid_scan task");
		up(&fs_info->uuid_tree_rescan_sem);
		return PTR_ERR(task);
	}

	return 0;
}

/*
 * Shrinking a device means finding all of the device extents past the new
 * size, and then following the back refs to the chunks. The chunk relocation
 * code actually frees the device extent.
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_offset;
	int ret;
	int slot;
	int failed = 0;
	bool retried = false;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 old_size = btrfs_device_get_total_bytes(device);
	u64 diff;
	u64 start;

	new_size = round_down(new_size, fs_info->sectorsize);
	start = new_size;
	diff = round_down(old_size - new_size, fs_info->sectorsize);

	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_BACK;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	mutex_lock(&fs_info->chunk_mutex);

	btrfs_device_set_total_bytes(device, new_size);
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		device->fs_devices->total_rw_bytes -= diff;
		atomic64_sub(diff, &fs_info->free_chunk_space);
	}

	/*
	 * Once the device's size has been set to the new size, ensure all
	 * in-memory chunks are synced to disk so that the loop below sees them
	 * and relocates them accordingly.
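	 *
	 * Illustrative example with hypothetical numbers: shrinking a 100GiB
	 * device to 60GiB walks the device extents backwards from the end,
	 * and every extent with key.offset + length > 60GiB is relocated by
	 * the loop below.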
 */
	if (contains_pending_extent(device, &start, diff)) {
		mutex_unlock(&fs_info->chunk_mutex);
		ret = btrfs_commit_transaction(trans);
		if (ret)
			goto done;
	} else {
		mutex_unlock(&fs_info->chunk_mutex);
		btrfs_end_transaction(trans);
	}

again:
	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	do {
		mutex_lock(&fs_info->reclaim_bgs_lock);
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&fs_info->reclaim_bgs_lock);
			goto done;
		}

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret) {
			mutex_unlock(&fs_info->reclaim_bgs_lock);
			if (ret < 0)
				goto done;
			ret = 0;
			btrfs_release_path(path);
			break;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid) {
			mutex_unlock(&fs_info->reclaim_bgs_lock);
			btrfs_release_path(path);
			break;
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size) {
			mutex_unlock(&fs_info->reclaim_bgs_lock);
			btrfs_release_path(path);
			break;
		}

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(path);

		/*
		 * We may be relocating the only data chunk we have, which
		 * could potentially end up losing the data's raid profile,
		 * so let's allocate an empty one in advance.
		 */
		ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
		if (ret < 0) {
			mutex_unlock(&fs_info->reclaim_bgs_lock);
			goto done;
		}

		ret = btrfs_relocate_chunk(fs_info, chunk_offset);
		mutex_unlock(&fs_info->reclaim_bgs_lock);
		if (ret == -ENOSPC) {
			failed++;
		} else if (ret) {
			if (ret == -ETXTBSY) {
				btrfs_warn(fs_info,
		   "could not shrink block group %llu due to active swapfile",
					   chunk_offset);
			}
			goto done;
		}
	} while (key.offset-- > 0);

	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		ret = -ENOSPC;
		goto done;
	}

	/* Shrinking succeeded, else we would be at "done". */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto done;
	}

	mutex_lock(&fs_info->chunk_mutex);
	/* Clear all state bits beyond the shrunk device size */
	clear_extent_bits(&device->alloc_state, new_size, (u64)-1,
			  CHUNK_STATE_MASK);

	btrfs_device_set_disk_total_bytes(device, new_size);
	if (list_empty(&device->post_commit_list))
		list_add_tail(&device->post_commit_list,
			      &trans->transaction->dev_update_list);

	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy,
			round_down(old_total - diff, fs_info->sectorsize));
	mutex_unlock(&fs_info->chunk_mutex);

	btrfs_reserve_chunk_metadata(trans, false);
	/*
	 * Now btrfs_update_device() will change the on-disk size.
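	 * (The in-memory sizes were already adjusted under chunk_mutex above;
	 * this step persists the shrunken size in the device item.)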
*/ 4984 ret = btrfs_update_device(trans, device); 4985 btrfs_trans_release_chunk_metadata(trans); 4986 if (ret < 0) { 4987 btrfs_abort_transaction(trans, ret); 4988 btrfs_end_transaction(trans); 4989 } else { 4990 ret = btrfs_commit_transaction(trans); 4991 } 4992 done: 4993 btrfs_free_path(path); 4994 if (ret) { 4995 mutex_lock(&fs_info->chunk_mutex); 4996 btrfs_device_set_total_bytes(device, old_size); 4997 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 4998 device->fs_devices->total_rw_bytes += diff; 4999 atomic64_add(diff, &fs_info->free_chunk_space); 5000 mutex_unlock(&fs_info->chunk_mutex); 5001 } 5002 return ret; 5003 } 5004 5005 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info, 5006 struct btrfs_key *key, 5007 struct btrfs_chunk *chunk, int item_size) 5008 { 5009 struct btrfs_super_block *super_copy = fs_info->super_copy; 5010 struct btrfs_disk_key disk_key; 5011 u32 array_size; 5012 u8 *ptr; 5013 5014 lockdep_assert_held(&fs_info->chunk_mutex); 5015 5016 array_size = btrfs_super_sys_array_size(super_copy); 5017 if (array_size + item_size + sizeof(disk_key) 5018 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) 5019 return -EFBIG; 5020 5021 ptr = super_copy->sys_chunk_array + array_size; 5022 btrfs_cpu_key_to_disk(&disk_key, key); 5023 memcpy(ptr, &disk_key, sizeof(disk_key)); 5024 ptr += sizeof(disk_key); 5025 memcpy(ptr, chunk, item_size); 5026 item_size += sizeof(disk_key); 5027 btrfs_set_super_sys_array_size(super_copy, array_size + item_size); 5028 5029 return 0; 5030 } 5031 5032 /* 5033 * sort the devices in descending order by max_avail, total_avail 5034 */ 5035 static int btrfs_cmp_device_info(const void *a, const void *b) 5036 { 5037 const struct btrfs_device_info *di_a = a; 5038 const struct btrfs_device_info *di_b = b; 5039 5040 if (di_a->max_avail > di_b->max_avail) 5041 return -1; 5042 if (di_a->max_avail < di_b->max_avail) 5043 return 1; 5044 if (di_a->total_avail > di_b->total_avail) 5045 return -1; 5046 if (di_a->total_avail < di_b->total_avail) 5047 return 1; 5048 return 0; 5049 } 5050 5051 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type) 5052 { 5053 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK)) 5054 return; 5055 5056 btrfs_set_fs_incompat(info, RAID56); 5057 } 5058 5059 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type) 5060 { 5061 if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4))) 5062 return; 5063 5064 btrfs_set_fs_incompat(info, RAID1C34); 5065 } 5066 5067 /* 5068 * Structure used internally for btrfs_create_chunk() function. 5069 * Wraps needed parameters. 
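 *
 * For example, for a RAID10 chunk the values derived from
 * btrfs_raid_array would be sub_stripes=2, dev_stripes=1,
 * devs_increment=2, ncopies=2 and nparity=0.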
5070 */ 5071 struct alloc_chunk_ctl { 5072 u64 start; 5073 u64 type; 5074 /* Total number of stripes to allocate */ 5075 int num_stripes; 5076 /* sub_stripes info for map */ 5077 int sub_stripes; 5078 /* Stripes per device */ 5079 int dev_stripes; 5080 /* Maximum number of devices to use */ 5081 int devs_max; 5082 /* Minimum number of devices to use */ 5083 int devs_min; 5084 /* ndevs has to be a multiple of this */ 5085 int devs_increment; 5086 /* Number of copies */ 5087 int ncopies; 5088 /* Number of stripes worth of bytes to store parity information */ 5089 int nparity; 5090 u64 max_stripe_size; 5091 u64 max_chunk_size; 5092 u64 dev_extent_min; 5093 u64 stripe_size; 5094 u64 chunk_size; 5095 int ndevs; 5096 }; 5097 5098 static void init_alloc_chunk_ctl_policy_regular( 5099 struct btrfs_fs_devices *fs_devices, 5100 struct alloc_chunk_ctl *ctl) 5101 { 5102 u64 type = ctl->type; 5103 5104 if (type & BTRFS_BLOCK_GROUP_DATA) { 5105 ctl->max_stripe_size = SZ_1G; 5106 ctl->max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE; 5107 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 5108 /* For larger filesystems, use larger metadata chunks */ 5109 if (fs_devices->total_rw_bytes > 50ULL * SZ_1G) 5110 ctl->max_stripe_size = SZ_1G; 5111 else 5112 ctl->max_stripe_size = SZ_256M; 5113 ctl->max_chunk_size = ctl->max_stripe_size; 5114 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 5115 ctl->max_stripe_size = SZ_32M; 5116 ctl->max_chunk_size = 2 * ctl->max_stripe_size; 5117 ctl->devs_max = min_t(int, ctl->devs_max, 5118 BTRFS_MAX_DEVS_SYS_CHUNK); 5119 } else { 5120 BUG(); 5121 } 5122 5123 /* We don't want a chunk larger than 10% of writable space */ 5124 ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1), 5125 ctl->max_chunk_size); 5126 ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes; 5127 } 5128 5129 static void init_alloc_chunk_ctl_policy_zoned( 5130 struct btrfs_fs_devices *fs_devices, 5131 struct alloc_chunk_ctl *ctl) 5132 { 5133 u64 zone_size = fs_devices->fs_info->zone_size; 5134 u64 limit; 5135 int min_num_stripes = ctl->devs_min * ctl->dev_stripes; 5136 int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies; 5137 u64 min_chunk_size = min_data_stripes * zone_size; 5138 u64 type = ctl->type; 5139 5140 ctl->max_stripe_size = zone_size; 5141 if (type & BTRFS_BLOCK_GROUP_DATA) { 5142 ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE, 5143 zone_size); 5144 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 5145 ctl->max_chunk_size = ctl->max_stripe_size; 5146 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 5147 ctl->max_chunk_size = 2 * ctl->max_stripe_size; 5148 ctl->devs_max = min_t(int, ctl->devs_max, 5149 BTRFS_MAX_DEVS_SYS_CHUNK); 5150 } else { 5151 BUG(); 5152 } 5153 5154 /* We don't want a chunk larger than 10% of writable space */ 5155 limit = max(round_down(div_factor(fs_devices->total_rw_bytes, 1), 5156 zone_size), 5157 min_chunk_size); 5158 ctl->max_chunk_size = min(limit, ctl->max_chunk_size); 5159 ctl->dev_extent_min = zone_size * ctl->dev_stripes; 5160 } 5161 5162 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices, 5163 struct alloc_chunk_ctl *ctl) 5164 { 5165 int index = btrfs_bg_flags_to_raid_index(ctl->type); 5166 5167 ctl->sub_stripes = btrfs_raid_array[index].sub_stripes; 5168 ctl->dev_stripes = btrfs_raid_array[index].dev_stripes; 5169 ctl->devs_max = btrfs_raid_array[index].devs_max; 5170 if (!ctl->devs_max) 5171 ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info); 5172 ctl->devs_min = btrfs_raid_array[index].devs_min; 5173 
ctl->devs_increment = btrfs_raid_array[index].devs_increment; 5174 ctl->ncopies = btrfs_raid_array[index].ncopies; 5175 ctl->nparity = btrfs_raid_array[index].nparity; 5176 ctl->ndevs = 0; 5177 5178 switch (fs_devices->chunk_alloc_policy) { 5179 case BTRFS_CHUNK_ALLOC_REGULAR: 5180 init_alloc_chunk_ctl_policy_regular(fs_devices, ctl); 5181 break; 5182 case BTRFS_CHUNK_ALLOC_ZONED: 5183 init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl); 5184 break; 5185 default: 5186 BUG(); 5187 } 5188 } 5189 5190 static int gather_device_info(struct btrfs_fs_devices *fs_devices, 5191 struct alloc_chunk_ctl *ctl, 5192 struct btrfs_device_info *devices_info) 5193 { 5194 struct btrfs_fs_info *info = fs_devices->fs_info; 5195 struct btrfs_device *device; 5196 u64 total_avail; 5197 u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes; 5198 int ret; 5199 int ndevs = 0; 5200 u64 max_avail; 5201 u64 dev_offset; 5202 5203 /* 5204 * in the first pass through the devices list, we gather information 5205 * about the available holes on each device. 5206 */ 5207 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { 5208 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 5209 WARN(1, KERN_ERR 5210 "BTRFS: read-only device in alloc_list\n"); 5211 continue; 5212 } 5213 5214 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 5215 &device->dev_state) || 5216 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 5217 continue; 5218 5219 if (device->total_bytes > device->bytes_used) 5220 total_avail = device->total_bytes - device->bytes_used; 5221 else 5222 total_avail = 0; 5223 5224 /* If there is no space on this device, skip it. */ 5225 if (total_avail < ctl->dev_extent_min) 5226 continue; 5227 5228 ret = find_free_dev_extent(device, dev_extent_want, &dev_offset, 5229 &max_avail); 5230 if (ret && ret != -ENOSPC) 5231 return ret; 5232 5233 if (ret == 0) 5234 max_avail = dev_extent_want; 5235 5236 if (max_avail < ctl->dev_extent_min) { 5237 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5238 btrfs_debug(info, 5239 "%s: devid %llu has no free space, have=%llu want=%llu", 5240 __func__, device->devid, max_avail, 5241 ctl->dev_extent_min); 5242 continue; 5243 } 5244 5245 if (ndevs == fs_devices->rw_devices) { 5246 WARN(1, "%s: found more than %llu devices\n", 5247 __func__, fs_devices->rw_devices); 5248 break; 5249 } 5250 devices_info[ndevs].dev_offset = dev_offset; 5251 devices_info[ndevs].max_avail = max_avail; 5252 devices_info[ndevs].total_avail = total_avail; 5253 devices_info[ndevs].dev = device; 5254 ++ndevs; 5255 } 5256 ctl->ndevs = ndevs; 5257 5258 /* 5259 * now sort the devices by hole size / available space 5260 */ 5261 sort(devices_info, ndevs, sizeof(struct btrfs_device_info), 5262 btrfs_cmp_device_info, NULL); 5263 5264 return 0; 5265 } 5266 5267 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl, 5268 struct btrfs_device_info *devices_info) 5269 { 5270 /* Number of stripes that count for block group size */ 5271 int data_stripes; 5272 5273 /* 5274 * The primary goal is to maximize the number of stripes, so use as 5275 * many devices as possible, even if the stripes are not maximum sized. 5276 * 5277 * The DUP profile stores more than one stripe per device, the 5278 * max_avail is the total size so we have to adjust. 
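	 *
	 * Illustrative math, assuming RAID0 over 3 devices with 1GiB of
	 * max_avail each: stripe_size = 1GiB, num_stripes = data_stripes = 3,
	 * so chunk_size below becomes stripe_size * data_stripes = 3GiB
	 * (reduced further if it exceeds max_chunk_size).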
 */
	ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail,
				   ctl->dev_stripes);
	ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;

	/* This will have to be fixed for RAID1 and RAID10 over more drives */
	data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;

	/*
	 * Use the number of data stripes to figure out how big this chunk is
	 * really going to be in terms of logical address space, and compare
	 * that answer with the max chunk size. If it's higher, we try to
	 * reduce stripe_size.
	 */
	if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
		/*
		 * Reduce stripe_size, round it up to a 16MB boundary again and
		 * then use it, unless it ends up being even bigger than the
		 * previous value we had already.
		 */
		ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size,
							data_stripes), SZ_16M),
				       ctl->stripe_size);
	}

	/* Align to BTRFS_STRIPE_LEN */
	ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
	ctl->chunk_size = ctl->stripe_size * data_stripes;

	return 0;
}

static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl,
				    struct btrfs_device_info *devices_info)
{
	u64 zone_size = devices_info[0].dev->zone_info->zone_size;
	/* Number of stripes that count for block group size */
	int data_stripes;

	/*
	 * It should hold because:
	 * dev_extent_min == dev_extent_want == zone_size * dev_stripes
	 */
	ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min);

	ctl->stripe_size = zone_size;
	ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
	data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;

	/*
	 * stripe_size is fixed in zoned filesystems. Reduce ndevs instead.
	 */
	if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
		ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies,
					     ctl->stripe_size) + ctl->nparity,
				     ctl->dev_stripes);
		ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
		data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
		ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size);
	}

	ctl->chunk_size = ctl->stripe_size * data_stripes;

	return 0;
}

static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
			      struct alloc_chunk_ctl *ctl,
			      struct btrfs_device_info *devices_info)
{
	struct btrfs_fs_info *info = fs_devices->fs_info;

	/*
	 * Round down to the number of usable stripes; devs_increment can be
	 * any number, so we can't use round_down(), which requires a power
	 * of 2, while
5353 */ 5354 ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment); 5355 5356 if (ctl->ndevs < ctl->devs_min) { 5357 if (btrfs_test_opt(info, ENOSPC_DEBUG)) { 5358 btrfs_debug(info, 5359 "%s: not enough devices with free space: have=%d minimum required=%d", 5360 __func__, ctl->ndevs, ctl->devs_min); 5361 } 5362 return -ENOSPC; 5363 } 5364 5365 ctl->ndevs = min(ctl->ndevs, ctl->devs_max); 5366 5367 switch (fs_devices->chunk_alloc_policy) { 5368 case BTRFS_CHUNK_ALLOC_REGULAR: 5369 return decide_stripe_size_regular(ctl, devices_info); 5370 case BTRFS_CHUNK_ALLOC_ZONED: 5371 return decide_stripe_size_zoned(ctl, devices_info); 5372 default: 5373 BUG(); 5374 } 5375 } 5376 5377 static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans, 5378 struct alloc_chunk_ctl *ctl, 5379 struct btrfs_device_info *devices_info) 5380 { 5381 struct btrfs_fs_info *info = trans->fs_info; 5382 struct map_lookup *map = NULL; 5383 struct extent_map_tree *em_tree; 5384 struct btrfs_block_group *block_group; 5385 struct extent_map *em; 5386 u64 start = ctl->start; 5387 u64 type = ctl->type; 5388 int ret; 5389 int i; 5390 int j; 5391 5392 map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS); 5393 if (!map) 5394 return ERR_PTR(-ENOMEM); 5395 map->num_stripes = ctl->num_stripes; 5396 5397 for (i = 0; i < ctl->ndevs; ++i) { 5398 for (j = 0; j < ctl->dev_stripes; ++j) { 5399 int s = i * ctl->dev_stripes + j; 5400 map->stripes[s].dev = devices_info[i].dev; 5401 map->stripes[s].physical = devices_info[i].dev_offset + 5402 j * ctl->stripe_size; 5403 } 5404 } 5405 map->stripe_len = BTRFS_STRIPE_LEN; 5406 map->io_align = BTRFS_STRIPE_LEN; 5407 map->io_width = BTRFS_STRIPE_LEN; 5408 map->type = type; 5409 map->sub_stripes = ctl->sub_stripes; 5410 5411 trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size); 5412 5413 em = alloc_extent_map(); 5414 if (!em) { 5415 kfree(map); 5416 return ERR_PTR(-ENOMEM); 5417 } 5418 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 5419 em->map_lookup = map; 5420 em->start = start; 5421 em->len = ctl->chunk_size; 5422 em->block_start = 0; 5423 em->block_len = em->len; 5424 em->orig_block_len = ctl->stripe_size; 5425 5426 em_tree = &info->mapping_tree; 5427 write_lock(&em_tree->lock); 5428 ret = add_extent_mapping(em_tree, em, 0); 5429 if (ret) { 5430 write_unlock(&em_tree->lock); 5431 free_extent_map(em); 5432 return ERR_PTR(ret); 5433 } 5434 write_unlock(&em_tree->lock); 5435 5436 block_group = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size); 5437 if (IS_ERR(block_group)) 5438 goto error_del_extent; 5439 5440 for (i = 0; i < map->num_stripes; i++) { 5441 struct btrfs_device *dev = map->stripes[i].dev; 5442 5443 btrfs_device_set_bytes_used(dev, 5444 dev->bytes_used + ctl->stripe_size); 5445 if (list_empty(&dev->post_commit_list)) 5446 list_add_tail(&dev->post_commit_list, 5447 &trans->transaction->dev_update_list); 5448 } 5449 5450 atomic64_sub(ctl->stripe_size * map->num_stripes, 5451 &info->free_chunk_space); 5452 5453 free_extent_map(em); 5454 check_raid56_incompat_flag(info, type); 5455 check_raid1c34_incompat_flag(info, type); 5456 5457 return block_group; 5458 5459 error_del_extent: 5460 write_lock(&em_tree->lock); 5461 remove_extent_mapping(em_tree, em); 5462 write_unlock(&em_tree->lock); 5463 5464 /* One for our allocation */ 5465 free_extent_map(em); 5466 /* One for the tree reference */ 5467 free_extent_map(em); 5468 5469 return block_group; 5470 } 5471 5472 struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans, 5473 u64 
type)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = info->fs_devices;
	struct btrfs_device_info *devices_info = NULL;
	struct alloc_chunk_ctl ctl;
	struct btrfs_block_group *block_group;
	int ret;

	lockdep_assert_held(&info->chunk_mutex);

	if (!alloc_profile_is_valid(type, 0)) {
		ASSERT(0);
		return ERR_PTR(-EINVAL);
	}

	if (list_empty(&fs_devices->alloc_list)) {
		if (btrfs_test_opt(info, ENOSPC_DEBUG))
			btrfs_debug(info, "%s: no writable device", __func__);
		return ERR_PTR(-ENOSPC);
	}

	if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
		btrfs_err(info, "invalid chunk type 0x%llx requested", type);
		ASSERT(0);
		return ERR_PTR(-EINVAL);
	}

	ctl.start = find_next_chunk(info);
	ctl.type = type;
	init_alloc_chunk_ctl(fs_devices, &ctl);

	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
			       GFP_NOFS);
	if (!devices_info)
		return ERR_PTR(-ENOMEM);

	ret = gather_device_info(fs_devices, &ctl, devices_info);
	if (ret < 0) {
		block_group = ERR_PTR(ret);
		goto out;
	}

	ret = decide_stripe_size(fs_devices, &ctl, devices_info);
	if (ret < 0) {
		block_group = ERR_PTR(ret);
		goto out;
	}

	block_group = create_chunk(trans, &ctl, devices_info);

out:
	kfree(devices_info);
	return block_group;
}

/*
 * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to
 * phase 1 of chunk allocation. It belongs to phase 2 only when allocating
 * system chunks.
 *
 * See the comment at btrfs_chunk_alloc() for details about the chunk
 * allocation phases.
 */
int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
				     struct btrfs_block_group *bg)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_stripe *stripe;
	struct extent_map *em;
	struct map_lookup *map;
	size_t item_size;
	int i;
	int ret;

	/*
	 * We take the chunk_mutex for 2 reasons:
	 *
	 * 1) Updates and insertions in the chunk btree must be done while holding
	 *    the chunk_mutex, as well as updating the system chunk array in the
	 *    superblock. See the comment on top of btrfs_chunk_alloc() for the
	 *    details;
	 *
	 * 2) To prevent races with the final phase of a device replace operation
	 *    that replaces the device object associated with the map's stripes,
	 *    because the device object's id can change at any time during that
	 *    final phase of the device replace operation
	 *    (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
	 *    replaced device and then see it with an ID of
	 *    BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating
	 *    the device item, which does not exist, or persisting a stripe of
	 *    the chunk item with such ID.
	 *    Here we can't use the device_list_mutex because our caller already
	 *    has locked the chunk_mutex, and the final phase of device replace
	 *    acquires both mutexes - first the device_list_mutex and then the
	 *    chunk_mutex. Using any of those two mutexes protects us from a
	 *    concurrent device replace.
5572 */ 5573 lockdep_assert_held(&fs_info->chunk_mutex); 5574 5575 em = btrfs_get_chunk_map(fs_info, bg->start, bg->length); 5576 if (IS_ERR(em)) { 5577 ret = PTR_ERR(em); 5578 btrfs_abort_transaction(trans, ret); 5579 return ret; 5580 } 5581 5582 map = em->map_lookup; 5583 item_size = btrfs_chunk_item_size(map->num_stripes); 5584 5585 chunk = kzalloc(item_size, GFP_NOFS); 5586 if (!chunk) { 5587 ret = -ENOMEM; 5588 btrfs_abort_transaction(trans, ret); 5589 goto out; 5590 } 5591 5592 for (i = 0; i < map->num_stripes; i++) { 5593 struct btrfs_device *device = map->stripes[i].dev; 5594 5595 ret = btrfs_update_device(trans, device); 5596 if (ret) 5597 goto out; 5598 } 5599 5600 stripe = &chunk->stripe; 5601 for (i = 0; i < map->num_stripes; i++) { 5602 struct btrfs_device *device = map->stripes[i].dev; 5603 const u64 dev_offset = map->stripes[i].physical; 5604 5605 btrfs_set_stack_stripe_devid(stripe, device->devid); 5606 btrfs_set_stack_stripe_offset(stripe, dev_offset); 5607 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE); 5608 stripe++; 5609 } 5610 5611 btrfs_set_stack_chunk_length(chunk, bg->length); 5612 btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID); 5613 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len); 5614 btrfs_set_stack_chunk_type(chunk, map->type); 5615 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes); 5616 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len); 5617 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len); 5618 btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize); 5619 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes); 5620 5621 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 5622 key.type = BTRFS_CHUNK_ITEM_KEY; 5623 key.offset = bg->start; 5624 5625 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); 5626 if (ret) 5627 goto out; 5628 5629 bg->chunk_item_inserted = 1; 5630 5631 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 5632 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size); 5633 if (ret) 5634 goto out; 5635 } 5636 5637 out: 5638 kfree(chunk); 5639 free_extent_map(em); 5640 return ret; 5641 } 5642 5643 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans) 5644 { 5645 struct btrfs_fs_info *fs_info = trans->fs_info; 5646 u64 alloc_profile; 5647 struct btrfs_block_group *meta_bg; 5648 struct btrfs_block_group *sys_bg; 5649 5650 /* 5651 * When adding a new device for sprouting, the seed device is read-only 5652 * so we must first allocate a metadata and a system chunk. But before 5653 * adding the block group items to the extent, device and chunk btrees, 5654 * we must first: 5655 * 5656 * 1) Create both chunks without doing any changes to the btrees, as 5657 * otherwise we would get -ENOSPC since the block groups from the 5658 * seed device are read-only; 5659 * 5660 * 2) Add the device item for the new sprout device - finishing the setup 5661 * of a new block group requires updating the device item in the chunk 5662 * btree, so it must exist when we attempt to do it. The previous step 5663 * ensures this does not fail with -ENOSPC. 5664 * 5665 * After that we can add the block group items to their btrees: 5666 * update existing device item in the chunk btree, add a new block group 5667 * item to the extent btree, add a new chunk item to the chunk btree and 5668 * finally add the new device extent items to the devices btree. 
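	 *
	 * The two btrfs_create_chunk() calls below implement step 1; the
	 * block group items themselves are inserted into the btrees later,
	 * when the transaction's pending block groups are processed (see
	 * btrfs_create_pending_block_groups()).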
5669 */ 5670 5671 alloc_profile = btrfs_metadata_alloc_profile(fs_info); 5672 meta_bg = btrfs_create_chunk(trans, alloc_profile); 5673 if (IS_ERR(meta_bg)) 5674 return PTR_ERR(meta_bg); 5675 5676 alloc_profile = btrfs_system_alloc_profile(fs_info); 5677 sys_bg = btrfs_create_chunk(trans, alloc_profile); 5678 if (IS_ERR(sys_bg)) 5679 return PTR_ERR(sys_bg); 5680 5681 return 0; 5682 } 5683 5684 static inline int btrfs_chunk_max_errors(struct map_lookup *map) 5685 { 5686 const int index = btrfs_bg_flags_to_raid_index(map->type); 5687 5688 return btrfs_raid_array[index].tolerated_failures; 5689 } 5690 5691 bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset) 5692 { 5693 struct extent_map *em; 5694 struct map_lookup *map; 5695 int miss_ndevs = 0; 5696 int i; 5697 bool ret = true; 5698 5699 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 5700 if (IS_ERR(em)) 5701 return false; 5702 5703 map = em->map_lookup; 5704 for (i = 0; i < map->num_stripes; i++) { 5705 if (test_bit(BTRFS_DEV_STATE_MISSING, 5706 &map->stripes[i].dev->dev_state)) { 5707 miss_ndevs++; 5708 continue; 5709 } 5710 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, 5711 &map->stripes[i].dev->dev_state)) { 5712 ret = false; 5713 goto end; 5714 } 5715 } 5716 5717 /* 5718 * If the number of missing devices is larger than max errors, we can 5719 * not write the data into that chunk successfully. 5720 */ 5721 if (miss_ndevs > btrfs_chunk_max_errors(map)) 5722 ret = false; 5723 end: 5724 free_extent_map(em); 5725 return ret; 5726 } 5727 5728 void btrfs_mapping_tree_free(struct extent_map_tree *tree) 5729 { 5730 struct extent_map *em; 5731 5732 while (1) { 5733 write_lock(&tree->lock); 5734 em = lookup_extent_mapping(tree, 0, (u64)-1); 5735 if (em) 5736 remove_extent_mapping(tree, em); 5737 write_unlock(&tree->lock); 5738 if (!em) 5739 break; 5740 /* once for us */ 5741 free_extent_map(em); 5742 /* once for the tree */ 5743 free_extent_map(em); 5744 } 5745 } 5746 5747 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5748 { 5749 struct extent_map *em; 5750 struct map_lookup *map; 5751 int ret; 5752 5753 em = btrfs_get_chunk_map(fs_info, logical, len); 5754 if (IS_ERR(em)) 5755 /* 5756 * We could return errors for these cases, but that could get 5757 * ugly and we'd probably do the same thing which is just not do 5758 * anything else and exit, so return 1 so the callers don't try 5759 * to use other copies. 5760 */ 5761 return 1; 5762 5763 map = em->map_lookup; 5764 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK)) 5765 ret = map->num_stripes; 5766 else if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5767 ret = map->sub_stripes; 5768 else if (map->type & BTRFS_BLOCK_GROUP_RAID5) 5769 ret = 2; 5770 else if (map->type & BTRFS_BLOCK_GROUP_RAID6) 5771 /* 5772 * There could be two corrupted data stripes, we need 5773 * to loop retry in order to rebuild the correct data. 5774 * 5775 * Fail a stripe at a time on every retry except the 5776 * stripe under reconstruction. 
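		 *
		 * RAID6 stores 2 parity stripes, so returning
		 * map->num_stripes below gives the retry loop enough mirrors
		 * to step past both corrupted stripes.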
		 */
		ret = map->num_stripes;
	else
		ret = 1;
	free_extent_map(em);

	down_read(&fs_info->dev_replace.rwsem);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
	    fs_info->dev_replace.tgtdev)
		ret++;
	up_read(&fs_info->dev_replace.rwsem);

	return ret;
}

unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
				    u64 logical)
{
	struct extent_map *em;
	struct map_lookup *map;
	unsigned long len = fs_info->sectorsize;

	em = btrfs_get_chunk_map(fs_info, logical, len);

	if (!WARN_ON(IS_ERR(em))) {
		map = em->map_lookup;
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			len = map->stripe_len * nr_data_stripes(map);
		free_extent_map(em);
	}
	return len;
}

int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	int ret = 0;

	em = btrfs_get_chunk_map(fs_info, logical, len);

	if (!WARN_ON(IS_ERR(em))) {
		map = em->map_lookup;
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			ret = 1;
		free_extent_map(em);
	}
	return ret;
}

static int find_live_mirror(struct btrfs_fs_info *fs_info,
			    struct map_lookup *map, int first,
			    int dev_replace_is_ongoing)
{
	int i;
	int num_stripes;
	int preferred_mirror;
	int tolerance;
	struct btrfs_device *srcdev;

	ASSERT((map->type &
		(BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));

	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		num_stripes = map->sub_stripes;
	else
		num_stripes = map->num_stripes;

	switch (fs_info->fs_devices->read_policy) {
	default:
		/* Shouldn't happen, just warn and use pid instead of failing */
		btrfs_warn_rl(fs_info,
			      "unknown read_policy type %u, reset to pid",
			      fs_info->fs_devices->read_policy);
		fs_info->fs_devices->read_policy = BTRFS_READ_POLICY_PID;
		fallthrough;
	case BTRFS_READ_POLICY_PID:
		preferred_mirror = first + (current->pid % num_stripes);
		break;
	}

	if (dev_replace_is_ongoing &&
	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
	    BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
		srcdev = fs_info->dev_replace.srcdev;
	else
		srcdev = NULL;

	/*
	 * Try to avoid the drive that is the source drive for a dev-replace
	 * procedure; only choose it if no other non-missing mirror is
	 * available.
	 */
	for (tolerance = 0; tolerance < 2; tolerance++) {
		if (map->stripes[preferred_mirror].dev->bdev &&
		    (tolerance || map->stripes[preferred_mirror].dev != srcdev))
			return preferred_mirror;
		for (i = first; i < first + num_stripes; i++) {
			if (map->stripes[i].dev->bdev &&
			    (tolerance || map->stripes[i].dev != srcdev))
				return i;
		}
	}

	/*
	 * We couldn't find one that doesn't fail.
	 * Just return something and the I/O error handling code will clean
	 * up eventually.
	 */
	return preferred_mirror;
}

/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
static void sort_parity_stripes(struct btrfs_io_context *bioc, int num_stripes)
{
	int i;
	int again = 1;

	while (again) {
		again = 0;
		for (i = 0; i < num_stripes - 1; i++) {
			/* Swap if parity is on a smaller index */
			if (bioc->raid_map[i] > bioc->raid_map[i + 1]) {
				swap(bioc->stripes[i], bioc->stripes[i + 1]);
				swap(bioc->raid_map[i], bioc->raid_map[i + 1]);
				again = 1;
			}
		}
	}
}

static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info,
						       int total_stripes,
						       int real_stripes)
{
	struct btrfs_io_context *bioc = kzalloc(
		/* The size of btrfs_io_context */
		sizeof(struct btrfs_io_context) +
		/* Plus the variable array for the stripes */
		sizeof(struct btrfs_io_stripe) * (total_stripes) +
		/* Plus the variable array for the tgt dev */
		sizeof(int) * (real_stripes) +
		/*
		 * Plus the raid_map, which includes both the tgt dev
		 * and the stripes.
		 */
		sizeof(u64) * (total_stripes),
		GFP_NOFS|__GFP_NOFAIL);

	atomic_set(&bioc->error, 0);
	refcount_set(&bioc->refs, 1);

	bioc->fs_info = fs_info;
	bioc->tgtdev_map = (int *)(bioc->stripes + total_stripes);
	bioc->raid_map = (u64 *)(bioc->tgtdev_map + real_stripes);

	return bioc;
}

void btrfs_get_bioc(struct btrfs_io_context *bioc)
{
	WARN_ON(!refcount_read(&bioc->refs));
	refcount_inc(&bioc->refs);
}

void btrfs_put_bioc(struct btrfs_io_context *bioc)
{
	if (!bioc)
		return;
	if (refcount_dec_and_test(&bioc->refs))
		kfree(bioc);
}

/* Can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */
/*
 * Note that discard won't be sent to the target device of the device
 * replace.
 */
static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
					 u64 logical, u64 *length_ret,
					 struct btrfs_io_context **bioc_ret)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_io_context *bioc;
	u64 length = *length_ret;
	u64 offset;
	u64 stripe_nr;
	u64 stripe_nr_end;
	u64 stripe_end_offset;
	u64 stripe_cnt;
	u64 stripe_len;
	u64 stripe_offset;
	u64 num_stripes;
	u32 stripe_index;
	u32 factor = 0;
	u32 sub_stripes = 0;
	u64 stripes_per_dev = 0;
	u32 remaining_stripes = 0;
	u32 last_stripe = 0;
	int ret = 0;
	int i;

	/*
	 * Discard always returns a bioc.
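	 * On success the mapped length is trimmed to the chunk boundary and
	 * *length_ret is updated, so the caller can issue the remainder of
	 * the range as a separate discard.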
*/ 5979 ASSERT(bioc_ret); 5980 5981 em = btrfs_get_chunk_map(fs_info, logical, length); 5982 if (IS_ERR(em)) 5983 return PTR_ERR(em); 5984 5985 map = em->map_lookup; 5986 /* we don't discard raid56 yet */ 5987 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 5988 ret = -EOPNOTSUPP; 5989 goto out; 5990 } 5991 5992 offset = logical - em->start; 5993 length = min_t(u64, em->start + em->len - logical, length); 5994 *length_ret = length; 5995 5996 stripe_len = map->stripe_len; 5997 /* 5998 * stripe_nr counts the total number of stripes we have to stride 5999 * to get to this block 6000 */ 6001 stripe_nr = div64_u64(offset, stripe_len); 6002 6003 /* stripe_offset is the offset of this block in its stripe */ 6004 stripe_offset = offset - stripe_nr * stripe_len; 6005 6006 stripe_nr_end = round_up(offset + length, map->stripe_len); 6007 stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len); 6008 stripe_cnt = stripe_nr_end - stripe_nr; 6009 stripe_end_offset = stripe_nr_end * map->stripe_len - 6010 (offset + length); 6011 /* 6012 * after this, stripe_nr is the number of stripes on this 6013 * device we have to walk to find the data, and stripe_index is 6014 * the number of our device in the stripe array 6015 */ 6016 num_stripes = 1; 6017 stripe_index = 0; 6018 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 6019 BTRFS_BLOCK_GROUP_RAID10)) { 6020 if (map->type & BTRFS_BLOCK_GROUP_RAID0) 6021 sub_stripes = 1; 6022 else 6023 sub_stripes = map->sub_stripes; 6024 6025 factor = map->num_stripes / sub_stripes; 6026 num_stripes = min_t(u64, map->num_stripes, 6027 sub_stripes * stripe_cnt); 6028 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 6029 stripe_index *= sub_stripes; 6030 stripes_per_dev = div_u64_rem(stripe_cnt, factor, 6031 &remaining_stripes); 6032 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe); 6033 last_stripe *= sub_stripes; 6034 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK | 6035 BTRFS_BLOCK_GROUP_DUP)) { 6036 num_stripes = map->num_stripes; 6037 } else { 6038 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6039 &stripe_index); 6040 } 6041 6042 bioc = alloc_btrfs_io_context(fs_info, num_stripes, 0); 6043 if (!bioc) { 6044 ret = -ENOMEM; 6045 goto out; 6046 } 6047 6048 for (i = 0; i < num_stripes; i++) { 6049 bioc->stripes[i].physical = 6050 map->stripes[stripe_index].physical + 6051 stripe_offset + stripe_nr * map->stripe_len; 6052 bioc->stripes[i].dev = map->stripes[stripe_index].dev; 6053 6054 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 6055 BTRFS_BLOCK_GROUP_RAID10)) { 6056 bioc->stripes[i].length = stripes_per_dev * 6057 map->stripe_len; 6058 6059 if (i / sub_stripes < remaining_stripes) 6060 bioc->stripes[i].length += map->stripe_len; 6061 6062 /* 6063 * Special for the first stripe and 6064 * the last stripe: 6065 * 6066 * |-------|...|-------| 6067 * |----------| 6068 * off end_off 6069 */ 6070 if (i < sub_stripes) 6071 bioc->stripes[i].length -= stripe_offset; 6072 6073 if (stripe_index >= last_stripe && 6074 stripe_index <= (last_stripe + 6075 sub_stripes - 1)) 6076 bioc->stripes[i].length -= stripe_end_offset; 6077 6078 if (i == sub_stripes - 1) 6079 stripe_offset = 0; 6080 } else { 6081 bioc->stripes[i].length = length; 6082 } 6083 6084 stripe_index++; 6085 if (stripe_index == map->num_stripes) { 6086 stripe_index = 0; 6087 stripe_nr++; 6088 } 6089 } 6090 6091 *bioc_ret = bioc; 6092 bioc->map_type = map->type; 6093 bioc->num_stripes = num_stripes; 6094 out: 6095 free_extent_map(em); 6096 return ret; 6097 } 6098 6099 /* 6100 * In dev-replace case, for 
 * repair case (that's the only case where the mirror is selected explicitly
 * when calling btrfs_map_block), blocks left of the left cursor can also be
 * read from the target drive.
 *
 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
 * array of stripes.
 * For READ, it also needs to be supported using the same mirror number.
 *
 * If the requested block is not left of the left cursor, EIO is returned. This
 * can happen because btrfs_num_copies() returns one more in the dev-replace
 * case.
 */
static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
					 u64 logical, u64 length,
					 u64 srcdev_devid, int *mirror_num,
					 u64 *physical)
{
	struct btrfs_io_context *bioc = NULL;
	int num_stripes;
	int index_srcdev = 0;
	int found = 0;
	u64 physical_of_found = 0;
	int i;
	int ret = 0;

	ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
				logical, &length, &bioc, 0, 0);
	if (ret) {
		ASSERT(bioc == NULL);
		return ret;
	}

	num_stripes = bioc->num_stripes;
	if (*mirror_num > num_stripes) {
		/*
		 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
		 * which means that the requested area is not left of the
		 * left cursor.
		 */
		btrfs_put_bioc(bioc);
		return -EIO;
	}

	/*
	 * Process the rest of the function using the mirror_num of the source
	 * drive. Therefore look it up first. At the end, patch the device
	 * pointer to the one of the target drive.
	 */
	for (i = 0; i < num_stripes; i++) {
		if (bioc->stripes[i].dev->devid != srcdev_devid)
			continue;

		/*
		 * In case of DUP, in order to keep it simple, only add the
		 * mirror with the lowest physical address.
		 */
		if (found &&
		    physical_of_found <= bioc->stripes[i].physical)
			continue;

		index_srcdev = i;
		found = 1;
		physical_of_found = bioc->stripes[i].physical;
	}

	btrfs_put_bioc(bioc);

	ASSERT(found);
	if (!found)
		return -EIO;

	*mirror_num = index_srcdev + 1;
	*physical = physical_of_found;
	return ret;
}

static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical)
{
	struct btrfs_block_group *cache;
	bool ret;

	/* Non-zoned filesystems do not use the "to_copy" flag */
	if (!btrfs_is_zoned(fs_info))
		return false;

	cache = btrfs_lookup_block_group(fs_info, logical);

	spin_lock(&cache->lock);
	ret = cache->to_copy;
	spin_unlock(&cache->lock);

	btrfs_put_block_group(cache);
	return ret;
}

static void handle_ops_on_dev_replace(enum btrfs_map_op op,
				      struct btrfs_io_context **bioc_ret,
				      struct btrfs_dev_replace *dev_replace,
				      u64 logical,
				      int *num_stripes_ret, int *max_errors_ret)
{
	struct btrfs_io_context *bioc = *bioc_ret;
	u64 srcdev_devid = dev_replace->srcdev->devid;
	int tgtdev_indexes = 0;
	int num_stripes = *num_stripes_ret;
	int max_errors = *max_errors_ret;
	int i;

	if (op == BTRFS_MAP_WRITE) {
		int index_where_to_add;

		/*
		 * A block group which has "to_copy" set will eventually be
		 * copied by the dev-replace process. We can avoid cloning the
		 * IO here.
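		 * The block group will be copied in full later on, which
		 * picks up this write as well, so duplicating it to the
		 * target now is unnecessary.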
6214 */ 6215 if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical)) 6216 return; 6217 6218 /* 6219 * duplicate the write operations while the dev replace 6220 * procedure is running. Since the copying of the old disk to 6221 * the new disk takes place at run time while the filesystem is 6222 * mounted writable, the regular write operations to the old 6223 * disk have to be duplicated to go to the new disk as well. 6224 * 6225 * Note that device->missing is handled by the caller, and that 6226 * the write to the old disk is already set up in the stripes 6227 * array. 6228 */ 6229 index_where_to_add = num_stripes; 6230 for (i = 0; i < num_stripes; i++) { 6231 if (bioc->stripes[i].dev->devid == srcdev_devid) { 6232 /* write to new disk, too */ 6233 struct btrfs_io_stripe *new = 6234 bioc->stripes + index_where_to_add; 6235 struct btrfs_io_stripe *old = 6236 bioc->stripes + i; 6237 6238 new->physical = old->physical; 6239 new->length = old->length; 6240 new->dev = dev_replace->tgtdev; 6241 bioc->tgtdev_map[i] = index_where_to_add; 6242 index_where_to_add++; 6243 max_errors++; 6244 tgtdev_indexes++; 6245 } 6246 } 6247 num_stripes = index_where_to_add; 6248 } else if (op == BTRFS_MAP_GET_READ_MIRRORS) { 6249 int index_srcdev = 0; 6250 int found = 0; 6251 u64 physical_of_found = 0; 6252 6253 /* 6254 * During the dev-replace procedure, the target drive can also 6255 * be used to read data in case it is needed to repair a corrupt 6256 * block elsewhere. This is possible if the requested area is 6257 * left of the left cursor. In this area, the target drive is a 6258 * full copy of the source drive. 6259 */ 6260 for (i = 0; i < num_stripes; i++) { 6261 if (bioc->stripes[i].dev->devid == srcdev_devid) { 6262 /* 6263 * In case of DUP, in order to keep it simple, 6264 * only add the mirror with the lowest physical 6265 * address 6266 */ 6267 if (found && 6268 physical_of_found <= bioc->stripes[i].physical) 6269 continue; 6270 index_srcdev = i; 6271 found = 1; 6272 physical_of_found = bioc->stripes[i].physical; 6273 } 6274 } 6275 if (found) { 6276 struct btrfs_io_stripe *tgtdev_stripe = 6277 bioc->stripes + num_stripes; 6278 6279 tgtdev_stripe->physical = physical_of_found; 6280 tgtdev_stripe->length = 6281 bioc->stripes[index_srcdev].length; 6282 tgtdev_stripe->dev = dev_replace->tgtdev; 6283 bioc->tgtdev_map[index_srcdev] = num_stripes; 6284 6285 tgtdev_indexes++; 6286 num_stripes++; 6287 } 6288 } 6289 6290 *num_stripes_ret = num_stripes; 6291 *max_errors_ret = max_errors; 6292 bioc->num_tgtdevs = tgtdev_indexes; 6293 *bioc_ret = bioc; 6294 } 6295 6296 static bool need_full_stripe(enum btrfs_map_op op) 6297 { 6298 return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS); 6299 } 6300 6301 /* 6302 * Calculate the geometry of a particular (address, len) tuple. This 6303 * information is used to calculate how big a particular bio can get before it 6304 * straddles a stripe. 6305 * 6306 * @fs_info: the filesystem 6307 * @em: mapping containing the logical extent 6308 * @op: type of operation - write or read 6309 * @logical: address that we want to figure out the geometry of 6310 * @io_geom: pointer used to return values 6311 * 6312 * Returns < 0 in case a chunk for the given logical address cannot be found, 6313 * usually shouldn't happen unless @logical is corrupted, 0 otherwise. 
 */
int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *em,
			  enum btrfs_map_op op, u64 logical,
			  struct btrfs_io_geometry *io_geom)
{
	struct map_lookup *map;
	u64 len;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	u64 stripe_len;
	u64 raid56_full_stripe_start = (u64)-1;
	int data_stripes;

	ASSERT(op != BTRFS_MAP_DISCARD);

	map = em->map_lookup;
	/* Offset of this logical address in the chunk */
	offset = logical - em->start;
	/* Len of a stripe in a chunk */
	stripe_len = map->stripe_len;
	/* Stripe where this block falls in */
	stripe_nr = div64_u64(offset, stripe_len);
	/* Offset of stripe in the chunk */
	stripe_offset = stripe_nr * stripe_len;
	if (offset < stripe_offset) {
		btrfs_crit(fs_info,
"stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
			   stripe_offset, offset, em->start, logical, stripe_len);
		return -EINVAL;
	}

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_offset;
	data_stripes = nr_data_stripes(map);

	/* Only stripe-based profiles need to check against the stripe length. */
	if (map->type & BTRFS_BLOCK_GROUP_STRIPE_MASK) {
		u64 max_len = stripe_len - stripe_offset;

		/*
		 * In case of raid56, we need to know the stripe aligned start.
		 */
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
			unsigned long full_stripe_len = stripe_len * data_stripes;

			raid56_full_stripe_start = offset;

			/*
			 * Allow a write of a full stripe, but make sure we
			 * don't allow straddling of stripes.
			 */
			raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
							     full_stripe_len);
			raid56_full_stripe_start *= full_stripe_len;

			/*
			 * For writes to RAID[56], allow a full stripeset across
			 * all disks. For other RAID types and for RAID[56]
			 * reads, just allow a single stripe (on a single disk).
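			 *
			 * E.g. (illustrative): with a 64K stripe_len and 3
			 * data stripes, a write at offset 160K into the chunk
			 * gets raid56_full_stripe_start = 0 (160K rounded
			 * down to the 192K full stripe) and max_len =
			 * 192K - 160K = 32K, i.e. up to the full stripe end.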
6373 */ 6374 if (op == BTRFS_MAP_WRITE) { 6375 max_len = stripe_len * data_stripes - 6376 (offset - raid56_full_stripe_start); 6377 } 6378 } 6379 len = min_t(u64, em->len - offset, max_len); 6380 } else { 6381 len = em->len - offset; 6382 } 6383 6384 io_geom->len = len; 6385 io_geom->offset = offset; 6386 io_geom->stripe_len = stripe_len; 6387 io_geom->stripe_nr = stripe_nr; 6388 io_geom->stripe_offset = stripe_offset; 6389 io_geom->raid56_stripe_offset = raid56_full_stripe_start; 6390 6391 return 0; 6392 } 6393 6394 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, 6395 enum btrfs_map_op op, 6396 u64 logical, u64 *length, 6397 struct btrfs_io_context **bioc_ret, 6398 int mirror_num, int need_raid_map) 6399 { 6400 struct extent_map *em; 6401 struct map_lookup *map; 6402 u64 stripe_offset; 6403 u64 stripe_nr; 6404 u64 stripe_len; 6405 u32 stripe_index; 6406 int data_stripes; 6407 int i; 6408 int ret = 0; 6409 int num_stripes; 6410 int max_errors = 0; 6411 int tgtdev_indexes = 0; 6412 struct btrfs_io_context *bioc = NULL; 6413 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; 6414 int dev_replace_is_ongoing = 0; 6415 int num_alloc_stripes; 6416 int patch_the_first_stripe_for_dev_replace = 0; 6417 u64 physical_to_patch_in_first_stripe = 0; 6418 u64 raid56_full_stripe_start = (u64)-1; 6419 struct btrfs_io_geometry geom; 6420 6421 ASSERT(bioc_ret); 6422 ASSERT(op != BTRFS_MAP_DISCARD); 6423 6424 em = btrfs_get_chunk_map(fs_info, logical, *length); 6425 ASSERT(!IS_ERR(em)); 6426 6427 ret = btrfs_get_io_geometry(fs_info, em, op, logical, &geom); 6428 if (ret < 0) 6429 return ret; 6430 6431 map = em->map_lookup; 6432 6433 *length = geom.len; 6434 stripe_len = geom.stripe_len; 6435 stripe_nr = geom.stripe_nr; 6436 stripe_offset = geom.stripe_offset; 6437 raid56_full_stripe_start = geom.raid56_stripe_offset; 6438 data_stripes = nr_data_stripes(map); 6439 6440 down_read(&dev_replace->rwsem); 6441 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace); 6442 /* 6443 * Hold the semaphore for read during the whole operation, write is 6444 * requested at commit time but must wait. 
6445 */ 6446 if (!dev_replace_is_ongoing) 6447 up_read(&dev_replace->rwsem); 6448 6449 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 && 6450 !need_full_stripe(op) && dev_replace->tgtdev != NULL) { 6451 ret = get_extra_mirror_from_replace(fs_info, logical, *length, 6452 dev_replace->srcdev->devid, 6453 &mirror_num, 6454 &physical_to_patch_in_first_stripe); 6455 if (ret) 6456 goto out; 6457 else 6458 patch_the_first_stripe_for_dev_replace = 1; 6459 } else if (mirror_num > map->num_stripes) { 6460 mirror_num = 0; 6461 } 6462 6463 num_stripes = 1; 6464 stripe_index = 0; 6465 if (map->type & BTRFS_BLOCK_GROUP_RAID0) { 6466 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6467 &stripe_index); 6468 if (!need_full_stripe(op)) 6469 mirror_num = 1; 6470 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) { 6471 if (need_full_stripe(op)) 6472 num_stripes = map->num_stripes; 6473 else if (mirror_num) 6474 stripe_index = mirror_num - 1; 6475 else { 6476 stripe_index = find_live_mirror(fs_info, map, 0, 6477 dev_replace_is_ongoing); 6478 mirror_num = stripe_index + 1; 6479 } 6480 6481 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { 6482 if (need_full_stripe(op)) { 6483 num_stripes = map->num_stripes; 6484 } else if (mirror_num) { 6485 stripe_index = mirror_num - 1; 6486 } else { 6487 mirror_num = 1; 6488 } 6489 6490 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { 6491 u32 factor = map->num_stripes / map->sub_stripes; 6492 6493 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 6494 stripe_index *= map->sub_stripes; 6495 6496 if (need_full_stripe(op)) 6497 num_stripes = map->sub_stripes; 6498 else if (mirror_num) 6499 stripe_index += mirror_num - 1; 6500 else { 6501 int old_stripe_index = stripe_index; 6502 stripe_index = find_live_mirror(fs_info, map, 6503 stripe_index, 6504 dev_replace_is_ongoing); 6505 mirror_num = stripe_index - old_stripe_index + 1; 6506 } 6507 6508 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6509 if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) { 6510 /* push stripe_nr back to the start of the full stripe */ 6511 stripe_nr = div64_u64(raid56_full_stripe_start, 6512 stripe_len * data_stripes); 6513 6514 /* RAID[56] write or recovery. Return all stripes */ 6515 num_stripes = map->num_stripes; 6516 max_errors = nr_parity_stripes(map); 6517 6518 *length = map->stripe_len; 6519 stripe_index = 0; 6520 stripe_offset = 0; 6521 } else { 6522 /* 6523 * Mirror #0 or #1 means the original data block. 6524 * Mirror #2 is RAID5 parity block. 6525 * Mirror #3 is RAID6 Q block. 
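			 *
			 * E.g. mirror_num 3 on a RAID6 chunk yields
			 * stripe_index = data_stripes + 1 below, which the
			 * rotation then maps to the device holding this
			 * stripe set's Q stripe.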
6526 */ 6527 stripe_nr = div_u64_rem(stripe_nr, 6528 data_stripes, &stripe_index); 6529 if (mirror_num > 1) 6530 stripe_index = data_stripes + mirror_num - 2; 6531 6532 /* We distribute the parity blocks across stripes */ 6533 div_u64_rem(stripe_nr + stripe_index, map->num_stripes, 6534 &stripe_index); 6535 if (!need_full_stripe(op) && mirror_num <= 1) 6536 mirror_num = 1; 6537 } 6538 } else { 6539 /* 6540 * after this, stripe_nr is the number of stripes on this 6541 * device we have to walk to find the data, and stripe_index is 6542 * the number of our device in the stripe array 6543 */ 6544 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6545 &stripe_index); 6546 mirror_num = stripe_index + 1; 6547 } 6548 if (stripe_index >= map->num_stripes) { 6549 btrfs_crit(fs_info, 6550 "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u", 6551 stripe_index, map->num_stripes); 6552 ret = -EINVAL; 6553 goto out; 6554 } 6555 6556 num_alloc_stripes = num_stripes; 6557 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) { 6558 if (op == BTRFS_MAP_WRITE) 6559 num_alloc_stripes <<= 1; 6560 if (op == BTRFS_MAP_GET_READ_MIRRORS) 6561 num_alloc_stripes++; 6562 tgtdev_indexes = num_stripes; 6563 } 6564 6565 bioc = alloc_btrfs_io_context(fs_info, num_alloc_stripes, tgtdev_indexes); 6566 if (!bioc) { 6567 ret = -ENOMEM; 6568 goto out; 6569 } 6570 6571 for (i = 0; i < num_stripes; i++) { 6572 bioc->stripes[i].physical = map->stripes[stripe_index].physical + 6573 stripe_offset + stripe_nr * map->stripe_len; 6574 bioc->stripes[i].dev = map->stripes[stripe_index].dev; 6575 stripe_index++; 6576 } 6577 6578 /* Build raid_map */ 6579 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map && 6580 (need_full_stripe(op) || mirror_num > 1)) { 6581 u64 tmp; 6582 unsigned rot; 6583 6584 /* Work out the disk rotation on this stripe-set */ 6585 div_u64_rem(stripe_nr, num_stripes, &rot); 6586 6587 /* Fill in the logical address of each stripe */ 6588 tmp = stripe_nr * data_stripes; 6589 for (i = 0; i < data_stripes; i++) 6590 bioc->raid_map[(i + rot) % num_stripes] = 6591 em->start + (tmp + i) * map->stripe_len; 6592 6593 bioc->raid_map[(i + rot) % map->num_stripes] = RAID5_P_STRIPE; 6594 if (map->type & BTRFS_BLOCK_GROUP_RAID6) 6595 bioc->raid_map[(i + rot + 1) % num_stripes] = 6596 RAID6_Q_STRIPE; 6597 6598 sort_parity_stripes(bioc, num_stripes); 6599 } 6600 6601 if (need_full_stripe(op)) 6602 max_errors = btrfs_chunk_max_errors(map); 6603 6604 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL && 6605 need_full_stripe(op)) { 6606 handle_ops_on_dev_replace(op, &bioc, dev_replace, logical, 6607 &num_stripes, &max_errors); 6608 } 6609 6610 *bioc_ret = bioc; 6611 bioc->map_type = map->type; 6612 bioc->num_stripes = num_stripes; 6613 bioc->max_errors = max_errors; 6614 bioc->mirror_num = mirror_num; 6615 6616 /* 6617 * this is the case that REQ_READ && dev_replace_is_ongoing && 6618 * mirror_num == num_stripes + 1 && dev_replace target drive is 6619 * available as a mirror 6620 */ 6621 if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) { 6622 WARN_ON(num_stripes > 1); 6623 bioc->stripes[0].dev = dev_replace->tgtdev; 6624 bioc->stripes[0].physical = physical_to_patch_in_first_stripe; 6625 bioc->mirror_num = map->num_stripes + 1; 6626 } 6627 out: 6628 if (dev_replace_is_ongoing) { 6629 lockdep_assert_held(&dev_replace->rwsem); 6630 /* Unlock and let waiting writers proceed */ 6631 up_read(&dev_replace->rwsem); 6632 } 6633 free_extent_map(em); 6634 return ret; 6635 } 

int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		    u64 logical, u64 *length,
		    struct btrfs_io_context **bioc_ret, int mirror_num)
{
	if (op == BTRFS_MAP_DISCARD)
		return __btrfs_map_block_for_discard(fs_info, logical,
						     length, bioc_ret);

	return __btrfs_map_block(fs_info, op, logical, length, bioc_ret,
				 mirror_num, 0);
}

/* For Scrub/replace */
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		     u64 logical, u64 *length,
		     struct btrfs_io_context **bioc_ret)
{
	return __btrfs_map_block(fs_info, op, logical, length, bioc_ret, 0, 1);
}

static inline void btrfs_end_bioc(struct btrfs_io_context *bioc, struct bio *bio)
{
	bio->bi_private = bioc->private;
	bio->bi_end_io = bioc->end_io;
	bio_endio(bio);

	btrfs_put_bioc(bioc);
}

static void btrfs_end_bio(struct bio *bio)
{
	struct btrfs_io_context *bioc = bio->bi_private;
	int is_orig_bio = 0;

	if (bio->bi_status) {
		atomic_inc(&bioc->error);
		if (bio->bi_status == BLK_STS_IOERR ||
		    bio->bi_status == BLK_STS_TARGET) {
			struct btrfs_device *dev = btrfs_bio(bio)->device;

			ASSERT(dev->bdev);
			if (btrfs_op(bio) == BTRFS_MAP_WRITE)
				btrfs_dev_stat_inc_and_print(dev,
						BTRFS_DEV_STAT_WRITE_ERRS);
			else if (!(bio->bi_opf & REQ_RAHEAD))
				btrfs_dev_stat_inc_and_print(dev,
						BTRFS_DEV_STAT_READ_ERRS);
			if (bio->bi_opf & REQ_PREFLUSH)
				btrfs_dev_stat_inc_and_print(dev,
						BTRFS_DEV_STAT_FLUSH_ERRS);
		}
	}

	if (bio == bioc->orig_bio)
		is_orig_bio = 1;

	btrfs_bio_counter_dec(bioc->fs_info);

	if (atomic_dec_and_test(&bioc->stripes_pending)) {
		if (!is_orig_bio) {
			bio_put(bio);
			bio = bioc->orig_bio;
		}

		btrfs_bio(bio)->mirror_num = bioc->mirror_num;
		/*
		 * Only send an error to the higher layers if it is beyond
		 * the tolerance of the btrfs bio.
		 */
		if (atomic_read(&bioc->error) > bioc->max_errors) {
			bio->bi_status = BLK_STS_IOERR;
		} else {
			/*
			 * This bio is actually up to date, we didn't go over
			 * the max number of errors.
			 */
			bio->bi_status = BLK_STS_OK;
		}

		btrfs_end_bioc(bioc, bio);
	} else if (!is_orig_bio) {
		bio_put(bio);
	}
}

static void submit_stripe_bio(struct btrfs_io_context *bioc, struct bio *bio,
			      u64 physical, struct btrfs_device *dev)
{
	struct btrfs_fs_info *fs_info = bioc->fs_info;

	bio->bi_private = bioc;
	btrfs_bio(bio)->device = dev;
	bio->bi_end_io = btrfs_end_bio;
	bio->bi_iter.bi_sector = physical >> 9;
	/*
	 * For zone append writing, bi_sector must point to the beginning of
	 * the zone.
	 */
	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
		if (btrfs_dev_is_sequential(dev, physical)) {
			u64 zone_start = round_down(physical, fs_info->zone_size);

			bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT;
		} else {
			bio->bi_opf &= ~REQ_OP_ZONE_APPEND;
			bio->bi_opf |= REQ_OP_WRITE;
		}
	}
	btrfs_debug_in_rcu(fs_info,
	"btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
		bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector,
		(unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name),
		dev->devid, bio->bi_iter.bi_size);
	bio_set_dev(bio, dev->bdev);

	btrfs_bio_counter_inc_noblocked(fs_info);

btrfsic_submit_bio(bio); 6754 } 6755 6756 static void bioc_error(struct btrfs_io_context *bioc, struct bio *bio, u64 logical) 6757 { 6758 atomic_inc(&bioc->error); 6759 if (atomic_dec_and_test(&bioc->stripes_pending)) { 6760 /* Should be the original bio. */ 6761 WARN_ON(bio != bioc->orig_bio); 6762 6763 btrfs_bio(bio)->mirror_num = bioc->mirror_num; 6764 bio->bi_iter.bi_sector = logical >> 9; 6765 if (atomic_read(&bioc->error) > bioc->max_errors) 6766 bio->bi_status = BLK_STS_IOERR; 6767 else 6768 bio->bi_status = BLK_STS_OK; 6769 btrfs_end_bioc(bioc, bio); 6770 } 6771 } 6772 6773 blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, 6774 int mirror_num) 6775 { 6776 struct btrfs_device *dev; 6777 struct bio *first_bio = bio; 6778 u64 logical = bio->bi_iter.bi_sector << 9; 6779 u64 length = 0; 6780 u64 map_length; 6781 int ret; 6782 int dev_nr; 6783 int total_devs; 6784 struct btrfs_io_context *bioc = NULL; 6785 6786 length = bio->bi_iter.bi_size; 6787 map_length = length; 6788 6789 btrfs_bio_counter_inc_blocked(fs_info); 6790 ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical, 6791 &map_length, &bioc, mirror_num, 1); 6792 if (ret) { 6793 btrfs_bio_counter_dec(fs_info); 6794 return errno_to_blk_status(ret); 6795 } 6796 6797 total_devs = bioc->num_stripes; 6798 bioc->orig_bio = first_bio; 6799 bioc->private = first_bio->bi_private; 6800 bioc->end_io = first_bio->bi_end_io; 6801 atomic_set(&bioc->stripes_pending, bioc->num_stripes); 6802 6803 if ((bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) && 6804 ((btrfs_op(bio) == BTRFS_MAP_WRITE) || (mirror_num > 1))) { 6805 /* In this case, map_length has been set to the length of 6806 a single stripe; not the whole write */ 6807 if (btrfs_op(bio) == BTRFS_MAP_WRITE) { 6808 ret = raid56_parity_write(bio, bioc, map_length); 6809 } else { 6810 ret = raid56_parity_recover(bio, bioc, map_length, 6811 mirror_num, 1); 6812 } 6813 6814 btrfs_bio_counter_dec(fs_info); 6815 return errno_to_blk_status(ret); 6816 } 6817 6818 if (map_length < length) { 6819 btrfs_crit(fs_info, 6820 "mapping failed logical %llu bio len %llu len %llu", 6821 logical, length, map_length); 6822 BUG(); 6823 } 6824 6825 for (dev_nr = 0; dev_nr < total_devs; dev_nr++) { 6826 dev = bioc->stripes[dev_nr].dev; 6827 if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING, 6828 &dev->dev_state) || 6829 (btrfs_op(first_bio) == BTRFS_MAP_WRITE && 6830 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) { 6831 bioc_error(bioc, first_bio, logical); 6832 continue; 6833 } 6834 6835 if (dev_nr < total_devs - 1) 6836 bio = btrfs_bio_clone(first_bio); 6837 else 6838 bio = first_bio; 6839 6840 submit_stripe_bio(bioc, bio, bioc->stripes[dev_nr].physical, dev); 6841 } 6842 btrfs_bio_counter_dec(fs_info); 6843 return BLK_STS_OK; 6844 } 6845 6846 static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args, 6847 const struct btrfs_fs_devices *fs_devices) 6848 { 6849 if (args->fsid == NULL) 6850 return true; 6851 if (memcmp(fs_devices->metadata_uuid, args->fsid, BTRFS_FSID_SIZE) == 0) 6852 return true; 6853 return false; 6854 } 6855 6856 static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args, 6857 const struct btrfs_device *device) 6858 { 6859 ASSERT((args->devid != (u64)-1) || args->missing); 6860 6861 if ((args->devid != (u64)-1) && device->devid != args->devid) 6862 return false; 6863 if (args->uuid && memcmp(device->uuid, args->uuid, BTRFS_UUID_SIZE) != 0) 6864 return false; 6865 if (!args->missing) 6866 return true; 6867 if 
(test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) && 6868 !device->bdev) 6869 return true; 6870 return false; 6871 } 6872 6873 /* 6874 * Find a device specified by @devid or @uuid in the list of @fs_devices, or 6875 * return NULL. 6876 * 6877 * If devid and uuid are both specified, the match must be exact, otherwise 6878 * only devid is used. 6879 */ 6880 struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices, 6881 const struct btrfs_dev_lookup_args *args) 6882 { 6883 struct btrfs_device *device; 6884 struct btrfs_fs_devices *seed_devs; 6885 6886 if (dev_args_match_fs_devices(args, fs_devices)) { 6887 list_for_each_entry(device, &fs_devices->devices, dev_list) { 6888 if (dev_args_match_device(args, device)) 6889 return device; 6890 } 6891 } 6892 6893 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 6894 if (!dev_args_match_fs_devices(args, seed_devs)) 6895 continue; 6896 list_for_each_entry(device, &seed_devs->devices, dev_list) { 6897 if (dev_args_match_device(args, device)) 6898 return device; 6899 } 6900 } 6901 6902 return NULL; 6903 } 6904 6905 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices, 6906 u64 devid, u8 *dev_uuid) 6907 { 6908 struct btrfs_device *device; 6909 unsigned int nofs_flag; 6910 6911 /* 6912 * We call this under the chunk_mutex, so we want to use NOFS for this 6913 * allocation, however we don't want to change btrfs_alloc_device() to 6914 * always do NOFS because we use it in a lot of other GFP_KERNEL safe 6915 * places. 6916 */ 6917 nofs_flag = memalloc_nofs_save(); 6918 device = btrfs_alloc_device(NULL, &devid, dev_uuid); 6919 memalloc_nofs_restore(nofs_flag); 6920 if (IS_ERR(device)) 6921 return device; 6922 6923 list_add(&device->dev_list, &fs_devices->devices); 6924 device->fs_devices = fs_devices; 6925 fs_devices->num_devices++; 6926 6927 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 6928 fs_devices->missing_devices++; 6929 6930 return device; 6931 } 6932 6933 /** 6934 * btrfs_alloc_device - allocate struct btrfs_device 6935 * @fs_info: used only for generating a new devid, can be NULL if 6936 * devid is provided (i.e. @devid != NULL). 6937 * @devid: a pointer to devid for this device. If NULL a new devid 6938 * is generated. 6939 * @uuid: a pointer to UUID for this device. If NULL a new UUID 6940 * is generated. 6941 * 6942 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR() 6943 * on error. Returned struct is not linked onto any lists and must be 6944 * destroyed with btrfs_free_device. 
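 *
 * E.g. add_missing_dev() above calls this with both @devid and @uuid
 * supplied, wrapped in memalloc_nofs_save() because it runs under the
 * chunk_mutex while the allocations here use GFP_KERNEL.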
 */
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid,
					const u8 *uuid)
{
	struct btrfs_device *dev;
	u64 tmp;

	if (WARN_ON(!devid && !fs_info))
		return ERR_PTR(-EINVAL);

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate a bio that's always going to be used for flushing
	 * device barriers and matches the device lifespan.
	 */
	dev->flush_bio = bio_kmalloc(GFP_KERNEL, 0);
	if (!dev->flush_bio) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->post_commit_list);

	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	extent_io_tree_init(fs_info, &dev->alloc_state,
			    IO_TREE_DEVICE_ALLOC_STATE, NULL);

	if (devid)
		tmp = *devid;
	else {
		int ret;

		ret = find_next_devid(fs_info, &tmp);
		if (ret) {
			btrfs_free_device(dev);
			return ERR_PTR(ret);
		}
	}
	dev->devid = tmp;

	if (uuid)
		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
	else
		generate_random_uuid(dev->uuid);

	return dev;
}

static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
					u64 devid, u8 *uuid, bool error)
{
	if (error)
		btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
			     devid, uuid);
	else
		btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
			      devid, uuid);
}

static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
{
	const int data_stripes = calc_data_stripes(type, num_stripes);

	return div_u64(chunk_len, data_stripes);
}

#if BITS_PER_LONG == 32
/*
 * Due to page cache limits, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE
 * can't be accessed on 32bit systems.
 *
 * This function does a mount-time check to reject the fs if it already has
 * a metadata chunk beyond that limit.
 */
static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
				  u64 logical, u64 length, u64 type)
{
	if (!(type & BTRFS_BLOCK_GROUP_METADATA))
		return 0;

	if (logical + length < MAX_LFS_FILESIZE)
		return 0;

	btrfs_err_32bit_limit(fs_info);
	return -EOVERFLOW;
}

/*
 * This is to give an early warning for any metadata chunk reaching
 * BTRFS_32BIT_EARLY_WARN_THRESHOLD.
 * Although we can still access the metadata, it's not going to be possible
 * once the limit is reached.
7044 */ 7045 static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info, 7046 u64 logical, u64 length, u64 type) 7047 { 7048 if (!(type & BTRFS_BLOCK_GROUP_METADATA)) 7049 return; 7050 7051 if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD) 7052 return; 7053 7054 btrfs_warn_32bit_limit(fs_info); 7055 } 7056 #endif 7057 7058 static struct btrfs_device *handle_missing_device(struct btrfs_fs_info *fs_info, 7059 u64 devid, u8 *uuid) 7060 { 7061 struct btrfs_device *dev; 7062 7063 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7064 btrfs_report_missing_device(fs_info, devid, uuid, true); 7065 return ERR_PTR(-ENOENT); 7066 } 7067 7068 dev = add_missing_dev(fs_info->fs_devices, devid, uuid); 7069 if (IS_ERR(dev)) { 7070 btrfs_err(fs_info, "failed to init missing device %llu: %ld", 7071 devid, PTR_ERR(dev)); 7072 return dev; 7073 } 7074 btrfs_report_missing_device(fs_info, devid, uuid, false); 7075 7076 return dev; 7077 } 7078 7079 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf, 7080 struct btrfs_chunk *chunk) 7081 { 7082 BTRFS_DEV_LOOKUP_ARGS(args); 7083 struct btrfs_fs_info *fs_info = leaf->fs_info; 7084 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 7085 struct map_lookup *map; 7086 struct extent_map *em; 7087 u64 logical; 7088 u64 length; 7089 u64 devid; 7090 u64 type; 7091 u8 uuid[BTRFS_UUID_SIZE]; 7092 int num_stripes; 7093 int ret; 7094 int i; 7095 7096 logical = key->offset; 7097 length = btrfs_chunk_length(leaf, chunk); 7098 type = btrfs_chunk_type(leaf, chunk); 7099 num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 7100 7101 #if BITS_PER_LONG == 32 7102 ret = check_32bit_meta_chunk(fs_info, logical, length, type); 7103 if (ret < 0) 7104 return ret; 7105 warn_32bit_meta_chunk(fs_info, logical, length, type); 7106 #endif 7107 7108 /* 7109 * Only need to verify chunk item if we're reading from sys chunk array, 7110 * as chunk item in tree block is already verified by tree-checker. 7111 */ 7112 if (leaf->start == BTRFS_SUPER_INFO_OFFSET) { 7113 ret = btrfs_check_chunk_valid(leaf, chunk, logical); 7114 if (ret) 7115 return ret; 7116 } 7117 7118 read_lock(&map_tree->lock); 7119 em = lookup_extent_mapping(map_tree, logical, 1); 7120 read_unlock(&map_tree->lock); 7121 7122 /* already mapped? 
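	 * Chunks from the sys_chunk_array were read first, so a chunk tree
	 * item may find its range already inserted.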
*/ 7123 if (em && em->start <= logical && em->start + em->len > logical) { 7124 free_extent_map(em); 7125 return 0; 7126 } else if (em) { 7127 free_extent_map(em); 7128 } 7129 7130 em = alloc_extent_map(); 7131 if (!em) 7132 return -ENOMEM; 7133 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); 7134 if (!map) { 7135 free_extent_map(em); 7136 return -ENOMEM; 7137 } 7138 7139 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 7140 em->map_lookup = map; 7141 em->start = logical; 7142 em->len = length; 7143 em->orig_start = 0; 7144 em->block_start = 0; 7145 em->block_len = em->len; 7146 7147 map->num_stripes = num_stripes; 7148 map->io_width = btrfs_chunk_io_width(leaf, chunk); 7149 map->io_align = btrfs_chunk_io_align(leaf, chunk); 7150 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk); 7151 map->type = type; 7152 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); 7153 map->verified_stripes = 0; 7154 em->orig_block_len = calc_stripe_length(type, em->len, 7155 map->num_stripes); 7156 for (i = 0; i < num_stripes; i++) { 7157 map->stripes[i].physical = 7158 btrfs_stripe_offset_nr(leaf, chunk, i); 7159 devid = btrfs_stripe_devid_nr(leaf, chunk, i); 7160 args.devid = devid; 7161 read_extent_buffer(leaf, uuid, (unsigned long) 7162 btrfs_stripe_dev_uuid_nr(chunk, i), 7163 BTRFS_UUID_SIZE); 7164 args.uuid = uuid; 7165 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args); 7166 if (!map->stripes[i].dev) { 7167 map->stripes[i].dev = handle_missing_device(fs_info, 7168 devid, uuid); 7169 if (IS_ERR(map->stripes[i].dev)) { 7170 free_extent_map(em); 7171 return PTR_ERR(map->stripes[i].dev); 7172 } 7173 } 7174 7175 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 7176 &(map->stripes[i].dev->dev_state)); 7177 } 7178 7179 write_lock(&map_tree->lock); 7180 ret = add_extent_mapping(map_tree, em, 0); 7181 write_unlock(&map_tree->lock); 7182 if (ret < 0) { 7183 btrfs_err(fs_info, 7184 "failed to add chunk map, start=%llu len=%llu: %d", 7185 em->start, em->len, ret); 7186 } 7187 free_extent_map(em); 7188 7189 return ret; 7190 } 7191 7192 static void fill_device_from_item(struct extent_buffer *leaf, 7193 struct btrfs_dev_item *dev_item, 7194 struct btrfs_device *device) 7195 { 7196 unsigned long ptr; 7197 7198 device->devid = btrfs_device_id(leaf, dev_item); 7199 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item); 7200 device->total_bytes = device->disk_total_bytes; 7201 device->commit_total_bytes = device->disk_total_bytes; 7202 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item); 7203 device->commit_bytes_used = device->bytes_used; 7204 device->type = btrfs_device_type(leaf, dev_item); 7205 device->io_align = btrfs_device_io_align(leaf, dev_item); 7206 device->io_width = btrfs_device_io_width(leaf, dev_item); 7207 device->sector_size = btrfs_device_sector_size(leaf, dev_item); 7208 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID); 7209 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 7210 7211 ptr = btrfs_device_uuid(dev_item); 7212 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 7213 } 7214 7215 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info, 7216 u8 *fsid) 7217 { 7218 struct btrfs_fs_devices *fs_devices; 7219 int ret; 7220 7221 lockdep_assert_held(&uuid_mutex); 7222 ASSERT(fsid); 7223 7224 /* This will match only for multi-device seed fs */ 7225 list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list) 7226 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE)) 7227 return fs_devices; 7228 7229 
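	/* Not an already-opened seed fs; look it up among scanned devices. */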
7230 fs_devices = find_fsid(fsid, NULL); 7231 if (!fs_devices) { 7232 if (!btrfs_test_opt(fs_info, DEGRADED)) 7233 return ERR_PTR(-ENOENT); 7234 7235 fs_devices = alloc_fs_devices(fsid, NULL); 7236 if (IS_ERR(fs_devices)) 7237 return fs_devices; 7238 7239 fs_devices->seeding = true; 7240 fs_devices->opened = 1; 7241 return fs_devices; 7242 } 7243 7244 /* 7245 * Upon first call for a seed fs fsid, just create a private copy of the 7246 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list 7247 */ 7248 fs_devices = clone_fs_devices(fs_devices); 7249 if (IS_ERR(fs_devices)) 7250 return fs_devices; 7251 7252 ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder); 7253 if (ret) { 7254 free_fs_devices(fs_devices); 7255 return ERR_PTR(ret); 7256 } 7257 7258 if (!fs_devices->seeding) { 7259 close_fs_devices(fs_devices); 7260 free_fs_devices(fs_devices); 7261 return ERR_PTR(-EINVAL); 7262 } 7263 7264 list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list); 7265 7266 return fs_devices; 7267 } 7268 7269 static int read_one_dev(struct extent_buffer *leaf, 7270 struct btrfs_dev_item *dev_item) 7271 { 7272 BTRFS_DEV_LOOKUP_ARGS(args); 7273 struct btrfs_fs_info *fs_info = leaf->fs_info; 7274 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7275 struct btrfs_device *device; 7276 u64 devid; 7277 int ret; 7278 u8 fs_uuid[BTRFS_FSID_SIZE]; 7279 u8 dev_uuid[BTRFS_UUID_SIZE]; 7280 7281 devid = args.devid = btrfs_device_id(leaf, dev_item); 7282 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 7283 BTRFS_UUID_SIZE); 7284 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 7285 BTRFS_FSID_SIZE); 7286 args.uuid = dev_uuid; 7287 args.fsid = fs_uuid; 7288 7289 if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) { 7290 fs_devices = open_seed_devices(fs_info, fs_uuid); 7291 if (IS_ERR(fs_devices)) 7292 return PTR_ERR(fs_devices); 7293 } 7294 7295 device = btrfs_find_device(fs_info->fs_devices, &args); 7296 if (!device) { 7297 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7298 btrfs_report_missing_device(fs_info, devid, 7299 dev_uuid, true); 7300 return -ENOENT; 7301 } 7302 7303 device = add_missing_dev(fs_devices, devid, dev_uuid); 7304 if (IS_ERR(device)) { 7305 btrfs_err(fs_info, 7306 "failed to add missing dev %llu: %ld", 7307 devid, PTR_ERR(device)); 7308 return PTR_ERR(device); 7309 } 7310 btrfs_report_missing_device(fs_info, devid, dev_uuid, false); 7311 } else { 7312 if (!device->bdev) { 7313 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7314 btrfs_report_missing_device(fs_info, 7315 devid, dev_uuid, true); 7316 return -ENOENT; 7317 } 7318 btrfs_report_missing_device(fs_info, devid, 7319 dev_uuid, false); 7320 } 7321 7322 if (!device->bdev && 7323 !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 7324 /* 7325 * this happens when a device that was properly setup 7326 * in the device info lists suddenly goes bad. 
			 * device->bdev is NULL, so we have to set the
			 * BTRFS_DEV_STATE_MISSING bit here.
			 */
			device->fs_devices->missing_devices++;
			set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}

		/* Move the device to its own fs_devices */
		if (device->fs_devices != fs_devices) {
			ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
					&device->dev_state));

			list_move(&device->dev_list, &fs_devices->devices);
			device->fs_devices->num_devices--;
			fs_devices->num_devices++;

			device->fs_devices->missing_devices--;
			fs_devices->missing_devices++;

			device->fs_devices = fs_devices;
		}
	}

	if (device->fs_devices != fs_info->fs_devices) {
		BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	if (device->bdev) {
		u64 max_total_bytes = bdev_nr_bytes(device->bdev);

		if (device->total_bytes > max_total_bytes) {
			btrfs_err(fs_info,
			"device total_bytes should be at most %llu but found %llu",
				  max_total_bytes, device->total_bytes);
			return -EINVAL;
		}
	}
	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		device->fs_devices->total_rw_bytes += device->total_bytes;
		atomic64_add(device->total_bytes - device->bytes_used,
			     &fs_info->free_chunk_space);
	}
	ret = 0;
	return ret;
}

int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *array_ptr;
	unsigned long sb_array_offset;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur_offset;
	u64 type;
	struct btrfs_key key;

	ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
	/*
	 * This will create an extent buffer of nodesize; superblock size is
	 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
	 * overallocate, but we can keep it as-is, only the first page is used.
	 */
	sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET,
					  root->root_key.objectid, 0);
	if (IS_ERR(sb))
		return PTR_ERR(sb);
	set_extent_buffer_uptodate(sb);
	/*
	 * The sb extent buffer is artificial and just used to read the system
	 * array. The set_extent_buffer_uptodate() call does not properly mark
	 * all its pages up-to-date when the page is larger: the extent does
	 * not cover the whole page and consequently check_page_uptodate does
	 * not find all the page's extents up-to-date (the hole beyond sb),
	 * write_extent_buffer then triggers a WARN_ON.
	 *
	 * Regular short extents go through the
	 * mark_extent_buffer_dirty/writeback cycle, but sb spans only this
	 * function. Add an explicit SetPageUptodate call to silence the
	 * warning e.g. on PowerPC 64.
7418 */ 7419 if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE) 7420 SetPageUptodate(sb->pages[0]); 7421 7422 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); 7423 array_size = btrfs_super_sys_array_size(super_copy); 7424 7425 array_ptr = super_copy->sys_chunk_array; 7426 sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array); 7427 cur_offset = 0; 7428 7429 while (cur_offset < array_size) { 7430 disk_key = (struct btrfs_disk_key *)array_ptr; 7431 len = sizeof(*disk_key); 7432 if (cur_offset + len > array_size) 7433 goto out_short_read; 7434 7435 btrfs_disk_key_to_cpu(&key, disk_key); 7436 7437 array_ptr += len; 7438 sb_array_offset += len; 7439 cur_offset += len; 7440 7441 if (key.type != BTRFS_CHUNK_ITEM_KEY) { 7442 btrfs_err(fs_info, 7443 "unexpected item type %u in sys_array at offset %u", 7444 (u32)key.type, cur_offset); 7445 ret = -EIO; 7446 break; 7447 } 7448 7449 chunk = (struct btrfs_chunk *)sb_array_offset; 7450 /* 7451 * At least one btrfs_chunk with one stripe must be present, 7452 * exact stripe count check comes afterwards 7453 */ 7454 len = btrfs_chunk_item_size(1); 7455 if (cur_offset + len > array_size) 7456 goto out_short_read; 7457 7458 num_stripes = btrfs_chunk_num_stripes(sb, chunk); 7459 if (!num_stripes) { 7460 btrfs_err(fs_info, 7461 "invalid number of stripes %u in sys_array at offset %u", 7462 num_stripes, cur_offset); 7463 ret = -EIO; 7464 break; 7465 } 7466 7467 type = btrfs_chunk_type(sb, chunk); 7468 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) { 7469 btrfs_err(fs_info, 7470 "invalid chunk type %llu in sys_array at offset %u", 7471 type, cur_offset); 7472 ret = -EIO; 7473 break; 7474 } 7475 7476 len = btrfs_chunk_item_size(num_stripes); 7477 if (cur_offset + len > array_size) 7478 goto out_short_read; 7479 7480 ret = read_one_chunk(&key, sb, chunk); 7481 if (ret) 7482 break; 7483 7484 array_ptr += len; 7485 sb_array_offset += len; 7486 cur_offset += len; 7487 } 7488 clear_extent_buffer_uptodate(sb); 7489 free_extent_buffer_stale(sb); 7490 return ret; 7491 7492 out_short_read: 7493 btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u", 7494 len, cur_offset); 7495 clear_extent_buffer_uptodate(sb); 7496 free_extent_buffer_stale(sb); 7497 return -EIO; 7498 } 7499 7500 /* 7501 * Check if all chunks in the fs are OK for read-write degraded mount 7502 * 7503 * If the @failing_dev is specified, it's accounted as missing. 7504 * 7505 * Return true if all chunks meet the minimal RW mount requirements. 7506 * Return false if any chunk doesn't meet the minimal RW mount requirements. 7507 */ 7508 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info, 7509 struct btrfs_device *failing_dev) 7510 { 7511 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 7512 struct extent_map *em; 7513 u64 next_start = 0; 7514 bool ret = true; 7515 7516 read_lock(&map_tree->lock); 7517 em = lookup_extent_mapping(map_tree, 0, (u64)-1); 7518 read_unlock(&map_tree->lock); 7519 /* No chunk at all? 
 * Return false anyway */
	if (!em) {
		ret = false;
		goto out;
	}
	while (em) {
		struct map_lookup *map;
		int missing = 0;
		int max_tolerated;
		int i;

		map = em->map_lookup;
		max_tolerated =
			btrfs_get_num_tolerated_disk_barrier_failures(
					map->type);
		for (i = 0; i < map->num_stripes; i++) {
			struct btrfs_device *dev = map->stripes[i].dev;

			if (!dev || !dev->bdev ||
			    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
			    dev->last_flush_error)
				missing++;
			else if (failing_dev && failing_dev == dev)
				missing++;
		}
		if (missing > max_tolerated) {
			if (!failing_dev)
				btrfs_warn(fs_info,
	"chunk %llu missing %d devices, max tolerance is %d for writable mount",
					   em->start, missing, max_tolerated);
			free_extent_map(em);
			ret = false;
			goto out;
		}
		next_start = extent_map_end(em);
		free_extent_map(em);

		read_lock(&map_tree->lock);
		em = lookup_extent_mapping(map_tree, next_start,
					   (u64)(-1) - next_start);
		read_unlock(&map_tree->lock);
	}
out:
	return ret;
}

static void readahead_tree_node_children(struct extent_buffer *node)
{
	int i;
	const int nr_items = btrfs_header_nritems(node);

	for (i = 0; i < nr_items; i++)
		btrfs_readahead_node_child(node, i);
}

int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;
	u64 total_dev = 0;
	u64 last_ra_node = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * uuid_mutex is needed only when we are mounting a sprout FS.
	 */
	mutex_lock(&uuid_mutex);

	/*
	 * It is possible for mount and umount to race in such a way that
	 * we execute this code path, but open_fs_devices failed to clear
	 * total_rw_bytes. We certainly want it cleared before reading the
	 * device items, so clear it here.
	 */
	fs_info->fs_devices->total_rw_bytes = 0;

	/*
	 * Lockdep complains about a possible circular locking dependency
	 * between a disk's open_mutex (struct gendisk.open_mutex), the rw
	 * semaphores used for freeze protection of a fs (struct
	 * super_block.s_writers), which we take when starting a transaction,
	 * and extent buffers of the chunk tree if we call read_one_dev()
	 * while holding a lock on an extent buffer of the chunk tree. Since
	 * we are mounting the filesystem and at this point there can't be any
	 * concurrent task modifying the chunk tree, to keep it simple, just
	 * skip locking on the chunk tree.
	 */
	ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
	path->skip_locking = 1;

	/*
	 * Read all device items, and then all the chunk items. All device
	 * items are found before any chunk item (their object id is smaller
	 * than the lowest possible object id for a chunk item -
	 * BTRFS_FIRST_CHUNK_TREE_OBJECTID).
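	 * A single forward search starting at BTRFS_DEV_ITEMS_OBJECTID
	 * therefore visits every device item and then every chunk item in
	 * one pass.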
static void readahead_tree_node_children(struct extent_buffer *node)
{
	int i;
	const int nr_items = btrfs_header_nritems(node);

	for (i = 0; i < nr_items; i++)
		btrfs_readahead_node_child(node, i);
}

int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;
	u64 total_dev = 0;
	u64 last_ra_node = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * uuid_mutex is needed only if we are mounting a sprout FS,
	 * otherwise we don't need it.
	 */
	mutex_lock(&uuid_mutex);

	/*
	 * It is possible for mount and umount to race in such a way that
	 * we execute this code path, but open_fs_devices failed to clear
	 * total_rw_bytes. We certainly want it cleared before reading the
	 * device items, so clear it here.
	 */
	fs_info->fs_devices->total_rw_bytes = 0;

	/*
	 * Lockdep complains about a possible circular locking dependency
	 * between a disk's open_mutex (struct gendisk.open_mutex), the rw
	 * semaphores used for freeze protection of a fs (struct
	 * super_block.s_writers), which we take when starting a transaction,
	 * and extent buffers of the chunk tree if we call read_one_dev()
	 * while holding a lock on an extent buffer of the chunk tree. Since
	 * we are mounting the filesystem and at this point there can't be any
	 * concurrent task modifying the chunk tree, to keep it simple, just
	 * skip locking on the chunk tree.
	 */
	ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
	path->skip_locking = 1;

	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		struct extent_buffer *node;

		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		node = path->nodes[1];
		if (node) {
			if (last_ra_node != node->start) {
				readahead_tree_node_children(node);
				last_ra_node = node->start;
			}
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;

			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(leaf, dev_item);
			if (ret)
				goto error;
			total_dev++;
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;

			/*
			 * We are only called at mount time, so no need to take
			 * fs_info->chunk_mutex. Plus, to avoid lockdep
			 * warnings, we always lock fs_info->chunk_mutex first,
			 * before acquiring any locks on the chunk tree. This
			 * is a requirement for chunk allocation, see the
			 * comment on top of btrfs_chunk_alloc() for details.
			 */
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(&found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}

	/*
	 * After loading the chunk tree we have all device information, so
	 * do another round of validation checks.
	 */
	if (total_dev != fs_info->fs_devices->total_devices) {
		btrfs_err(fs_info,
	"super_num_devices %llu mismatch with num_devices %llu found here",
			  btrfs_super_num_devices(fs_info->super_copy),
			  total_dev);
		ret = -EINVAL;
		goto error;
	}
	if (btrfs_super_total_bytes(fs_info->super_copy) <
	    fs_info->fs_devices->total_rw_bytes) {
		btrfs_err(fs_info,
	"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
			  btrfs_super_total_bytes(fs_info->super_copy),
			  fs_info->fs_devices->total_rw_bytes);
		ret = -EINVAL;
		goto error;
	}
	ret = 0;
error:
	mutex_unlock(&uuid_mutex);

	btrfs_free_path(path);
	return ret;
}

void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	struct btrfs_device *device;

	fs_devices->fs_info = fs_info;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list)
		device->fs_info = fs_info;

	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		list_for_each_entry(device, &seed_devs->devices, dev_list)
			device->fs_info = fs_info;

		seed_devs->fs_info = fs_info;
	}
	mutex_unlock(&fs_devices->device_list_mutex);
}

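/*
 * A dev_stats item is an array of __le64 counters. The two helpers below
 * read and write a single counter at @index directly in the extent buffer.
 */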
static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
				 const struct btrfs_dev_stats_item *ptr,
				 int index)
{
	u64 val;

	read_extent_buffer(eb, &val,
			   offsetof(struct btrfs_dev_stats_item, values) +
			    ((unsigned long)ptr) + (index * sizeof(u64)),
			   sizeof(val));
	return val;
}

static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
				      struct btrfs_dev_stats_item *ptr,
				      int index, u64 val)
{
	write_extent_buffer(eb, &val,
			    offsetof(struct btrfs_dev_stats_item, values) +
			     ((unsigned long)ptr) + (index * sizeof(u64)),
			    sizeof(val));
}

static int btrfs_device_init_dev_stats(struct btrfs_device *device,
				       struct btrfs_path *path)
{
	struct btrfs_dev_stats_item *ptr;
	struct extent_buffer *eb;
	struct btrfs_key key;
	int item_size;
	int i, ret, slot;

	if (!device->fs_info->dev_root)
		return 0;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;
	ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0);
	if (ret) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			btrfs_dev_stat_set(device, i, 0);
		device->dev_stats_valid = 1;
		btrfs_release_path(path);
		return ret < 0 ? ret : 0;
	}
	slot = path->slots[0];
	eb = path->nodes[0];
	item_size = btrfs_item_size(eb, slot);

	ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item);

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
		if (item_size >= (1 + i) * sizeof(__le64))
			btrfs_dev_stat_set(device, i,
					   btrfs_dev_stats_value(eb, ptr, i));
		else
			btrfs_dev_stat_set(device, i, 0);
	}

	device->dev_stats_valid = 1;
	btrfs_dev_stat_print_on_load(device);
	btrfs_release_path(path);

	return 0;
}

int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		ret = btrfs_device_init_dev_stats(device, path);
		if (ret)
			goto out;
	}
	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		list_for_each_entry(device, &seed_devs->devices, dev_list) {
			ret = btrfs_device_init_dev_stats(device, path);
			if (ret)
				goto out;
		}
	}
out:
	mutex_unlock(&fs_devices->device_list_mutex);

	btrfs_free_path(path);
	return ret;
}

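/*
 * Persist the in-memory stat counters of @device into its dev_stats item in
 * the device tree. An existing item that is too small to hold all counters
 * is deleted and replaced by a freshly sized one.
 */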
static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn_in_rcu(fs_info,
			"error %d while searching for dev_stats item for device %s",
			ret, rcu_str_deref(device->name));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* need to delete old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn_in_rcu(fs_info,
				"delete too small dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn_in_rcu(fs_info,
				"insert dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Called from commit_transaction. Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		stats_cnt = atomic_read(&device->dev_stats_ccnt);
		if (!device->dev_stats_valid || stats_cnt == 0)
			continue;

		/*
		 * There is a LOAD-LOAD control dependency between the value of
		 * dev_stats_ccnt and updating the on-disk values which requires
		 * reading the in-memory counters. Such control dependencies
		 * require explicit read memory barriers.
		 *
		 * This memory barrier pairs with smp_mb__before_atomic in
		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
		 * barrier implied by atomic_xchg in
		 * btrfs_dev_stats_read_and_reset.
		 */
		smp_rmb();

		ret = update_dev_stat_item(trans, device);
		if (!ret)
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);
	btrfs_dev_stat_print_on_error(dev);
}

static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
	if (!dev->dev_stats_valid)
		return;
	btrfs_err_rl_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			   rcu_str_deref(dev->name),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

	btrfs_info_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
		   rcu_str_deref(dev->name),
		   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
		   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
		   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
		   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
		   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

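/*
 * Handler for the BTRFS_IOC_GET_DEV_STATS ioctl: copy the counters of the
 * device given by @stats->devid into @stats, optionally resetting them when
 * BTRFS_DEV_STATS_RESET is set in @stats->flags.
 */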
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	BTRFS_DEV_LOOKUP_ARGS(args);
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	args.devid = stats->devid;
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_set(dev, i, 0);
		}
		btrfs_info(fs_info, "device stats zeroed by %s (%d)",
			   current->comm, task_pid_nr(current));
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}

/*
 * Update the size and bytes used for each device where it changed. This is
 * delayed since we would otherwise get errors while writing out the
 * superblocks.
 *
 * Must be invoked during transaction commit.
 */
void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
{
	struct btrfs_device *curr, *next;

	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);

	if (list_empty(&trans->dev_update_list))
		return;

	/*
	 * We don't need the device_list_mutex here. This list is owned by the
	 * transaction and the transaction must complete before the device is
	 * released.
	 */
	mutex_lock(&trans->fs_info->chunk_mutex);
	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
				 post_commit_list) {
		list_del_init(&curr->post_commit_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
		curr->commit_bytes_used = curr->bytes_used;
	}
	mutex_unlock(&trans->fs_info->chunk_mutex);
}

/*
 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
 */
int btrfs_bg_type_to_factor(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	return btrfs_raid_array[index].ncopies;
}

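/*
 * For example (illustrative only, not used elsewhere in this file): RAID1
 * stores two copies of every byte, so converting a logical byte count to raw
 * disk usage for such a block group could be done with:
 *
 *	raw = logical * btrfs_bg_type_to_factor(BTRFS_BLOCK_GROUP_RAID1);
 */
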
static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
				 u64 chunk_offset, u64 devid,
				 u64 physical_offset, u64 physical_len)
{
	struct btrfs_dev_lookup_args args = { .devid = devid };
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *dev;
	u64 stripe_len;
	bool found = false;
	int ret = 0;
	int i;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
		goto out;
	}

	map = em->map_lookup;
	stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
	if (physical_len != stripe_len) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
			  physical_offset, devid, em->start, physical_len,
			  stripe_len);
		ret = -EUCLEAN;
		goto out;
	}

	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->devid == devid &&
		    map->stripes[i].physical == physical_offset) {
			found = true;
			if (map->verified_stripes >= map->num_stripes) {
				btrfs_err(fs_info,
					"too many dev extents for chunk %llu found",
					  em->start);
				ret = -EUCLEAN;
				goto out;
			}
			map->verified_stripes++;
			break;
		}
	}
	if (!found) {
		btrfs_err(fs_info,
			"dev extent physical offset %llu devid %llu has no corresponding chunk",
			physical_offset, devid);
		ret = -EUCLEAN;
	}

	/* Make sure no dev extent is beyond device boundary */
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	if (!dev) {
		btrfs_err(fs_info, "failed to find devid %llu", devid);
		ret = -EUCLEAN;
		goto out;
	}

	if (physical_offset + physical_len > dev->disk_total_bytes) {
		btrfs_err(fs_info,
			"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
			  devid, physical_offset, physical_len,
			  dev->disk_total_bytes);
		ret = -EUCLEAN;
		goto out;
	}

	if (dev->zone_info) {
		u64 zone_size = dev->zone_info->zone_size;

		if (!IS_ALIGNED(physical_offset, zone_size) ||
		    !IS_ALIGNED(physical_len, zone_size)) {
			btrfs_err(fs_info,
"zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
				  devid, physical_offset, physical_len);
			ret = -EUCLEAN;
			goto out;
		}
	}

out:
	free_extent_map(em);
	return ret;
}

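/*
 * After every dev extent has been checked by verify_one_dev_extent(), each
 * chunk must have had all of its stripes visited exactly once; any mismatch
 * between num_stripes and verified_stripes means a dev extent is missing.
 */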
static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct rb_node *node;
	int ret = 0;

	read_lock(&em_tree->lock);
	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
		em = rb_entry(node, struct extent_map, rb_node);
		if (em->map_lookup->num_stripes !=
		    em->map_lookup->verified_stripes) {
			btrfs_err(fs_info,
			"chunk %llu has missing dev extent, have %d expect %d",
				  em->start,
				  em->map_lookup->verified_stripes,
				  em->map_lookup->num_stripes);
			ret = -EUCLEAN;
			goto out;
		}
	}
out:
	read_unlock(&em_tree->lock);
	return ret;
}

/*
 * Ensure that all dev extents are mapped to the correct chunk, otherwise
 * later chunk allocation/free would cause unexpected behavior.
 *
 * NOTE: This will iterate through the whole device tree, which should be
 * roughly the same size as the chunk tree. This slightly increases mount
 * time.
 */
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	u64 prev_devid = 0;
	u64 prev_dev_ext_end = 0;
	int ret = 0;

	/*
	 * We don't have a dev_root because we mounted with ignorebadroots and
	 * failed to load the root, so we want to skip the verification in this
	 * case for sure.
	 *
	 * However if the dev root is fine, but the tree itself is corrupted
	 * we'd still fail to mount. This verification is only to make sure
	 * writes can happen safely, so instead just bypass this check
	 * completely in the case of IGNOREBADROOTS.
	 */
	if (btrfs_test_opt(fs_info, IGNOREBADROOTS))
		return 0;

	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	while (1) {
		struct extent_buffer *leaf = path->nodes[0];
		struct btrfs_dev_extent *dext;
		int slot = path->slots[0];
		u64 chunk_offset;
		u64 physical_offset;
		u64 physical_len;
		u64 devid;

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			break;
		devid = key.objectid;
		physical_offset = key.offset;

		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
		physical_len = btrfs_dev_extent_length(leaf, dext);

		/* Check if this dev extent overlaps with the previous one */
		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
			btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
				  devid, physical_offset, prev_dev_ext_end);
			ret = -EUCLEAN;
			goto out;
		}

		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
					    physical_offset, physical_len);
		if (ret < 0)
			goto out;
		prev_devid = devid;
		prev_dev_ext_end = physical_offset + physical_len;

		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}

	/* Ensure all chunks have corresponding dev extents */
	ret = verify_chunk_dev_extent_mapping(fs_info);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Check whether the given block group or device is pinned by any inode being
 * used as a swapfile.
 */
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
{
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = fs_info->swapfile_pins.rb_node;
	while (node) {
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (ptr < sp->ptr)
			node = node->rb_left;
		else if (ptr > sp->ptr)
			node = node->rb_right;
		else
			break;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
	return node != NULL;
}

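/*
 * Worker for btrfs_repair_one_zone(): relocate the block group containing
 * the failed I/O under the exclusive BTRFS_EXCLOP_BALANCE operation, so it
 * cannot race with balance, device removal or other relocation-type ops.
 */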
static int relocating_repair_kthread(void *data)
{
	struct btrfs_block_group *cache = (struct btrfs_block_group *)data;
	struct btrfs_fs_info *fs_info = cache->fs_info;
	u64 target;
	int ret = 0;

	target = cache->start;
	btrfs_put_block_group(cache);

	sb_start_write(fs_info->sb);
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
		btrfs_info(fs_info,
			   "zoned: skip relocating block group %llu to repair: EBUSY",
			   target);
		sb_end_write(fs_info->sb);
		return -EBUSY;
	}

	mutex_lock(&fs_info->reclaim_bgs_lock);

	/* Ensure block group still exists */
	cache = btrfs_lookup_block_group(fs_info, target);
	if (!cache)
		goto out;

	if (!cache->relocating_repair)
		goto out;

	ret = btrfs_may_alloc_data_chunk(fs_info, target);
	if (ret < 0)
		goto out;

	btrfs_info(fs_info,
		   "zoned: relocating block group %llu to repair IO failure",
		   target);
	ret = btrfs_relocate_chunk(fs_info, target);

out:
	if (cache)
		btrfs_put_block_group(cache);
	mutex_unlock(&fs_info->reclaim_bgs_lock);
	btrfs_exclop_finish(fs_info);
	sb_end_write(fs_info->sb);

	return ret;
}

bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
{
	struct btrfs_block_group *cache;

	if (!btrfs_is_zoned(fs_info))
		return false;

	/* Do not attempt to repair in degraded state */
	if (btrfs_test_opt(fs_info, DEGRADED))
		return true;

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return true;

	spin_lock(&cache->lock);
	if (cache->relocating_repair) {
		spin_unlock(&cache->lock);
		btrfs_put_block_group(cache);
		return true;
	}
	cache->relocating_repair = 1;
	spin_unlock(&cache->lock);

	kthread_run(relocating_repair_kthread, cache,
		    "btrfs-relocating-repair");

	return true;
}