// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include <linux/namei.h>
#include "misc.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"
#include "discard.h"
#include "zoned.h"

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes	= 2,
		.dev_stripes	= 1,
		.devs_max	= 0,	/* 0 == as many as possible */
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "raid10",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 2,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "raid1",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C3] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 3,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 3,
		.ncopies	= 3,
		.nparity	= 0,
		.raid_name	= "raid1c3",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C3,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C4] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 4,
		.devs_min	= 4,
		.tolerated_failures = 3,
		.devs_increment	= 4,
		.ncopies	= 4,
		.nparity	= 0,
		.raid_name	= "raid1c4",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C4,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes	= 1,
		.dev_stripes	= 2,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "dup",
		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 0,
		.raid_name	= "raid0",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 0,
		.raid_name	= "single",
		.bg_flag	= 0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 1,
		.raid_name	= "raid5",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 2,
		.raid_name	= "raid6",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};

/*
 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
 * can be used as index to access btrfs_raid_array[].
 */
enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags)
{
	if (flags & BTRFS_BLOCK_GROUP_RAID10)
		return BTRFS_RAID_RAID10;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
		return BTRFS_RAID_RAID1;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1C3)
		return BTRFS_RAID_RAID1C3;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1C4)
		return BTRFS_RAID_RAID1C4;
	else if (flags & BTRFS_BLOCK_GROUP_DUP)
		return BTRFS_RAID_DUP;
	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
		return BTRFS_RAID_RAID0;
	else if (flags & BTRFS_BLOCK_GROUP_RAID5)
		return BTRFS_RAID_RAID5;
	else if (flags & BTRFS_BLOCK_GROUP_RAID6)
		return BTRFS_RAID_RAID6;

	return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
}

const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}
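
/*
 * Illustrative example (not part of the original file): how the table and
 * the two converters above fit together. The expected values follow
 * directly from the btrfs_raid_array initializer:
 *
 *	enum btrfs_raid_types index;
 *
 *	index = btrfs_bg_flags_to_raid_index(BTRFS_BLOCK_GROUP_RAID1);
 *	ASSERT(index == BTRFS_RAID_RAID1);
 *	ASSERT(btrfs_raid_array[index].ncopies == 2);
 *	ASSERT(!strcmp(btrfs_bg_type_to_raid_name(BTRFS_BLOCK_GROUP_RAID1),
 *		       "raid1"));
 */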

/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

#define DESCRIBE_FLAG(flag, desc)					\
	do {								\
		if (flags & (flag)) {					\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)			\
				goto out_overflow;			\
			size_bp -= ret;					\
			bp += ret;					\
			flags &= ~(flag);				\
		}							\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide sufficiently
	 * large buffer
	 */
out_overflow:;
}
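
/*
 * Usage sketch for btrfs_describe_block_groups() (illustrative only, the
 * buffer size is arbitrary):
 *
 *	char buf[128];
 *
 *	btrfs_describe_block_groups(BTRFS_BLOCK_GROUP_DATA |
 *				    BTRFS_BLOCK_GROUP_RAID1, buf, sizeof(buf));
 *
 * would leave the string "data|raid1" in @buf, while a zero @bg_flags
 * yields "NONE" and any unrecognized bits are printed in hex.
 */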

static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_io_context **bioc_ret,
			     int mirror_num, int need_raid_map);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list in general
 * but in mount context it could be used to exclude list modifications by eg.
 * scan ioctl
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, ie. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * Is not required at mount and close times, because our device list is
 * protected by the uuid_mutex at that point.
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   device_list_mutex
 *     chunk_mutex
 *   balance_mutex
 *
 *
 * Exclusive operations
 * ====================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * The status of exclusive operation is set and cleared atomically.
 * During the course of Paused state, fs_info::exclusive_operation remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * The exclusive status is cleared when the device operation is canceled or
 * completed.
 */
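
/*
 * Illustrative sketch of the nesting order documented above (not a real
 * call site): when more than one of these locks is needed, they are taken
 * outermost first, e.g.
 *
 *	mutex_lock(&uuid_mutex);
 *	mutex_lock(&fs_devices->device_list_mutex);
 *	mutex_lock(&fs_info->chunk_mutex);
 *	...
 *	mutex_unlock(&fs_info->chunk_mutex);
 *	mutex_unlock(&fs_devices->device_list_mutex);
 *	mutex_unlock(&uuid_mutex);
 */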

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:		if not NULL, copy the UUID to fs_devices::fsid
 * @metadata_fsid:	if not NULL, copy the UUID to fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
						 const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	INIT_LIST_HEAD(&fs_devs->seed_list);
	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);

	if (metadata_fsid)
		memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
	else if (fsid)
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);

	return fs_devs;
}

void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	extent_io_tree_release(&device->alloc_state);
	bio_put(device->flush_bio);
	btrfs_destroy_dev_zone_info(device);
	kfree(device);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}

void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}
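
/*
 * Lifecycle sketch for the allocation helpers above (illustrative only,
 * assuming a caller that bails out before the new fs_devices is linked
 * into the fs_uuids list):
 *
 *	struct btrfs_fs_devices *fs_devs;
 *
 *	fs_devs = alloc_fs_devices(fsid, NULL);
 *	if (IS_ERR(fs_devs))
 *		return PTR_ERR(fs_devs);
 *	...
 *	free_fs_devices(fs_devs);	(or kfree() while still empty)
 */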

static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (metadata_fsid) {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
			    && memcmp(metadata_fsid, fs_devices->metadata_uuid,
				      BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		} else {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		}
	}
	return NULL;
}

static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
				struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by first scanning
	 * a device which didn't have its fsid/metadata_uuid changed
	 * at all and the CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(disk_super->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}
	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by a device that
	 * has an outdated pair of fsid/metadata_uuid and
	 * CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(fs_devices->metadata_uuid,
			   fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
		    memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
}

static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct btrfs_super_block **disk_super)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		sync_blockdev(*bdev);
	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*disk_super = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*disk_super)) {
		ret = PTR_ERR(*disk_super);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	return ret;
}

static bool device_path_matched(const char *path, struct btrfs_device *device)
{
	int found;

	rcu_read_lock();
	found = strcmp(rcu_str_deref(device->name), path);
	rcu_read_unlock();

	return found == 0;
}

/*
 * Search and remove all stale devices (devices which are not mounted).
 * When both inputs are NULL, it will search and release all stale devices.
 *
 * @path:	Optional. When provided, only unmounted devices matching this
 *		path are released.
 * @skip_dev:	Optional. Will skip this device when searching for the stale
 *		devices.
 *
 * Return:	0 for success or if @path is NULL.
 *		-EBUSY if @path is a mounted device.
 *		-ENOENT if @path does not match any device in the list.
 */
static int btrfs_free_stale_devices(const char *path,
				    struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	if (path)
		ret = -ENOENT;

	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (path && !device->name)
				continue;
			if (path && !device_path_matched(path, device))
				continue;
			if (fs_devices->opened) {
				/* for an already deleted device return 0 */
				if (path && ret != 0)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			ret = 0;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	return ret;
}
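
/*
 * Example of the return contract above (illustrative, the path is made up):
 *
 *	ret = btrfs_free_stale_devices("/dev/sdx", NULL);
 *
 * returns -EBUSY if /dev/sdx is registered but currently mounted, -ENOENT
 * if no registered device matches the path, and 0 once the matching stale
 * device has been released. A NULL path releases all stale devices and
 * returns 0.
 */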

/*
 * This is only used on mount, and we are protected from competing things
 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 * fs_devices->device_list_mutex here.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
			struct btrfs_device *device, fmode_t flags,
			void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev, &disk_super);
	if (ret)
		return ret;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_free_page;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_free_page;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_free_page;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = true;
	} else {
		if (bdev_read_only(bdev))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	q = bdev_get_queue(bdev);
	if (!blk_queue_nonrot(q))
		fs_devices->rotating = true;

	device->bdev = bdev;
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	device->mode = flags;

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	btrfs_release_disk_super(disk_super);

	return 0;

error_free_page:
	btrfs_release_disk_super(disk_super);
	blkdev_put(bdev, flags);

	return -EINVAL;
}

/*
 * Handle scanned device having its CHANGING_FSID_V2 flag set and the
 * fs_devices being created with a disk that has already completed its fsid
 * change. Such a disk can belong to an fs which has its FSID changed or to
 * one which doesn't. Handle both cases here.
 */
static struct btrfs_fs_devices *find_fsid_inprogress(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, NULL);
}

static struct btrfs_fs_devices *find_fsid_changed(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handles the case where scanned device is part of an fs that had
	 * multiple successful changes of FSID but currently device didn't
	 * observe it. Meaning our fsid will be different than theirs. We need
	 * to handle two subcases:
	 *  1 - The fs still continues to have different METADATA/FSID uuids.
	 *  2 - The fs is switched back to its original FSID (METADATA/FSID
	 *  are equal).
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		/* Changed UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->fsid,
			   BTRFS_FSID_SIZE) != 0)
			return fs_devices;

		/* Unchanged UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}

	return NULL;
}

static struct btrfs_fs_devices *find_fsid_reverted_metadata(
				struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle the case where the scanned device is part of an fs whose last
	 * metadata UUID change reverted it to the original FSID. At the same
	 * time fs_devices was first created by another constituent device
	 * which didn't fully observe the operation. This results in a
	 * btrfs_fs_devices created with metadata/fsid different AND
	 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
	 * fs_devices equal to the FSID of the disk.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    fs_devices->fsid_change)
			return fs_devices;
	}

	return NULL;
}
/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
					BTRFS_SUPER_FLAG_CHANGING_FSID_V2);

	if (fsid_change_in_progress) {
		if (!has_metadata_uuid)
			fs_devices = find_fsid_inprogress(disk_super);
		else
			fs_devices = find_fsid_changed(disk_super);
	} else if (has_metadata_uuid) {
		fs_devices = find_fsid_with_metadata_uuid(disk_super);
	} else {
		fs_devices = find_fsid_reverted_metadata(disk_super);
		if (!fs_devices)
			fs_devices = find_fsid(disk_super->fsid, NULL);
	}

	if (!fs_devices) {
		if (has_metadata_uuid)
			fs_devices = alloc_fs_devices(disk_super->fsid,
						      disk_super->metadata_uuid);
		else
			fs_devices = alloc_fs_devices(disk_super->fsid, NULL);

		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		fs_devices->fsid_change = fsid_change_in_progress;

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		struct btrfs_dev_lookup_args args = {
			.devid = devid,
			.uuid = disk_super->dev_item.uuid,
		};

		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, &args);

		/*
		 * If this disk has been pulled into an fs devices created by
		 * a device which had the CHANGING_FSID_V2 flag then replace the
		 * metadata_uuid/fsid values of the fs_devices.
		 */
		if (fs_devices->fsid_change &&
		    found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
			       BTRFS_FSID_SIZE);

			if (has_metadata_uuid)
				memcpy(fs_devices->metadata_uuid,
				       disk_super->metadata_uuid,
				       BTRFS_FSID_SIZE);
			else
				memcpy(fs_devices->metadata_uuid,
				       disk_super->fsid, BTRFS_FSID_SIZE);

			fs_devices->fsid_change = false;
		}
	}

	if (!device) {
		if (fs_devices->opened) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			btrfs_free_device(device);
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_assign_pointer(device->name, name);

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info(
	"BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->label, devid, found_transid, path,
				current->comm, task_pid_nr(current));
		else
			pr_info(
	"BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->fsid, devid, found_transid, path,
				current->comm, task_pid_nr(current));

	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *      a. The same device disappeared and reappeared with
		 *         different name. or
		 *      b. The missing-disk-which-was-replaced, has
		 *         reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transaction when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted. We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with same uuid and devid. We keep the one
			 * with larger generation number or the last-in if
			 * generations are equal.
			 */
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EEXIST);
		}

		/*
		 * We are going to replace the device path for a given devid,
		 * make sure it's the same device if the device is mounted
		 */
		if (device->bdev) {
			int error;
			dev_t path_dev;

			error = lookup_bdev(path, &path_dev);
			if (error) {
				mutex_unlock(&fs_devices->device_list_mutex);
				return ERR_PTR(error);
			}

			if (device->bdev->bd_dev != path_dev) {
				mutex_unlock(&fs_devices->device_list_mutex);
				/*
				 * device->fs_info may not be reliable here, so
				 * pass in a NULL instead. This avoids a
				 * possible use-after-free when the fs_info and
				 * fs_info->sb are already torn down.
				 */
				btrfs_warn_in_rcu(NULL,
	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
						  path, devid, found_transid,
						  current->comm,
						  task_pid_nr(current));
				return ERR_PTR(-EEXIST);
			}
			btrfs_info_in_rcu(device->fs_info,
	"devid %llu device path %s changed to %s scanned by %s (%d)",
					  devid, rcu_str_deref(device->name),
					  path, current->comm,
					  task_pid_nr(current));
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);
	return device;
}

static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	fs_devices = alloc_fs_devices(orig->fsid, NULL);
	if (IS_ERR(fs_devices))
		return fs_devices;

	fs_devices->total_devices = orig->total_devices;

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device)) {
			ret = PTR_ERR(device);
			goto error;
		}

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str,
						 GFP_KERNEL);
			if (!name) {
				btrfs_free_device(device);
				ret = -ENOMEM;
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(ret);
}

static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
				      struct btrfs_device **latest_dev)
{
	struct btrfs_device *device, *next;

	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			    !test_bit(BTRFS_DEV_STATE_MISSING,
				      &device->dev_state) &&
			    (!*latest_dev ||
			     device->generation > (*latest_dev)->generation)) {
				*latest_dev = device;
			}
			continue;
		}

		/*
		 * We have already validated the presence of
		 * BTRFS_DEV_REPLACE_DEVID in btrfs_init_dev_replace() so just
		 * continue.
		 */
		if (device->devid == BTRFS_DEV_REPLACE_DEVID)
			continue;

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}
}

/*
 * After we have read the system tree and know devids belonging to this
 * filesystem, remove the device which does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_fs_devices *seed_dev;

	mutex_lock(&uuid_mutex);
	__btrfs_free_extra_devids(fs_devices, &latest_dev);

	list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
		__btrfs_free_extra_devids(seed_dev, &latest_dev);

	fs_devices->latest_dev = latest_dev;

	mutex_unlock(&uuid_mutex);
}

static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	blkdev_put(device->bdev, device->mode);
}

static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->devid == BTRFS_DEV_REPLACE_DEVID)
		clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
		clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		fs_devices->missing_devices--;
	}

	btrfs_close_bdev(device);
	if (device->bdev) {
		fs_devices->open_devices--;
		device->bdev = NULL;
	}
	clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	btrfs_destroy_dev_zone_info(device);

	device->fs_info = NULL;
	atomic_set(&device->dev_stats_ccnt, 0);
	extent_io_tree_release(&device->alloc_state);

	/*
	 * Reset the flush error record. We might have a transient flush error
	 * in this mount, and if so we aborted the current transaction and set
	 * the fs to an error state, guaranteeing no super blocks can be
	 * further committed. However that error might be transient and if we
	 * unmount the filesystem and mount it again, we should allow the
	 * mount to succeed (btrfs_check_rw_degradable() should not fail) - if
	 * after mounting the filesystem again we still get flush errors, then
	 * we will again abort any transaction and set the error state,
	 * guaranteeing no commits of unsafe super blocks.
	 */
	device->last_flush_error = 0;

	/* Verify the device is back in a pristine state */
	ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
	ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	ASSERT(list_empty(&device->dev_alloc_list));
	ASSERT(list_empty(&device->post_commit_list));
	ASSERT(atomic_read(&device->reada_in_flight) == 0);
}

static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	lockdep_assert_held(&uuid_mutex);

	if (--fs_devices->opened > 0)
		return;

	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
		btrfs_close_one_device(device);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = false;
	fs_devices->fs_info = NULL;
}

void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	LIST_HEAD(list);
	struct btrfs_fs_devices *tmp;

	mutex_lock(&uuid_mutex);
	close_fs_devices(fs_devices);
	if (!fs_devices->opened)
		list_splice_init(&fs_devices->seed_list, &list);

	list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
		close_fs_devices(fs_devices);
		list_del(&fs_devices->seed_list);
		free_fs_devices(fs_devices);
	}
	mutex_unlock(&uuid_mutex);
}

static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
			   fmode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_device *tmp_device;

	flags |= FMODE_EXCL;

	list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
				 dev_list) {
		int ret;

		ret = btrfs_open_one_device(fs_devices, device, flags, holder);
		if (ret == 0 &&
		    (!latest_dev || device->generation > latest_dev->generation)) {
			latest_dev = device;
		} else if (ret == -ENODATA) {
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);
		}
	}
	if (fs_devices->open_devices == 0)
		return -EINVAL;

	fs_devices->opened = 1;
	fs_devices->latest_dev = latest_dev;
	fs_devices->total_rw_bytes = 0;
	fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
	fs_devices->read_policy = BTRFS_READ_POLICY_PID;

	return 0;
}

static int devid_cmp(void *priv, const struct list_head *a,
		     const struct list_head *b)
{
	const struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}
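
/*
 * Illustration of the fs_devices::opened refcount managed by
 * open_fs_devices()/close_fs_devices() above and btrfs_open_devices()
 * below (simplified sketch):
 *
 *	btrfs_open_devices(fs_devices, ...)	opened 0 -> 1, bdevs opened
 *	btrfs_open_devices(fs_devices, ...)	opened 1 -> 2, nothing reopened
 *	btrfs_close_devices(fs_devices)		opened 2 -> 1, still open
 *	btrfs_close_devices(fs_devices)		opened 1 -> 0, bdevs released
 */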

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);
	/*
	 * The device_list_mutex cannot be taken here in case opening the
	 * underlying device takes further locks like open_mutex.
	 *
	 * We also don't need the lock here as this is called during mount and
	 * exclusion is provided by uuid_mutex
	 */

	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}

	return ret;
}

void btrfs_release_disk_super(struct btrfs_super_block *super)
{
	struct page *page = virt_to_page(super);

	put_page(page);
}

static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
						       u64 bytenr, u64 bytenr_orig)
{
	struct btrfs_super_block *disk_super;
	struct page *page;
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= bdev_nr_bytes(bdev))
		return ERR_PTR(-EINVAL);

	/* make sure our super fits in the page */
	if (sizeof(*disk_super) > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
		return ERR_PTR(-EINVAL);

	/* pull in the page with our super */
	page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);

	if (IS_ERR(page))
		return ERR_CAST(page);

	p = page_address(page);

	/* align our pointer to the offset of the super block */
	disk_super = p + offset_in_page(bytenr);

	if (btrfs_super_bytenr(disk_super) != bytenr_orig ||
	    btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(p);
		return ERR_PTR(-EINVAL);
	}

	if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
		disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;

	return disk_super;
}

int btrfs_forget_devices(const char *path)
{
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_free_stale_devices(strlen(path) ? path : NULL, NULL);
	mutex_unlock(&uuid_mutex);

	return ret;
}

/*
 * Look for a btrfs signature on a device. This may be called out of the mount
 * path and we are not allowed to call set_blocksize during the scan. The
 * superblock is read via pagecache.
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
					   void *holder)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct block_device *bdev;
	u64 bytenr, bytenr_orig;
	int ret;

	lockdep_assert_held(&uuid_mutex);

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	flags |= FMODE_EXCL;

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	bytenr_orig = btrfs_sb_offset(0);
	ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr);
	if (ret) {
		device = ERR_PTR(ret);
		goto error_bdev_put;
	}

	disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig);
	if (IS_ERR(disk_super)) {
		device = ERR_CAST(disk_super);
		goto error_bdev_put;
	}

	device = device_list_add(path, disk_super, &new_device_added);
	if (!IS_ERR(device)) {
		if (new_device_added)
			btrfs_free_stale_devices(path, device);
	}

	btrfs_release_disk_super(disk_super);

error_bdev_put:
	blkdev_put(bdev, flags);

	return device;
}
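
/*
 * Usage sketch for btrfs_scan_one_device() (illustrative only; @holder is
 * whatever token the caller passes through to blkdev_get_by_path()):
 *
 *	struct btrfs_device *device;
 *
 *	mutex_lock(&uuid_mutex);
 *	device = btrfs_scan_one_device(path, FMODE_READ, holder);
 *	mutex_unlock(&uuid_mutex);
 *	if (IS_ERR(device))
 *		return PTR_ERR(device);
 */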

/*
 * Try to find a chunk that intersects [start, start + len] range and when one
 * such is found, record the end of it in *start
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{
	u64 physical_start, physical_end;

	lockdep_assert_held(&device->fs_info->chunk_mutex);

	if (!find_first_extent_bit(&device->alloc_state, *start,
				   &physical_start, &physical_end,
				   CHUNK_ALLOCATED, NULL)) {

		if (in_range(physical_start, *start, len) ||
		    in_range(*start, physical_start,
			     physical_end - physical_start)) {
			*start = physical_end + 1;
			return true;
		}
	}
	return false;
}

static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
{
	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		/*
		 * We don't want to overwrite the superblock on the drive nor
		 * any area used by the boot loader (grub for example), so we
		 * make sure to start at an offset of at least 1MB.
		 */
		return max_t(u64, start, SZ_1M);
	case BTRFS_CHUNK_ALLOC_ZONED:
		/*
		 * We don't care about the starting region like regular
		 * allocator, because we anyway use/reserve the first two zones
		 * for superblock logging.
		 */
		return ALIGN(start, device->zone_info->zone_size);
	default:
		BUG();
	}
}

static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
					u64 *hole_start, u64 *hole_size,
					u64 num_bytes)
{
	u64 zone_size = device->zone_info->zone_size;
	u64 pos;
	int ret;
	bool changed = false;

	ASSERT(IS_ALIGNED(*hole_start, zone_size));

	while (*hole_size > 0) {
		pos = btrfs_find_allocatable_zones(device, *hole_start,
						   *hole_start + *hole_size,
						   num_bytes);
		if (pos != *hole_start) {
			*hole_size = *hole_start + *hole_size - pos;
			*hole_start = pos;
			changed = true;
			if (*hole_size < num_bytes)
				break;
		}

		ret = btrfs_ensure_empty_zones(device, pos, num_bytes);

		/* Range is ensured to be empty */
		if (!ret)
			return changed;

		/* Given hole range was invalid (outside of device) */
		if (ret == -ERANGE) {
			*hole_start += *hole_size;
			*hole_size = 0;
			return true;
		}

		*hole_start += zone_size;
		*hole_size -= zone_size;
		changed = true;
	}

	return changed;
}

/**
 * dev_extent_hole_check - check if specified hole is suitable for allocation
 * @device:	the device which we have the hole
 * @hole_start: starting position of the hole
 * @hole_size:	the size of the hole
 * @num_bytes:	the size of the free space that we need
 *
 * This function may modify @hole_start and @hole_size to reflect the suitable
 * position for allocation. Returns true if the hole position was updated,
 * false otherwise.
 */
static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
				  u64 *hole_size, u64 num_bytes)
{
	bool changed = false;
	u64 hole_end = *hole_start + *hole_size;

	for (;;) {
		/*
		 * Check before we set max_hole_start, otherwise we could end up
		 * sending back this offset anyway.
		 */
		if (contains_pending_extent(device, hole_start, *hole_size)) {
			if (hole_end >= *hole_start)
				*hole_size = hole_end - *hole_start;
			else
				*hole_size = 0;
			changed = true;
		}

		switch (device->fs_devices->chunk_alloc_policy) {
		case BTRFS_CHUNK_ALLOC_REGULAR:
			/* No extra check */
			break;
		case BTRFS_CHUNK_ALLOC_ZONED:
			if (dev_extent_hole_check_zoned(device, hole_start,
							hole_size, num_bytes)) {
				changed = true;
				/*
				 * The changed hole can contain pending extent.
				 * Loop again to check that.
				 */
				continue;
			}
			break;
		default:
			BUG();
		}

		break;
	}

	return changed;
}

/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size of the
 *		  max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find one. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 *
 * NOTE: This function will search *commit* root of device tree, and does extra
 * check to ensure dev extents are not double allocated.
 * This makes the function safe to allocate dev extents but may not report
 * correct usable device space, as device extent freed in current transaction
 * is not reported as available.
 */
static int find_free_dev_extent_start(struct btrfs_device *device,
				      u64 num_bytes, u64 search_start, u64 *start,
				      u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	search_start = dev_extent_search_start(device, search_start);

	WARN_ON(device->zone_info &&
		!IS_ALIGNED(num_bytes, device->zone_info->zone_size));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_backwards(root, &key, path);
	if (ret < 0)
		goto out;

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;
			dev_extent_hole_check(device, &search_start, &hole_size,
					      num_bytes);

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;
		if (dev_extent_hole_check(device, &search_start, &hole_size,
					  num_bytes)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(device, num_bytes, 0, start, len);
}
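
/*
 * Example use of find_free_dev_extent() (illustrative): asking for a 1GiB
 * hole on @device,
 *
 *	u64 start, len;
 *	int ret;
 *
 *	ret = find_free_dev_extent(device, SZ_1G, &start, &len);
 *
 * on success @start points at a hole at least SZ_1G long; on -ENOSPC,
 * @start and @len describe the largest hole found, as documented above.
 */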

static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start, u64 *dev_extent_len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret == 0)
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
out:
	btrfs_free_path(path);
	return ret;
}

static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;

	em_tree = &fs_info->mapping_tree;
	read_lock(&em_tree->lock);
	n = rb_last(&em_tree->map.rb_root);
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
	}
	read_unlock(&em_tree->lock);

	return ret;
}

static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	if (ret == 0) {
		/* Corruption */
		btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
		ret = -EUCLEAN;
		goto error;
	}

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
matched"); 1813 ret = -EUCLEAN; 1814 goto error; 1815 } 1816 1817 ret = btrfs_previous_item(fs_info->chunk_root, path, 1818 BTRFS_DEV_ITEMS_OBJECTID, 1819 BTRFS_DEV_ITEM_KEY); 1820 if (ret) { 1821 *devid_ret = 1; 1822 } else { 1823 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 1824 path->slots[0]); 1825 *devid_ret = found_key.offset + 1; 1826 } 1827 ret = 0; 1828 error: 1829 btrfs_free_path(path); 1830 return ret; 1831 } 1832 1833 /* 1834 * the device information is stored in the chunk root 1835 * the btrfs_device struct should be fully filled in 1836 */ 1837 static int btrfs_add_dev_item(struct btrfs_trans_handle *trans, 1838 struct btrfs_device *device) 1839 { 1840 int ret; 1841 struct btrfs_path *path; 1842 struct btrfs_dev_item *dev_item; 1843 struct extent_buffer *leaf; 1844 struct btrfs_key key; 1845 unsigned long ptr; 1846 1847 path = btrfs_alloc_path(); 1848 if (!path) 1849 return -ENOMEM; 1850 1851 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1852 key.type = BTRFS_DEV_ITEM_KEY; 1853 key.offset = device->devid; 1854 1855 btrfs_reserve_chunk_metadata(trans, true); 1856 ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path, 1857 &key, sizeof(*dev_item)); 1858 btrfs_trans_release_chunk_metadata(trans); 1859 if (ret) 1860 goto out; 1861 1862 leaf = path->nodes[0]; 1863 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 1864 1865 btrfs_set_device_id(leaf, dev_item, device->devid); 1866 btrfs_set_device_generation(leaf, dev_item, 0); 1867 btrfs_set_device_type(leaf, dev_item, device->type); 1868 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 1869 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 1870 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 1871 btrfs_set_device_total_bytes(leaf, dev_item, 1872 btrfs_device_get_disk_total_bytes(device)); 1873 btrfs_set_device_bytes_used(leaf, dev_item, 1874 btrfs_device_get_bytes_used(device)); 1875 btrfs_set_device_group(leaf, dev_item, 0); 1876 btrfs_set_device_seek_speed(leaf, dev_item, 0); 1877 btrfs_set_device_bandwidth(leaf, dev_item, 0); 1878 btrfs_set_device_start_offset(leaf, dev_item, 0); 1879 1880 ptr = btrfs_device_uuid(dev_item); 1881 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 1882 ptr = btrfs_device_fsid(dev_item); 1883 write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid, 1884 ptr, BTRFS_FSID_SIZE); 1885 btrfs_mark_buffer_dirty(leaf); 1886 1887 ret = 0; 1888 out: 1889 btrfs_free_path(path); 1890 return ret; 1891 } 1892 1893 /* 1894 * Function to update ctime/mtime for a given device path. 1895 * Mainly used for ctime/mtime based probe like libblkid. 1896 * 1897 * We don't care about errors here, this is just to be kind to userspace. 
1898 */
1899 static void update_dev_time(const char *device_path)
1900 {
1901 struct path path;
1902 struct timespec64 now;
1903 int ret;
1904
1905 ret = kern_path(device_path, LOOKUP_FOLLOW, &path);
1906 if (ret)
1907 return;
1908
1909 now = current_time(d_inode(path.dentry));
1910 inode_update_time(d_inode(path.dentry), &now, S_MTIME | S_CTIME);
1911 path_put(&path);
1912 }
1913
1914 static int btrfs_rm_dev_item(struct btrfs_device *device)
1915 {
1916 struct btrfs_root *root = device->fs_info->chunk_root;
1917 int ret;
1918 struct btrfs_path *path;
1919 struct btrfs_key key;
1920 struct btrfs_trans_handle *trans;
1921
1922 path = btrfs_alloc_path();
1923 if (!path)
1924 return -ENOMEM;
1925
1926 trans = btrfs_start_transaction(root, 0);
1927 if (IS_ERR(trans)) {
1928 btrfs_free_path(path);
1929 return PTR_ERR(trans);
1930 }
1931 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1932 key.type = BTRFS_DEV_ITEM_KEY;
1933 key.offset = device->devid;
1934
1935 btrfs_reserve_chunk_metadata(trans, false);
1936 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1937 btrfs_trans_release_chunk_metadata(trans);
1938 if (ret) {
1939 if (ret > 0)
1940 ret = -ENOENT;
1941 btrfs_abort_transaction(trans, ret);
1942 btrfs_end_transaction(trans);
1943 goto out;
1944 }
1945
1946 ret = btrfs_del_item(trans, root, path);
1947 if (ret) {
1948 btrfs_abort_transaction(trans, ret);
1949 btrfs_end_transaction(trans);
1950 }
1951
1952 out:
1953 btrfs_free_path(path);
1954 if (!ret)
1955 ret = btrfs_commit_transaction(trans);
1956 return ret;
1957 }
1958
1959 /*
1960 * Verify that @num_devices satisfies the RAID profile constraints in the whole
1961 * filesystem. It's up to the caller to adjust that number regarding e.g. device
1962 * replace.
1963 */
1964 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
1965 u64 num_devices)
1966 {
1967 u64 all_avail;
1968 unsigned seq;
1969 int i;
1970
1971 do {
1972 seq = read_seqbegin(&fs_info->profiles_lock);
1973
1974 all_avail = fs_info->avail_data_alloc_bits |
1975 fs_info->avail_system_alloc_bits |
1976 fs_info->avail_metadata_alloc_bits;
1977 } while (read_seqretry(&fs_info->profiles_lock, seq));
1978
1979 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
1980 if (!(all_avail & btrfs_raid_array[i].bg_flag))
1981 continue;
1982
1983 if (num_devices < btrfs_raid_array[i].devs_min)
1984 return btrfs_raid_array[i].mindev_error;
1985 }
1986
1987 return 0;
1988 }
1989
1990 static struct btrfs_device *btrfs_find_next_active_device(
1991 struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
1992 {
1993 struct btrfs_device *next_device;
1994
1995 list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
1996 if (next_device != device &&
1997 !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
1998 && next_device->bdev)
1999 return next_device;
2000 }
2001
2002 return NULL;
2003 }
2004
2005 /*
2006 * Helper function to check if the given device is part of s_bdev / latest_dev
2007 * and replace it with the provided or the next active device. In the context
2008 * where this function is called, there should always be another device (or
2009 * next_device) which is active.
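 *
 * Example (hypothetical state): with active devices A and B, where A is
 * both sb->s_bdev and latest_dev, after
 *
 *	btrfs_assign_next_active_device(A, NULL);
 *
 * both pointers refer to B, the next active device found on the list.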
2010 */
2011 void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
2012 struct btrfs_device *next_device)
2013 {
2014 struct btrfs_fs_info *fs_info = device->fs_info;
2015
2016 if (!next_device)
2017 next_device = btrfs_find_next_active_device(fs_info->fs_devices,
2018 device);
2019 ASSERT(next_device);
2020
2021 if (fs_info->sb->s_bdev &&
2022 (fs_info->sb->s_bdev == device->bdev))
2023 fs_info->sb->s_bdev = next_device->bdev;
2024
2025 if (fs_info->fs_devices->latest_dev->bdev == device->bdev)
2026 fs_info->fs_devices->latest_dev = next_device;
2027 }
2028
2029 /*
2030 * Return btrfs_fs_devices::num_devices excluding the device that's being
2031 * currently replaced.
2032 */
2033 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
2034 {
2035 u64 num_devices = fs_info->fs_devices->num_devices;
2036
2037 down_read(&fs_info->dev_replace.rwsem);
2038 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
2039 ASSERT(num_devices > 1);
2040 num_devices--;
2041 }
2042 up_read(&fs_info->dev_replace.rwsem);
2043
2044 return num_devices;
2045 }
2046
2047 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
2048 struct block_device *bdev,
2049 const char *device_path)
2050 {
2051 struct btrfs_super_block *disk_super;
2052 int copy_num;
2053
2054 if (!bdev)
2055 return;
2056
2057 for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
2058 struct page *page;
2059 int ret;
2060
2061 disk_super = btrfs_read_dev_one_super(bdev, copy_num);
2062 if (IS_ERR(disk_super))
2063 continue;
2064
2065 if (bdev_is_zoned(bdev)) {
2066 btrfs_reset_sb_log_zones(bdev, copy_num);
2067 continue;
2068 }
2069
2070 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
2071
2072 page = virt_to_page(disk_super);
2073 set_page_dirty(page);
2074 lock_page(page);
2075 /* write_one_page() unlocks the page */
2076 ret = write_one_page(page);
2077 if (ret)
2078 btrfs_warn(fs_info,
2079 "error clearing superblock number %d (%d)",
2080 copy_num, ret);
2081 btrfs_release_disk_super(disk_super);
2082
2083 }
2084
2085 /* Notify udev that device has changed */
2086 btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
2087
2088 /* Update ctime/mtime for device path for libblkid */
2089 update_dev_time(device_path);
2090 }
2091
2092 int btrfs_rm_device(struct btrfs_fs_info *fs_info,
2093 struct btrfs_dev_lookup_args *args,
2094 struct block_device **bdev, fmode_t *mode)
2095 {
2096 struct btrfs_device *device;
2097 struct btrfs_fs_devices *cur_devices;
2098 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2099 u64 num_devices;
2100 int ret = 0;
2101
2102 /*
2103 * The device list in fs_devices is accessed without locks (neither
2104 * uuid_mutex nor device_list_mutex) as it won't change on a mounted
2105 * filesystem and another device rm cannot run.
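 *
 * (A second "device remove" cannot run because the ioctl path is
 * expected to take the exclusive operation lock, btrfs_exclop_start(),
 * before calling in here.)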
2106 */ 2107 num_devices = btrfs_num_devices(fs_info); 2108 2109 ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1); 2110 if (ret) 2111 goto out; 2112 2113 device = btrfs_find_device(fs_info->fs_devices, args); 2114 if (!device) { 2115 if (args->missing) 2116 ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND; 2117 else 2118 ret = -ENOENT; 2119 goto out; 2120 } 2121 2122 if (btrfs_pinned_by_swapfile(fs_info, device)) { 2123 btrfs_warn_in_rcu(fs_info, 2124 "cannot remove device %s (devid %llu) due to active swapfile", 2125 rcu_str_deref(device->name), device->devid); 2126 ret = -ETXTBSY; 2127 goto out; 2128 } 2129 2130 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 2131 ret = BTRFS_ERROR_DEV_TGT_REPLACE; 2132 goto out; 2133 } 2134 2135 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 2136 fs_info->fs_devices->rw_devices == 1) { 2137 ret = BTRFS_ERROR_DEV_ONLY_WRITABLE; 2138 goto out; 2139 } 2140 2141 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2142 mutex_lock(&fs_info->chunk_mutex); 2143 list_del_init(&device->dev_alloc_list); 2144 device->fs_devices->rw_devices--; 2145 mutex_unlock(&fs_info->chunk_mutex); 2146 } 2147 2148 ret = btrfs_shrink_device(device, 0); 2149 if (!ret) 2150 btrfs_reada_remove_dev(device); 2151 if (ret) 2152 goto error_undo; 2153 2154 /* 2155 * TODO: the superblock still includes this device in its num_devices 2156 * counter although write_all_supers() is not locked out. This 2157 * could give a filesystem state which requires a degraded mount. 2158 */ 2159 ret = btrfs_rm_dev_item(device); 2160 if (ret) 2161 goto error_undo; 2162 2163 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2164 btrfs_scrub_cancel_dev(device); 2165 2166 /* 2167 * the device list mutex makes sure that we don't change 2168 * the device list while someone else is writing out all 2169 * the device supers. Whoever is writing all supers, should 2170 * lock the device list mutex before getting the number of 2171 * devices in the super block (super_copy). Conversely, 2172 * whoever updates the number of devices in the super block 2173 * (super_copy) should hold the device list mutex. 2174 */ 2175 2176 /* 2177 * In normal cases the cur_devices == fs_devices. But in case 2178 * of deleting a seed device, the cur_devices should point to 2179 * its own fs_devices listed under the fs_devices->seed_list. 2180 */ 2181 cur_devices = device->fs_devices; 2182 mutex_lock(&fs_devices->device_list_mutex); 2183 list_del_rcu(&device->dev_list); 2184 2185 cur_devices->num_devices--; 2186 cur_devices->total_devices--; 2187 /* Update total_devices of the parent fs_devices if it's seed */ 2188 if (cur_devices != fs_devices) 2189 fs_devices->total_devices--; 2190 2191 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) 2192 cur_devices->missing_devices--; 2193 2194 btrfs_assign_next_active_device(device, NULL); 2195 2196 if (device->bdev) { 2197 cur_devices->open_devices--; 2198 /* remove sysfs entry */ 2199 btrfs_sysfs_remove_device(device); 2200 } 2201 2202 num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1; 2203 btrfs_set_super_num_devices(fs_info->super_copy, num_devices); 2204 mutex_unlock(&fs_devices->device_list_mutex); 2205 2206 /* 2207 * At this point, the device is zero sized and detached from the 2208 * devices list. All that's left is to zero out the old supers and 2209 * free the device. 
2210 *
2211 * We cannot call btrfs_close_bdev() here because we're holding the sb
2212 * write lock, and blkdev_put() will pull in the ->open_mutex on the
2213 * block device and its dependencies. Instead just flush the device
2214 * and let the caller do the final blkdev_put.
2215 */
2216 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2217 btrfs_scratch_superblocks(fs_info, device->bdev,
2218 device->name->str);
2219 if (device->bdev) {
2220 sync_blockdev(device->bdev);
2221 invalidate_bdev(device->bdev);
2222 }
2223 }
2224
2225 *bdev = device->bdev;
2226 *mode = device->mode;
2227 synchronize_rcu();
2228 btrfs_free_device(device);
2229
2230 /*
2231 * This can happen if cur_devices is the private seed devices list. We
2232 * cannot call close_fs_devices() here because it expects the uuid_mutex
2233 * to be held, but in fact we don't need that for the private
2234 * seed_devices, we can simply decrement cur_devices->opened and then
2235 * remove it from our list and free the fs_devices.
2236 */
2237 if (cur_devices->num_devices == 0) {
2238 list_del_init(&cur_devices->seed_list);
2239 ASSERT(cur_devices->opened == 1);
2240 cur_devices->opened--;
2241 free_fs_devices(cur_devices);
2242 }
2243
2244 out:
2245 return ret;
2246
2247 error_undo:
2248 btrfs_reada_undo_remove_dev(device);
2249 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2250 mutex_lock(&fs_info->chunk_mutex);
2251 list_add(&device->dev_alloc_list,
2252 &fs_devices->alloc_list);
2253 device->fs_devices->rw_devices++;
2254 mutex_unlock(&fs_info->chunk_mutex);
2255 }
2256 goto out;
2257 }
2258
2259 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
2260 {
2261 struct btrfs_fs_devices *fs_devices;
2262
2263 lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);
2264
2265 /*
2266 * In case of a fs with no seed, srcdev->fs_devices will point
2267 * to the fs_devices of fs_info. However when the dev being replaced is
2268 * a seed dev it will point to the seed's local fs_devices. In short,
2269 * srcdev will have its correct fs_devices in both the cases.
2270 */
2271 fs_devices = srcdev->fs_devices;
2272
2273 list_del_rcu(&srcdev->dev_list);
2274 list_del(&srcdev->dev_alloc_list);
2275 fs_devices->num_devices--;
2276 if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
2277 fs_devices->missing_devices--;
2278
2279 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
2280 fs_devices->rw_devices--;
2281
2282 if (srcdev->bdev)
2283 fs_devices->open_devices--;
2284 }
2285
2286 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
2287 {
2288 struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2289
2290 mutex_lock(&uuid_mutex);
2291
2292 btrfs_close_bdev(srcdev);
2293 synchronize_rcu();
2294 btrfs_free_device(srcdev);
2295
2296 /* If there are no devices left, delete the fs_devices */
2297 if (!fs_devices->num_devices) {
2298 /*
2299 * On a mounted FS, num_devices can't be zero unless it's a
2300 * seed. In case of a seed device being replaced, the replace
2301 * target is added to the sprout FS, so there will be no
2302 * device left under the seed FS.
2303 */ 2304 ASSERT(fs_devices->seeding); 2305 2306 list_del_init(&fs_devices->seed_list); 2307 close_fs_devices(fs_devices); 2308 free_fs_devices(fs_devices); 2309 } 2310 mutex_unlock(&uuid_mutex); 2311 } 2312 2313 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev) 2314 { 2315 struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices; 2316 2317 mutex_lock(&fs_devices->device_list_mutex); 2318 2319 btrfs_sysfs_remove_device(tgtdev); 2320 2321 if (tgtdev->bdev) 2322 fs_devices->open_devices--; 2323 2324 fs_devices->num_devices--; 2325 2326 btrfs_assign_next_active_device(tgtdev, NULL); 2327 2328 list_del_rcu(&tgtdev->dev_list); 2329 2330 mutex_unlock(&fs_devices->device_list_mutex); 2331 2332 btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev, 2333 tgtdev->name->str); 2334 2335 btrfs_close_bdev(tgtdev); 2336 synchronize_rcu(); 2337 btrfs_free_device(tgtdev); 2338 } 2339 2340 /** 2341 * Populate args from device at path 2342 * 2343 * @fs_info: the filesystem 2344 * @args: the args to populate 2345 * @path: the path to the device 2346 * 2347 * This will read the super block of the device at @path and populate @args with 2348 * the devid, fsid, and uuid. This is meant to be used for ioctls that need to 2349 * lookup a device to operate on, but need to do it before we take any locks. 2350 * This properly handles the special case of "missing" that a user may pass in, 2351 * and does some basic sanity checks. The caller must make sure that @path is 2352 * properly NUL terminated before calling in, and must call 2353 * btrfs_put_dev_args_from_path() in order to free up the temporary fsid and 2354 * uuid buffers. 2355 * 2356 * Return: 0 for success, -errno for failure 2357 */ 2358 int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info, 2359 struct btrfs_dev_lookup_args *args, 2360 const char *path) 2361 { 2362 struct btrfs_super_block *disk_super; 2363 struct block_device *bdev; 2364 int ret; 2365 2366 if (!path || !path[0]) 2367 return -EINVAL; 2368 if (!strcmp(path, "missing")) { 2369 args->missing = true; 2370 return 0; 2371 } 2372 2373 args->uuid = kzalloc(BTRFS_UUID_SIZE, GFP_KERNEL); 2374 args->fsid = kzalloc(BTRFS_FSID_SIZE, GFP_KERNEL); 2375 if (!args->uuid || !args->fsid) { 2376 btrfs_put_dev_args_from_path(args); 2377 return -ENOMEM; 2378 } 2379 2380 ret = btrfs_get_bdev_and_sb(path, FMODE_READ, fs_info->bdev_holder, 0, 2381 &bdev, &disk_super); 2382 if (ret) 2383 return ret; 2384 args->devid = btrfs_stack_device_id(&disk_super->dev_item); 2385 memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE); 2386 if (btrfs_fs_incompat(fs_info, METADATA_UUID)) 2387 memcpy(args->fsid, disk_super->metadata_uuid, BTRFS_FSID_SIZE); 2388 else 2389 memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE); 2390 btrfs_release_disk_super(disk_super); 2391 blkdev_put(bdev, FMODE_READ); 2392 return 0; 2393 } 2394 2395 /* 2396 * Only use this jointly with btrfs_get_dev_args_from_path() because we will 2397 * allocate our ->uuid and ->fsid pointers, everybody else uses local variables 2398 * that don't need to be freed. 
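 *
 * A typical pairing looks like this sketch of a hypothetical caller
 * (compare btrfs_find_device_by_devspec() below):
 *
 *	BTRFS_DEV_LOOKUP_ARGS(args);
 *
 *	ret = btrfs_get_dev_args_from_path(fs_info, &args, path);
 *	if (ret)
 *		return ret;
 *	device = btrfs_find_device(fs_info->fs_devices, &args);
 *	btrfs_put_dev_args_from_path(&args);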
2399 */ 2400 void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args) 2401 { 2402 kfree(args->uuid); 2403 kfree(args->fsid); 2404 args->uuid = NULL; 2405 args->fsid = NULL; 2406 } 2407 2408 struct btrfs_device *btrfs_find_device_by_devspec( 2409 struct btrfs_fs_info *fs_info, u64 devid, 2410 const char *device_path) 2411 { 2412 BTRFS_DEV_LOOKUP_ARGS(args); 2413 struct btrfs_device *device; 2414 int ret; 2415 2416 if (devid) { 2417 args.devid = devid; 2418 device = btrfs_find_device(fs_info->fs_devices, &args); 2419 if (!device) 2420 return ERR_PTR(-ENOENT); 2421 return device; 2422 } 2423 2424 ret = btrfs_get_dev_args_from_path(fs_info, &args, device_path); 2425 if (ret) 2426 return ERR_PTR(ret); 2427 device = btrfs_find_device(fs_info->fs_devices, &args); 2428 btrfs_put_dev_args_from_path(&args); 2429 if (!device) 2430 return ERR_PTR(-ENOENT); 2431 return device; 2432 } 2433 2434 static struct btrfs_fs_devices *btrfs_init_sprout(struct btrfs_fs_info *fs_info) 2435 { 2436 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2437 struct btrfs_fs_devices *old_devices; 2438 struct btrfs_fs_devices *seed_devices; 2439 2440 lockdep_assert_held(&uuid_mutex); 2441 if (!fs_devices->seeding) 2442 return ERR_PTR(-EINVAL); 2443 2444 /* 2445 * Private copy of the seed devices, anchored at 2446 * fs_info->fs_devices->seed_list 2447 */ 2448 seed_devices = alloc_fs_devices(NULL, NULL); 2449 if (IS_ERR(seed_devices)) 2450 return seed_devices; 2451 2452 /* 2453 * It's necessary to retain a copy of the original seed fs_devices in 2454 * fs_uuids so that filesystems which have been seeded can successfully 2455 * reference the seed device from open_seed_devices. This also supports 2456 * multiple fs seed. 2457 */ 2458 old_devices = clone_fs_devices(fs_devices); 2459 if (IS_ERR(old_devices)) { 2460 kfree(seed_devices); 2461 return old_devices; 2462 } 2463 2464 list_add(&old_devices->fs_list, &fs_uuids); 2465 2466 memcpy(seed_devices, fs_devices, sizeof(*seed_devices)); 2467 seed_devices->opened = 1; 2468 INIT_LIST_HEAD(&seed_devices->devices); 2469 INIT_LIST_HEAD(&seed_devices->alloc_list); 2470 mutex_init(&seed_devices->device_list_mutex); 2471 2472 return seed_devices; 2473 } 2474 2475 /* 2476 * Splice seed devices into the sprout fs_devices. 2477 * Generate a new fsid for the sprouted read-write filesystem. 2478 */ 2479 static void btrfs_setup_sprout(struct btrfs_fs_info *fs_info, 2480 struct btrfs_fs_devices *seed_devices) 2481 { 2482 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2483 struct btrfs_super_block *disk_super = fs_info->super_copy; 2484 struct btrfs_device *device; 2485 u64 super_flags; 2486 2487 /* 2488 * We are updating the fsid, the thread leading to device_list_add() 2489 * could race, so uuid_mutex is needed. 2490 */ 2491 lockdep_assert_held(&uuid_mutex); 2492 2493 /* 2494 * The threads listed below may traverse dev_list but can do that without 2495 * device_list_mutex: 2496 * - All device ops and balance - as we are in btrfs_exclop_start. 2497 * - Various dev_list readers - are using RCU. 2498 * - btrfs_ioctl_fitrim() - is using RCU. 
2499 * 2500 * For-read threads as below are using device_list_mutex: 2501 * - Readonly scrub btrfs_scrub_dev() 2502 * - Readonly scrub btrfs_scrub_progress() 2503 * - btrfs_get_dev_stats() 2504 */ 2505 lockdep_assert_held(&fs_devices->device_list_mutex); 2506 2507 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices, 2508 synchronize_rcu); 2509 list_for_each_entry(device, &seed_devices->devices, dev_list) 2510 device->fs_devices = seed_devices; 2511 2512 fs_devices->seeding = false; 2513 fs_devices->num_devices = 0; 2514 fs_devices->open_devices = 0; 2515 fs_devices->missing_devices = 0; 2516 fs_devices->rotating = false; 2517 list_add(&seed_devices->seed_list, &fs_devices->seed_list); 2518 2519 generate_random_uuid(fs_devices->fsid); 2520 memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE); 2521 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); 2522 2523 super_flags = btrfs_super_flags(disk_super) & 2524 ~BTRFS_SUPER_FLAG_SEEDING; 2525 btrfs_set_super_flags(disk_super, super_flags); 2526 } 2527 2528 /* 2529 * Store the expected generation for seed devices in device items. 2530 */ 2531 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans) 2532 { 2533 BTRFS_DEV_LOOKUP_ARGS(args); 2534 struct btrfs_fs_info *fs_info = trans->fs_info; 2535 struct btrfs_root *root = fs_info->chunk_root; 2536 struct btrfs_path *path; 2537 struct extent_buffer *leaf; 2538 struct btrfs_dev_item *dev_item; 2539 struct btrfs_device *device; 2540 struct btrfs_key key; 2541 u8 fs_uuid[BTRFS_FSID_SIZE]; 2542 u8 dev_uuid[BTRFS_UUID_SIZE]; 2543 int ret; 2544 2545 path = btrfs_alloc_path(); 2546 if (!path) 2547 return -ENOMEM; 2548 2549 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2550 key.offset = 0; 2551 key.type = BTRFS_DEV_ITEM_KEY; 2552 2553 while (1) { 2554 btrfs_reserve_chunk_metadata(trans, false); 2555 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2556 btrfs_trans_release_chunk_metadata(trans); 2557 if (ret < 0) 2558 goto error; 2559 2560 leaf = path->nodes[0]; 2561 next_slot: 2562 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 2563 ret = btrfs_next_leaf(root, path); 2564 if (ret > 0) 2565 break; 2566 if (ret < 0) 2567 goto error; 2568 leaf = path->nodes[0]; 2569 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2570 btrfs_release_path(path); 2571 continue; 2572 } 2573 2574 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2575 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID || 2576 key.type != BTRFS_DEV_ITEM_KEY) 2577 break; 2578 2579 dev_item = btrfs_item_ptr(leaf, path->slots[0], 2580 struct btrfs_dev_item); 2581 args.devid = btrfs_device_id(leaf, dev_item); 2582 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 2583 BTRFS_UUID_SIZE); 2584 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 2585 BTRFS_FSID_SIZE); 2586 args.uuid = dev_uuid; 2587 args.fsid = fs_uuid; 2588 device = btrfs_find_device(fs_info->fs_devices, &args); 2589 BUG_ON(!device); /* Logic error */ 2590 2591 if (device->fs_devices->seeding) { 2592 btrfs_set_device_generation(leaf, dev_item, 2593 device->generation); 2594 btrfs_mark_buffer_dirty(leaf); 2595 } 2596 2597 path->slots[0]++; 2598 goto next_slot; 2599 } 2600 ret = 0; 2601 error: 2602 btrfs_free_path(path); 2603 return ret; 2604 } 2605 2606 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path) 2607 { 2608 struct btrfs_root *root = fs_info->dev_root; 2609 struct request_queue *q; 2610 struct btrfs_trans_handle *trans; 2611 struct btrfs_device *device; 2612 struct 
block_device *bdev; 2613 struct super_block *sb = fs_info->sb; 2614 struct rcu_string *name; 2615 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2616 struct btrfs_fs_devices *seed_devices; 2617 u64 orig_super_total_bytes; 2618 u64 orig_super_num_devices; 2619 int ret = 0; 2620 bool seeding_dev = false; 2621 bool locked = false; 2622 2623 if (sb_rdonly(sb) && !fs_devices->seeding) 2624 return -EROFS; 2625 2626 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, 2627 fs_info->bdev_holder); 2628 if (IS_ERR(bdev)) 2629 return PTR_ERR(bdev); 2630 2631 if (!btrfs_check_device_zone_type(fs_info, bdev)) { 2632 ret = -EINVAL; 2633 goto error; 2634 } 2635 2636 if (fs_devices->seeding) { 2637 seeding_dev = true; 2638 down_write(&sb->s_umount); 2639 mutex_lock(&uuid_mutex); 2640 locked = true; 2641 } 2642 2643 sync_blockdev(bdev); 2644 2645 rcu_read_lock(); 2646 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) { 2647 if (device->bdev == bdev) { 2648 ret = -EEXIST; 2649 rcu_read_unlock(); 2650 goto error; 2651 } 2652 } 2653 rcu_read_unlock(); 2654 2655 device = btrfs_alloc_device(fs_info, NULL, NULL); 2656 if (IS_ERR(device)) { 2657 /* we can safely leave the fs_devices entry around */ 2658 ret = PTR_ERR(device); 2659 goto error; 2660 } 2661 2662 name = rcu_string_strdup(device_path, GFP_KERNEL); 2663 if (!name) { 2664 ret = -ENOMEM; 2665 goto error_free_device; 2666 } 2667 rcu_assign_pointer(device->name, name); 2668 2669 device->fs_info = fs_info; 2670 device->bdev = bdev; 2671 2672 ret = btrfs_get_dev_zone_info(device, false); 2673 if (ret) 2674 goto error_free_device; 2675 2676 trans = btrfs_start_transaction(root, 0); 2677 if (IS_ERR(trans)) { 2678 ret = PTR_ERR(trans); 2679 goto error_free_zone; 2680 } 2681 2682 q = bdev_get_queue(bdev); 2683 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 2684 device->generation = trans->transid; 2685 device->io_width = fs_info->sectorsize; 2686 device->io_align = fs_info->sectorsize; 2687 device->sector_size = fs_info->sectorsize; 2688 device->total_bytes = 2689 round_down(bdev_nr_bytes(bdev), fs_info->sectorsize); 2690 device->disk_total_bytes = device->total_bytes; 2691 device->commit_total_bytes = device->total_bytes; 2692 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2693 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 2694 device->mode = FMODE_EXCL; 2695 device->dev_stats_valid = 1; 2696 set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE); 2697 2698 if (seeding_dev) { 2699 btrfs_clear_sb_rdonly(sb); 2700 2701 /* GFP_KERNEL allocation must not be under device_list_mutex */ 2702 seed_devices = btrfs_init_sprout(fs_info); 2703 if (IS_ERR(seed_devices)) { 2704 ret = PTR_ERR(seed_devices); 2705 btrfs_abort_transaction(trans, ret); 2706 goto error_trans; 2707 } 2708 } 2709 2710 mutex_lock(&fs_devices->device_list_mutex); 2711 if (seeding_dev) { 2712 btrfs_setup_sprout(fs_info, seed_devices); 2713 btrfs_assign_next_active_device(fs_info->fs_devices->latest_dev, 2714 device); 2715 } 2716 2717 device->fs_devices = fs_devices; 2718 2719 mutex_lock(&fs_info->chunk_mutex); 2720 list_add_rcu(&device->dev_list, &fs_devices->devices); 2721 list_add(&device->dev_alloc_list, &fs_devices->alloc_list); 2722 fs_devices->num_devices++; 2723 fs_devices->open_devices++; 2724 fs_devices->rw_devices++; 2725 fs_devices->total_devices++; 2726 fs_devices->total_rw_bytes += device->total_bytes; 2727 2728 atomic64_add(device->total_bytes, &fs_info->free_chunk_space); 2729 2730 if (!blk_queue_nonrot(q)) 2731 
fs_devices->rotating = true; 2732 2733 orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy); 2734 btrfs_set_super_total_bytes(fs_info->super_copy, 2735 round_down(orig_super_total_bytes + device->total_bytes, 2736 fs_info->sectorsize)); 2737 2738 orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy); 2739 btrfs_set_super_num_devices(fs_info->super_copy, 2740 orig_super_num_devices + 1); 2741 2742 /* 2743 * we've got more storage, clear any full flags on the space 2744 * infos 2745 */ 2746 btrfs_clear_space_info_full(fs_info); 2747 2748 mutex_unlock(&fs_info->chunk_mutex); 2749 2750 /* Add sysfs device entry */ 2751 btrfs_sysfs_add_device(device); 2752 2753 mutex_unlock(&fs_devices->device_list_mutex); 2754 2755 if (seeding_dev) { 2756 mutex_lock(&fs_info->chunk_mutex); 2757 ret = init_first_rw_device(trans); 2758 mutex_unlock(&fs_info->chunk_mutex); 2759 if (ret) { 2760 btrfs_abort_transaction(trans, ret); 2761 goto error_sysfs; 2762 } 2763 } 2764 2765 ret = btrfs_add_dev_item(trans, device); 2766 if (ret) { 2767 btrfs_abort_transaction(trans, ret); 2768 goto error_sysfs; 2769 } 2770 2771 if (seeding_dev) { 2772 ret = btrfs_finish_sprout(trans); 2773 if (ret) { 2774 btrfs_abort_transaction(trans, ret); 2775 goto error_sysfs; 2776 } 2777 2778 /* 2779 * fs_devices now represents the newly sprouted filesystem and 2780 * its fsid has been changed by btrfs_sprout_splice(). 2781 */ 2782 btrfs_sysfs_update_sprout_fsid(fs_devices); 2783 } 2784 2785 ret = btrfs_commit_transaction(trans); 2786 2787 if (seeding_dev) { 2788 mutex_unlock(&uuid_mutex); 2789 up_write(&sb->s_umount); 2790 locked = false; 2791 2792 if (ret) /* transaction commit */ 2793 return ret; 2794 2795 ret = btrfs_relocate_sys_chunks(fs_info); 2796 if (ret < 0) 2797 btrfs_handle_fs_error(fs_info, ret, 2798 "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command."); 2799 trans = btrfs_attach_transaction(root); 2800 if (IS_ERR(trans)) { 2801 if (PTR_ERR(trans) == -ENOENT) 2802 return 0; 2803 ret = PTR_ERR(trans); 2804 trans = NULL; 2805 goto error_sysfs; 2806 } 2807 ret = btrfs_commit_transaction(trans); 2808 } 2809 2810 /* 2811 * Now that we have written a new super block to this device, check all 2812 * other fs_devices list if device_path alienates any other scanned 2813 * device. 2814 * We can ignore the return value as it typically returns -EINVAL and 2815 * only succeeds if the device was an alien. 
2816 */ 2817 btrfs_forget_devices(device_path); 2818 2819 /* Update ctime/mtime for blkid or udev */ 2820 update_dev_time(device_path); 2821 2822 return ret; 2823 2824 error_sysfs: 2825 btrfs_sysfs_remove_device(device); 2826 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2827 mutex_lock(&fs_info->chunk_mutex); 2828 list_del_rcu(&device->dev_list); 2829 list_del(&device->dev_alloc_list); 2830 fs_info->fs_devices->num_devices--; 2831 fs_info->fs_devices->open_devices--; 2832 fs_info->fs_devices->rw_devices--; 2833 fs_info->fs_devices->total_devices--; 2834 fs_info->fs_devices->total_rw_bytes -= device->total_bytes; 2835 atomic64_sub(device->total_bytes, &fs_info->free_chunk_space); 2836 btrfs_set_super_total_bytes(fs_info->super_copy, 2837 orig_super_total_bytes); 2838 btrfs_set_super_num_devices(fs_info->super_copy, 2839 orig_super_num_devices); 2840 mutex_unlock(&fs_info->chunk_mutex); 2841 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2842 error_trans: 2843 if (seeding_dev) 2844 btrfs_set_sb_rdonly(sb); 2845 if (trans) 2846 btrfs_end_transaction(trans); 2847 error_free_zone: 2848 btrfs_destroy_dev_zone_info(device); 2849 error_free_device: 2850 btrfs_free_device(device); 2851 error: 2852 blkdev_put(bdev, FMODE_EXCL); 2853 if (locked) { 2854 mutex_unlock(&uuid_mutex); 2855 up_write(&sb->s_umount); 2856 } 2857 return ret; 2858 } 2859 2860 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans, 2861 struct btrfs_device *device) 2862 { 2863 int ret; 2864 struct btrfs_path *path; 2865 struct btrfs_root *root = device->fs_info->chunk_root; 2866 struct btrfs_dev_item *dev_item; 2867 struct extent_buffer *leaf; 2868 struct btrfs_key key; 2869 2870 path = btrfs_alloc_path(); 2871 if (!path) 2872 return -ENOMEM; 2873 2874 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2875 key.type = BTRFS_DEV_ITEM_KEY; 2876 key.offset = device->devid; 2877 2878 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2879 if (ret < 0) 2880 goto out; 2881 2882 if (ret > 0) { 2883 ret = -ENOENT; 2884 goto out; 2885 } 2886 2887 leaf = path->nodes[0]; 2888 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 2889 2890 btrfs_set_device_id(leaf, dev_item, device->devid); 2891 btrfs_set_device_type(leaf, dev_item, device->type); 2892 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 2893 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 2894 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 2895 btrfs_set_device_total_bytes(leaf, dev_item, 2896 btrfs_device_get_disk_total_bytes(device)); 2897 btrfs_set_device_bytes_used(leaf, dev_item, 2898 btrfs_device_get_bytes_used(device)); 2899 btrfs_mark_buffer_dirty(leaf); 2900 2901 out: 2902 btrfs_free_path(path); 2903 return ret; 2904 } 2905 2906 int btrfs_grow_device(struct btrfs_trans_handle *trans, 2907 struct btrfs_device *device, u64 new_size) 2908 { 2909 struct btrfs_fs_info *fs_info = device->fs_info; 2910 struct btrfs_super_block *super_copy = fs_info->super_copy; 2911 u64 old_total; 2912 u64 diff; 2913 int ret; 2914 2915 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 2916 return -EACCES; 2917 2918 new_size = round_down(new_size, fs_info->sectorsize); 2919 2920 mutex_lock(&fs_info->chunk_mutex); 2921 old_total = btrfs_super_total_bytes(super_copy); 2922 diff = round_down(new_size - device->total_bytes, fs_info->sectorsize); 2923 2924 if (new_size <= device->total_bytes || 2925 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 2926 
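		/* Only strict growth is allowed, and never on a replace target. */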
mutex_unlock(&fs_info->chunk_mutex); 2927 return -EINVAL; 2928 } 2929 2930 btrfs_set_super_total_bytes(super_copy, 2931 round_down(old_total + diff, fs_info->sectorsize)); 2932 device->fs_devices->total_rw_bytes += diff; 2933 2934 btrfs_device_set_total_bytes(device, new_size); 2935 btrfs_device_set_disk_total_bytes(device, new_size); 2936 btrfs_clear_space_info_full(device->fs_info); 2937 if (list_empty(&device->post_commit_list)) 2938 list_add_tail(&device->post_commit_list, 2939 &trans->transaction->dev_update_list); 2940 mutex_unlock(&fs_info->chunk_mutex); 2941 2942 btrfs_reserve_chunk_metadata(trans, false); 2943 ret = btrfs_update_device(trans, device); 2944 btrfs_trans_release_chunk_metadata(trans); 2945 2946 return ret; 2947 } 2948 2949 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 2950 { 2951 struct btrfs_fs_info *fs_info = trans->fs_info; 2952 struct btrfs_root *root = fs_info->chunk_root; 2953 int ret; 2954 struct btrfs_path *path; 2955 struct btrfs_key key; 2956 2957 path = btrfs_alloc_path(); 2958 if (!path) 2959 return -ENOMEM; 2960 2961 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2962 key.offset = chunk_offset; 2963 key.type = BTRFS_CHUNK_ITEM_KEY; 2964 2965 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 2966 if (ret < 0) 2967 goto out; 2968 else if (ret > 0) { /* Logic error or corruption */ 2969 btrfs_handle_fs_error(fs_info, -ENOENT, 2970 "Failed lookup while freeing chunk."); 2971 ret = -ENOENT; 2972 goto out; 2973 } 2974 2975 ret = btrfs_del_item(trans, root, path); 2976 if (ret < 0) 2977 btrfs_handle_fs_error(fs_info, ret, 2978 "Failed to delete chunk item."); 2979 out: 2980 btrfs_free_path(path); 2981 return ret; 2982 } 2983 2984 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 2985 { 2986 struct btrfs_super_block *super_copy = fs_info->super_copy; 2987 struct btrfs_disk_key *disk_key; 2988 struct btrfs_chunk *chunk; 2989 u8 *ptr; 2990 int ret = 0; 2991 u32 num_stripes; 2992 u32 array_size; 2993 u32 len = 0; 2994 u32 cur; 2995 struct btrfs_key key; 2996 2997 lockdep_assert_held(&fs_info->chunk_mutex); 2998 array_size = btrfs_super_sys_array_size(super_copy); 2999 3000 ptr = super_copy->sys_chunk_array; 3001 cur = 0; 3002 3003 while (cur < array_size) { 3004 disk_key = (struct btrfs_disk_key *)ptr; 3005 btrfs_disk_key_to_cpu(&key, disk_key); 3006 3007 len = sizeof(*disk_key); 3008 3009 if (key.type == BTRFS_CHUNK_ITEM_KEY) { 3010 chunk = (struct btrfs_chunk *)(ptr + len); 3011 num_stripes = btrfs_stack_chunk_num_stripes(chunk); 3012 len += btrfs_chunk_item_size(num_stripes); 3013 } else { 3014 ret = -EIO; 3015 break; 3016 } 3017 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID && 3018 key.offset == chunk_offset) { 3019 memmove(ptr, ptr + len, array_size - (cur + len)); 3020 array_size -= len; 3021 btrfs_set_super_sys_array_size(super_copy, array_size); 3022 } else { 3023 ptr += len; 3024 cur += len; 3025 } 3026 } 3027 return ret; 3028 } 3029 3030 /* 3031 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent. 3032 * @logical: Logical block offset in bytes. 3033 * @length: Length of extent in bytes. 3034 * 3035 * Return: Chunk mapping or ERR_PTR. 
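 *
 * The returned map holds a reference, so callers pair the lookup with a
 * ref drop, e.g. (sketch mirroring btrfs_remove_chunk() below):
 *
 *	em = btrfs_get_chunk_map(fs_info, logical, 1);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *	map = em->map_lookup;
 *	...
 *	free_extent_map(em);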
3036 */ 3037 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info, 3038 u64 logical, u64 length) 3039 { 3040 struct extent_map_tree *em_tree; 3041 struct extent_map *em; 3042 3043 em_tree = &fs_info->mapping_tree; 3044 read_lock(&em_tree->lock); 3045 em = lookup_extent_mapping(em_tree, logical, length); 3046 read_unlock(&em_tree->lock); 3047 3048 if (!em) { 3049 btrfs_crit(fs_info, "unable to find logical %llu length %llu", 3050 logical, length); 3051 return ERR_PTR(-EINVAL); 3052 } 3053 3054 if (em->start > logical || em->start + em->len < logical) { 3055 btrfs_crit(fs_info, 3056 "found a bad mapping, wanted %llu-%llu, found %llu-%llu", 3057 logical, length, em->start, em->start + em->len); 3058 free_extent_map(em); 3059 return ERR_PTR(-EINVAL); 3060 } 3061 3062 /* callers are responsible for dropping em's ref. */ 3063 return em; 3064 } 3065 3066 static int remove_chunk_item(struct btrfs_trans_handle *trans, 3067 struct map_lookup *map, u64 chunk_offset) 3068 { 3069 int i; 3070 3071 /* 3072 * Removing chunk items and updating the device items in the chunks btree 3073 * requires holding the chunk_mutex. 3074 * See the comment at btrfs_chunk_alloc() for the details. 3075 */ 3076 lockdep_assert_held(&trans->fs_info->chunk_mutex); 3077 3078 for (i = 0; i < map->num_stripes; i++) { 3079 int ret; 3080 3081 ret = btrfs_update_device(trans, map->stripes[i].dev); 3082 if (ret) 3083 return ret; 3084 } 3085 3086 return btrfs_free_chunk(trans, chunk_offset); 3087 } 3088 3089 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 3090 { 3091 struct btrfs_fs_info *fs_info = trans->fs_info; 3092 struct extent_map *em; 3093 struct map_lookup *map; 3094 u64 dev_extent_len = 0; 3095 int i, ret = 0; 3096 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 3097 3098 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 3099 if (IS_ERR(em)) { 3100 /* 3101 * This is a logic error, but we don't want to just rely on the 3102 * user having built with ASSERT enabled, so if ASSERT doesn't 3103 * do anything we still error out. 3104 */ 3105 ASSERT(0); 3106 return PTR_ERR(em); 3107 } 3108 map = em->map_lookup; 3109 3110 /* 3111 * First delete the device extent items from the devices btree. 3112 * We take the device_list_mutex to avoid racing with the finishing phase 3113 * of a device replace operation. See the comment below before acquiring 3114 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex 3115 * because that can result in a deadlock when deleting the device extent 3116 * items from the devices btree - COWing an extent buffer from the btree 3117 * may result in allocating a new metadata chunk, which would attempt to 3118 * lock again fs_info->chunk_mutex. 
3119 */
3120 mutex_lock(&fs_devices->device_list_mutex);
3121 for (i = 0; i < map->num_stripes; i++) {
3122 struct btrfs_device *device = map->stripes[i].dev;
3123 ret = btrfs_free_dev_extent(trans, device,
3124 map->stripes[i].physical,
3125 &dev_extent_len);
3126 if (ret) {
3127 mutex_unlock(&fs_devices->device_list_mutex);
3128 btrfs_abort_transaction(trans, ret);
3129 goto out;
3130 }
3131
3132 if (device->bytes_used > 0) {
3133 mutex_lock(&fs_info->chunk_mutex);
3134 btrfs_device_set_bytes_used(device,
3135 device->bytes_used - dev_extent_len);
3136 atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
3137 btrfs_clear_space_info_full(fs_info);
3138 mutex_unlock(&fs_info->chunk_mutex);
3139 }
3140 }
3141 mutex_unlock(&fs_devices->device_list_mutex);
3142
3143 /*
3144 * We acquire fs_info->chunk_mutex for 2 reasons:
3145 *
3146 * 1) Just like with the first phase of the chunk allocation, we must
3147 * reserve system space, do all chunk btree updates and deletions, and
3148 * update the system chunk array in the superblock while holding this
3149 * mutex. This is for similar reasons as explained in the comment at
3150 * the top of btrfs_chunk_alloc();
3151 *
3152 * 2) Prevent races with the final phase of a device replace operation
3153 * that replaces the device object associated with the map's stripes,
3154 * because the device object's id can change at any time during that
3155 * final phase of the device replace operation
3156 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
3157 * replaced device and then see it with an ID of
3158 * BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating
3159 * the device item, which does not exist on the chunk btree.
3160 * The finishing phase of device replace acquires both the
3161 * device_list_mutex and the chunk_mutex, in that order, so we are
3162 * safe by just acquiring the chunk_mutex.
3163 */
3164 trans->removing_chunk = true;
3165 mutex_lock(&fs_info->chunk_mutex);
3166
3167 check_system_chunk(trans, map->type);
3168
3169 ret = remove_chunk_item(trans, map, chunk_offset);
3170 /*
3171 * Normally we should not get -ENOSPC since we reserved space before
3172 * through the call to check_system_chunk().
3173 *
3174 * Despite our system space_info having enough free space, we may not
3175 * be able to allocate extents from its block groups, because all have
3176 * an incompatible profile, which will force us to allocate a new system
3177 * block group with the right profile, or right after we called
3178 * check_system_chunk() above, a scrub turned the only system block group
3179 * with enough free space into RO mode.
3180 * This is explained with more detail at do_chunk_alloc().
3181 *
3182 * So if we get -ENOSPC, allocate a new system chunk and retry once.
3183 */ 3184 if (ret == -ENOSPC) { 3185 const u64 sys_flags = btrfs_system_alloc_profile(fs_info); 3186 struct btrfs_block_group *sys_bg; 3187 3188 sys_bg = btrfs_create_chunk(trans, sys_flags); 3189 if (IS_ERR(sys_bg)) { 3190 ret = PTR_ERR(sys_bg); 3191 btrfs_abort_transaction(trans, ret); 3192 goto out; 3193 } 3194 3195 ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg); 3196 if (ret) { 3197 btrfs_abort_transaction(trans, ret); 3198 goto out; 3199 } 3200 3201 ret = remove_chunk_item(trans, map, chunk_offset); 3202 if (ret) { 3203 btrfs_abort_transaction(trans, ret); 3204 goto out; 3205 } 3206 } else if (ret) { 3207 btrfs_abort_transaction(trans, ret); 3208 goto out; 3209 } 3210 3211 trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len); 3212 3213 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 3214 ret = btrfs_del_sys_chunk(fs_info, chunk_offset); 3215 if (ret) { 3216 btrfs_abort_transaction(trans, ret); 3217 goto out; 3218 } 3219 } 3220 3221 mutex_unlock(&fs_info->chunk_mutex); 3222 trans->removing_chunk = false; 3223 3224 /* 3225 * We are done with chunk btree updates and deletions, so release the 3226 * system space we previously reserved (with check_system_chunk()). 3227 */ 3228 btrfs_trans_release_chunk_metadata(trans); 3229 3230 ret = btrfs_remove_block_group(trans, chunk_offset, em); 3231 if (ret) { 3232 btrfs_abort_transaction(trans, ret); 3233 goto out; 3234 } 3235 3236 out: 3237 if (trans->removing_chunk) { 3238 mutex_unlock(&fs_info->chunk_mutex); 3239 trans->removing_chunk = false; 3240 } 3241 /* once for us */ 3242 free_extent_map(em); 3243 return ret; 3244 } 3245 3246 int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 3247 { 3248 struct btrfs_root *root = fs_info->chunk_root; 3249 struct btrfs_trans_handle *trans; 3250 struct btrfs_block_group *block_group; 3251 u64 length; 3252 int ret; 3253 3254 /* 3255 * Prevent races with automatic removal of unused block groups. 3256 * After we relocate and before we remove the chunk with offset 3257 * chunk_offset, automatic removal of the block group can kick in, 3258 * resulting in a failure when calling btrfs_remove_chunk() below. 3259 * 3260 * Make sure to acquire this mutex before doing a tree search (dev 3261 * or chunk trees) to find chunks. Otherwise the cleaner kthread might 3262 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after 3263 * we release the path used to search the chunk/dev tree and before 3264 * the current task acquires this mutex and calls us. 3265 */ 3266 lockdep_assert_held(&fs_info->reclaim_bgs_lock); 3267 3268 /* step one, relocate all the extents inside this chunk */ 3269 btrfs_scrub_pause(fs_info); 3270 ret = btrfs_relocate_block_group(fs_info, chunk_offset); 3271 btrfs_scrub_continue(fs_info); 3272 if (ret) 3273 return ret; 3274 3275 block_group = btrfs_lookup_block_group(fs_info, chunk_offset); 3276 if (!block_group) 3277 return -ENOENT; 3278 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); 3279 length = block_group->length; 3280 btrfs_put_block_group(block_group); 3281 3282 /* 3283 * On a zoned file system, discard the whole block group, this will 3284 * trigger a REQ_OP_ZONE_RESET operation on the device zone. If 3285 * resetting the zone fails, don't treat it as a fatal problem from the 3286 * filesystem's point of view. 
3287 */ 3288 if (btrfs_is_zoned(fs_info)) { 3289 ret = btrfs_discard_extent(fs_info, chunk_offset, length, NULL); 3290 if (ret) 3291 btrfs_info(fs_info, 3292 "failed to reset zone %llu after relocation", 3293 chunk_offset); 3294 } 3295 3296 trans = btrfs_start_trans_remove_block_group(root->fs_info, 3297 chunk_offset); 3298 if (IS_ERR(trans)) { 3299 ret = PTR_ERR(trans); 3300 btrfs_handle_fs_error(root->fs_info, ret, NULL); 3301 return ret; 3302 } 3303 3304 /* 3305 * step two, delete the device extents and the 3306 * chunk tree entries 3307 */ 3308 ret = btrfs_remove_chunk(trans, chunk_offset); 3309 btrfs_end_transaction(trans); 3310 return ret; 3311 } 3312 3313 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info) 3314 { 3315 struct btrfs_root *chunk_root = fs_info->chunk_root; 3316 struct btrfs_path *path; 3317 struct extent_buffer *leaf; 3318 struct btrfs_chunk *chunk; 3319 struct btrfs_key key; 3320 struct btrfs_key found_key; 3321 u64 chunk_type; 3322 bool retried = false; 3323 int failed = 0; 3324 int ret; 3325 3326 path = btrfs_alloc_path(); 3327 if (!path) 3328 return -ENOMEM; 3329 3330 again: 3331 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3332 key.offset = (u64)-1; 3333 key.type = BTRFS_CHUNK_ITEM_KEY; 3334 3335 while (1) { 3336 mutex_lock(&fs_info->reclaim_bgs_lock); 3337 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3338 if (ret < 0) { 3339 mutex_unlock(&fs_info->reclaim_bgs_lock); 3340 goto error; 3341 } 3342 BUG_ON(ret == 0); /* Corruption */ 3343 3344 ret = btrfs_previous_item(chunk_root, path, key.objectid, 3345 key.type); 3346 if (ret) 3347 mutex_unlock(&fs_info->reclaim_bgs_lock); 3348 if (ret < 0) 3349 goto error; 3350 if (ret > 0) 3351 break; 3352 3353 leaf = path->nodes[0]; 3354 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3355 3356 chunk = btrfs_item_ptr(leaf, path->slots[0], 3357 struct btrfs_chunk); 3358 chunk_type = btrfs_chunk_type(leaf, chunk); 3359 btrfs_release_path(path); 3360 3361 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { 3362 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3363 if (ret == -ENOSPC) 3364 failed++; 3365 else 3366 BUG_ON(ret); 3367 } 3368 mutex_unlock(&fs_info->reclaim_bgs_lock); 3369 3370 if (found_key.offset == 0) 3371 break; 3372 key.offset = found_key.offset - 1; 3373 } 3374 ret = 0; 3375 if (failed && !retried) { 3376 failed = 0; 3377 retried = true; 3378 goto again; 3379 } else if (WARN_ON(failed && retried)) { 3380 ret = -ENOSPC; 3381 } 3382 error: 3383 btrfs_free_path(path); 3384 return ret; 3385 } 3386 3387 /* 3388 * return 1 : allocate a data chunk successfully, 3389 * return <0: errors during allocating a data chunk, 3390 * return 0 : no need to allocate a data chunk. 
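 *
 * A caller is expected to handle all three cases, e.g. this sketch taken
 * from the __btrfs_balance() loop below:
 *
 *	ret = btrfs_may_alloc_data_chunk(fs_info, found_key.offset);
 *	if (ret < 0)
 *		goto error;
 *	else if (ret == 1)
 *		chunk_reserved = 1;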
3391 */ 3392 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info, 3393 u64 chunk_offset) 3394 { 3395 struct btrfs_block_group *cache; 3396 u64 bytes_used; 3397 u64 chunk_type; 3398 3399 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3400 ASSERT(cache); 3401 chunk_type = cache->flags; 3402 btrfs_put_block_group(cache); 3403 3404 if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA)) 3405 return 0; 3406 3407 spin_lock(&fs_info->data_sinfo->lock); 3408 bytes_used = fs_info->data_sinfo->bytes_used; 3409 spin_unlock(&fs_info->data_sinfo->lock); 3410 3411 if (!bytes_used) { 3412 struct btrfs_trans_handle *trans; 3413 int ret; 3414 3415 trans = btrfs_join_transaction(fs_info->tree_root); 3416 if (IS_ERR(trans)) 3417 return PTR_ERR(trans); 3418 3419 ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA); 3420 btrfs_end_transaction(trans); 3421 if (ret < 0) 3422 return ret; 3423 return 1; 3424 } 3425 3426 return 0; 3427 } 3428 3429 static int insert_balance_item(struct btrfs_fs_info *fs_info, 3430 struct btrfs_balance_control *bctl) 3431 { 3432 struct btrfs_root *root = fs_info->tree_root; 3433 struct btrfs_trans_handle *trans; 3434 struct btrfs_balance_item *item; 3435 struct btrfs_disk_balance_args disk_bargs; 3436 struct btrfs_path *path; 3437 struct extent_buffer *leaf; 3438 struct btrfs_key key; 3439 int ret, err; 3440 3441 path = btrfs_alloc_path(); 3442 if (!path) 3443 return -ENOMEM; 3444 3445 trans = btrfs_start_transaction(root, 0); 3446 if (IS_ERR(trans)) { 3447 btrfs_free_path(path); 3448 return PTR_ERR(trans); 3449 } 3450 3451 key.objectid = BTRFS_BALANCE_OBJECTID; 3452 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3453 key.offset = 0; 3454 3455 ret = btrfs_insert_empty_item(trans, root, path, &key, 3456 sizeof(*item)); 3457 if (ret) 3458 goto out; 3459 3460 leaf = path->nodes[0]; 3461 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 3462 3463 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item)); 3464 3465 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data); 3466 btrfs_set_balance_data(leaf, item, &disk_bargs); 3467 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta); 3468 btrfs_set_balance_meta(leaf, item, &disk_bargs); 3469 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys); 3470 btrfs_set_balance_sys(leaf, item, &disk_bargs); 3471 3472 btrfs_set_balance_flags(leaf, item, bctl->flags); 3473 3474 btrfs_mark_buffer_dirty(leaf); 3475 out: 3476 btrfs_free_path(path); 3477 err = btrfs_commit_transaction(trans); 3478 if (err && !ret) 3479 ret = err; 3480 return ret; 3481 } 3482 3483 static int del_balance_item(struct btrfs_fs_info *fs_info) 3484 { 3485 struct btrfs_root *root = fs_info->tree_root; 3486 struct btrfs_trans_handle *trans; 3487 struct btrfs_path *path; 3488 struct btrfs_key key; 3489 int ret, err; 3490 3491 path = btrfs_alloc_path(); 3492 if (!path) 3493 return -ENOMEM; 3494 3495 trans = btrfs_start_transaction_fallback_global_rsv(root, 0); 3496 if (IS_ERR(trans)) { 3497 btrfs_free_path(path); 3498 return PTR_ERR(trans); 3499 } 3500 3501 key.objectid = BTRFS_BALANCE_OBJECTID; 3502 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3503 key.offset = 0; 3504 3505 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 3506 if (ret < 0) 3507 goto out; 3508 if (ret > 0) { 3509 ret = -ENOENT; 3510 goto out; 3511 } 3512 3513 ret = btrfs_del_item(trans, root, path); 3514 out: 3515 btrfs_free_path(path); 3516 err = btrfs_commit_transaction(trans); 3517 if (err && !ret) 3518 ret = err; 3519 return ret; 3520 } 3521 3522 /* 3523 * This is a 
heuristic used to reduce the number of chunks balanced on
3524 * resume after balance was interrupted.
3525 */
3526 static void update_balance_args(struct btrfs_balance_control *bctl)
3527 {
3528 /*
3529 * Turn on soft mode for chunk types that were being converted.
3530 */
3531 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3532 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3533 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3534 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3535 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3536 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3537
3538 /*
3539 * Turn on usage filter if it is not already used. The idea is
3540 * that chunks that we have already balanced should be
3541 * reasonably full. Don't do it for chunks that are being
3542 * converted - that will keep us from relocating unconverted
3543 * (albeit full) chunks.
3544 */
3545 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3546 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3547 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3548 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3549 bctl->data.usage = 90;
3550 }
3551 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3552 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3553 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3554 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3555 bctl->sys.usage = 90;
3556 }
3557 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3558 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3559 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3560 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3561 bctl->meta.usage = 90;
3562 }
3563 }
3564
3565 /*
3566 * Clear the balance status in fs_info and delete the balance item from disk.
3567 */
3568 static void reset_balance_state(struct btrfs_fs_info *fs_info)
3569 {
3570 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3571 int ret;
3572
3573 BUG_ON(!fs_info->balance_ctl);
3574
3575 spin_lock(&fs_info->balance_lock);
3576 fs_info->balance_ctl = NULL;
3577 spin_unlock(&fs_info->balance_lock);
3578
3579 kfree(bctl);
3580 ret = del_balance_item(fs_info);
3581 if (ret)
3582 btrfs_handle_fs_error(fs_info, ret, NULL);
3583 }
3584
3585 /*
3586 * Balance filters. Return 1 if chunk should be filtered out
3587 * (should not be balanced).
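 *
 * For example (hypothetical arguments): with bargs->profiles set to the
 * extended raid1 bit, chunk_profiles_filter() below returns 0 (balance
 * the chunk) for a raid1 chunk and 1 (skip it) for a raid0 chunk.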
3588 */ 3589 static int chunk_profiles_filter(u64 chunk_type, 3590 struct btrfs_balance_args *bargs) 3591 { 3592 chunk_type = chunk_to_extended(chunk_type) & 3593 BTRFS_EXTENDED_PROFILE_MASK; 3594 3595 if (bargs->profiles & chunk_type) 3596 return 0; 3597 3598 return 1; 3599 } 3600 3601 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, 3602 struct btrfs_balance_args *bargs) 3603 { 3604 struct btrfs_block_group *cache; 3605 u64 chunk_used; 3606 u64 user_thresh_min; 3607 u64 user_thresh_max; 3608 int ret = 1; 3609 3610 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3611 chunk_used = cache->used; 3612 3613 if (bargs->usage_min == 0) 3614 user_thresh_min = 0; 3615 else 3616 user_thresh_min = div_factor_fine(cache->length, 3617 bargs->usage_min); 3618 3619 if (bargs->usage_max == 0) 3620 user_thresh_max = 1; 3621 else if (bargs->usage_max > 100) 3622 user_thresh_max = cache->length; 3623 else 3624 user_thresh_max = div_factor_fine(cache->length, 3625 bargs->usage_max); 3626 3627 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max) 3628 ret = 0; 3629 3630 btrfs_put_block_group(cache); 3631 return ret; 3632 } 3633 3634 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, 3635 u64 chunk_offset, struct btrfs_balance_args *bargs) 3636 { 3637 struct btrfs_block_group *cache; 3638 u64 chunk_used, user_thresh; 3639 int ret = 1; 3640 3641 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3642 chunk_used = cache->used; 3643 3644 if (bargs->usage_min == 0) 3645 user_thresh = 1; 3646 else if (bargs->usage > 100) 3647 user_thresh = cache->length; 3648 else 3649 user_thresh = div_factor_fine(cache->length, bargs->usage); 3650 3651 if (chunk_used < user_thresh) 3652 ret = 0; 3653 3654 btrfs_put_block_group(cache); 3655 return ret; 3656 } 3657 3658 static int chunk_devid_filter(struct extent_buffer *leaf, 3659 struct btrfs_chunk *chunk, 3660 struct btrfs_balance_args *bargs) 3661 { 3662 struct btrfs_stripe *stripe; 3663 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3664 int i; 3665 3666 for (i = 0; i < num_stripes; i++) { 3667 stripe = btrfs_stripe_nr(chunk, i); 3668 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid) 3669 return 0; 3670 } 3671 3672 return 1; 3673 } 3674 3675 static u64 calc_data_stripes(u64 type, int num_stripes) 3676 { 3677 const int index = btrfs_bg_flags_to_raid_index(type); 3678 const int ncopies = btrfs_raid_array[index].ncopies; 3679 const int nparity = btrfs_raid_array[index].nparity; 3680 3681 return (num_stripes - nparity) / ncopies; 3682 } 3683 3684 /* [pstart, pend) */ 3685 static int chunk_drange_filter(struct extent_buffer *leaf, 3686 struct btrfs_chunk *chunk, 3687 struct btrfs_balance_args *bargs) 3688 { 3689 struct btrfs_stripe *stripe; 3690 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3691 u64 stripe_offset; 3692 u64 stripe_length; 3693 u64 type; 3694 int factor; 3695 int i; 3696 3697 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID)) 3698 return 0; 3699 3700 type = btrfs_chunk_type(leaf, chunk); 3701 factor = calc_data_stripes(type, num_stripes); 3702 3703 for (i = 0; i < num_stripes; i++) { 3704 stripe = btrfs_stripe_nr(chunk, i); 3705 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid) 3706 continue; 3707 3708 stripe_offset = btrfs_stripe_offset(leaf, stripe); 3709 stripe_length = btrfs_chunk_length(leaf, chunk); 3710 stripe_length = div_u64(stripe_length, factor); 3711 3712 if (stripe_offset < bargs->pend && 3713 stripe_offset + stripe_length > bargs->pstart) 3714 return 0; 
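	/*
	 * Illustration with made-up numbers: for bargs->pstart = 0 and
	 * bargs->pend = 1M, a data stripe covering [512K, 1536K) overlaps
	 * the half-open range and 0 is returned above (balance the chunk),
	 * while a stripe at [2M, 3M) falls through to the return 1 below
	 * (filter the chunk out).
	 */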
3715 } 3716 3717 return 1; 3718 } 3719 3720 /* [vstart, vend) */ 3721 static int chunk_vrange_filter(struct extent_buffer *leaf, 3722 struct btrfs_chunk *chunk, 3723 u64 chunk_offset, 3724 struct btrfs_balance_args *bargs) 3725 { 3726 if (chunk_offset < bargs->vend && 3727 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart) 3728 /* at least part of the chunk is inside this vrange */ 3729 return 0; 3730 3731 return 1; 3732 } 3733 3734 static int chunk_stripes_range_filter(struct extent_buffer *leaf, 3735 struct btrfs_chunk *chunk, 3736 struct btrfs_balance_args *bargs) 3737 { 3738 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3739 3740 if (bargs->stripes_min <= num_stripes 3741 && num_stripes <= bargs->stripes_max) 3742 return 0; 3743 3744 return 1; 3745 } 3746 3747 static int chunk_soft_convert_filter(u64 chunk_type, 3748 struct btrfs_balance_args *bargs) 3749 { 3750 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 3751 return 0; 3752 3753 chunk_type = chunk_to_extended(chunk_type) & 3754 BTRFS_EXTENDED_PROFILE_MASK; 3755 3756 if (bargs->target == chunk_type) 3757 return 1; 3758 3759 return 0; 3760 } 3761 3762 static int should_balance_chunk(struct extent_buffer *leaf, 3763 struct btrfs_chunk *chunk, u64 chunk_offset) 3764 { 3765 struct btrfs_fs_info *fs_info = leaf->fs_info; 3766 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3767 struct btrfs_balance_args *bargs = NULL; 3768 u64 chunk_type = btrfs_chunk_type(leaf, chunk); 3769 3770 /* type filter */ 3771 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) & 3772 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) { 3773 return 0; 3774 } 3775 3776 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3777 bargs = &bctl->data; 3778 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3779 bargs = &bctl->sys; 3780 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3781 bargs = &bctl->meta; 3782 3783 /* profiles filter */ 3784 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) && 3785 chunk_profiles_filter(chunk_type, bargs)) { 3786 return 0; 3787 } 3788 3789 /* usage filter */ 3790 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) && 3791 chunk_usage_filter(fs_info, chunk_offset, bargs)) { 3792 return 0; 3793 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3794 chunk_usage_range_filter(fs_info, chunk_offset, bargs)) { 3795 return 0; 3796 } 3797 3798 /* devid filter */ 3799 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) && 3800 chunk_devid_filter(leaf, chunk, bargs)) { 3801 return 0; 3802 } 3803 3804 /* drange filter, makes sense only with devid filter */ 3805 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) && 3806 chunk_drange_filter(leaf, chunk, bargs)) { 3807 return 0; 3808 } 3809 3810 /* vrange filter */ 3811 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) && 3812 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) { 3813 return 0; 3814 } 3815 3816 /* stripes filter */ 3817 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) && 3818 chunk_stripes_range_filter(leaf, chunk, bargs)) { 3819 return 0; 3820 } 3821 3822 /* soft profile changing mode */ 3823 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) && 3824 chunk_soft_convert_filter(chunk_type, bargs)) { 3825 return 0; 3826 } 3827 3828 /* 3829 * limited by count, must be the last filter 3830 */ 3831 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) { 3832 if (bargs->limit == 0) 3833 return 0; 3834 else 3835 bargs->limit--; 3836 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) { 3837 /* 3838 * Same logic as the 'limit' filter; the minimum cannot be 3839 * determined here 
because we do not have the global information 3840 * about the count of all chunks that satisfy the filters. 3841 */ 3842 if (bargs->limit_max == 0) 3843 return 0; 3844 else 3845 bargs->limit_max--; 3846 } 3847 3848 return 1; 3849 } 3850 3851 static int __btrfs_balance(struct btrfs_fs_info *fs_info) 3852 { 3853 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3854 struct btrfs_root *chunk_root = fs_info->chunk_root; 3855 u64 chunk_type; 3856 struct btrfs_chunk *chunk; 3857 struct btrfs_path *path = NULL; 3858 struct btrfs_key key; 3859 struct btrfs_key found_key; 3860 struct extent_buffer *leaf; 3861 int slot; 3862 int ret; 3863 int enospc_errors = 0; 3864 bool counting = true; 3865 /* The single value limit and min/max limits use the same bytes in the balance args, so save the original values here; the counting pass consumes them. */ 3866 u64 limit_data = bctl->data.limit; 3867 u64 limit_meta = bctl->meta.limit; 3868 u64 limit_sys = bctl->sys.limit; 3869 u32 count_data = 0; 3870 u32 count_meta = 0; 3871 u32 count_sys = 0; 3872 int chunk_reserved = 0; 3873 3874 path = btrfs_alloc_path(); 3875 if (!path) { 3876 ret = -ENOMEM; 3877 goto error; 3878 } 3879 3880 /* zero out stat counters */ 3881 spin_lock(&fs_info->balance_lock); 3882 memset(&bctl->stat, 0, sizeof(bctl->stat)); 3883 spin_unlock(&fs_info->balance_lock); 3884 again: 3885 if (!counting) { 3886 /* 3887 * The single value limit and min/max limits use the same bytes 3888 * in the balance args, so restore the values consumed by the 3889 * counting pass. */ 3890 bctl->data.limit = limit_data; 3891 bctl->meta.limit = limit_meta; 3892 bctl->sys.limit = limit_sys; 3893 } 3894 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3895 key.offset = (u64)-1; 3896 key.type = BTRFS_CHUNK_ITEM_KEY; 3897 3898 while (1) { 3899 if ((!counting && atomic_read(&fs_info->balance_pause_req)) || 3900 atomic_read(&fs_info->balance_cancel_req)) { 3901 ret = -ECANCELED; 3902 goto error; 3903 } 3904 3905 mutex_lock(&fs_info->reclaim_bgs_lock); 3906 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3907 if (ret < 0) { 3908 mutex_unlock(&fs_info->reclaim_bgs_lock); 3909 goto error; 3910 } 3911 3912 /* 3913 * This shouldn't happen, it means the last relocate 3914 * failed. 3915 */ 3916 if (ret == 0) 3917 BUG(); /* FIXME break ?
*/ 3918 3919 ret = btrfs_previous_item(chunk_root, path, 0, 3920 BTRFS_CHUNK_ITEM_KEY); 3921 if (ret) { 3922 mutex_unlock(&fs_info->reclaim_bgs_lock); 3923 ret = 0; 3924 break; 3925 } 3926 3927 leaf = path->nodes[0]; 3928 slot = path->slots[0]; 3929 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3930 3931 if (found_key.objectid != key.objectid) { 3932 mutex_unlock(&fs_info->reclaim_bgs_lock); 3933 break; 3934 } 3935 3936 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 3937 chunk_type = btrfs_chunk_type(leaf, chunk); 3938 3939 if (!counting) { 3940 spin_lock(&fs_info->balance_lock); 3941 bctl->stat.considered++; 3942 spin_unlock(&fs_info->balance_lock); 3943 } 3944 3945 ret = should_balance_chunk(leaf, chunk, found_key.offset); 3946 3947 btrfs_release_path(path); 3948 if (!ret) { 3949 mutex_unlock(&fs_info->reclaim_bgs_lock); 3950 goto loop; 3951 } 3952 3953 if (counting) { 3954 mutex_unlock(&fs_info->reclaim_bgs_lock); 3955 spin_lock(&fs_info->balance_lock); 3956 bctl->stat.expected++; 3957 spin_unlock(&fs_info->balance_lock); 3958 3959 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3960 count_data++; 3961 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3962 count_sys++; 3963 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3964 count_meta++; 3965 3966 goto loop; 3967 } 3968 3969 /* 3970 * Apply limit_min filter, no need to check if the LIMITS 3971 * filter is used, limit_min is 0 by default 3972 */ 3973 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) && 3974 count_data < bctl->data.limit_min) 3975 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) && 3976 count_meta < bctl->meta.limit_min) 3977 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) && 3978 count_sys < bctl->sys.limit_min)) { 3979 mutex_unlock(&fs_info->reclaim_bgs_lock); 3980 goto loop; 3981 } 3982 3983 if (!chunk_reserved) { 3984 /* 3985 * We may be relocating the only data chunk we have, 3986 * which could potentially end up with losing data's 3987 * raid profile, so lets allocate an empty one in 3988 * advance. 3989 */ 3990 ret = btrfs_may_alloc_data_chunk(fs_info, 3991 found_key.offset); 3992 if (ret < 0) { 3993 mutex_unlock(&fs_info->reclaim_bgs_lock); 3994 goto error; 3995 } else if (ret == 1) { 3996 chunk_reserved = 1; 3997 } 3998 } 3999 4000 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 4001 mutex_unlock(&fs_info->reclaim_bgs_lock); 4002 if (ret == -ENOSPC) { 4003 enospc_errors++; 4004 } else if (ret == -ETXTBSY) { 4005 btrfs_info(fs_info, 4006 "skipping relocation of block group %llu due to active swapfile", 4007 found_key.offset); 4008 ret = 0; 4009 } else if (ret) { 4010 goto error; 4011 } else { 4012 spin_lock(&fs_info->balance_lock); 4013 bctl->stat.completed++; 4014 spin_unlock(&fs_info->balance_lock); 4015 } 4016 loop: 4017 if (found_key.offset == 0) 4018 break; 4019 key.offset = found_key.offset - 1; 4020 } 4021 4022 if (counting) { 4023 btrfs_release_path(path); 4024 counting = false; 4025 goto again; 4026 } 4027 error: 4028 btrfs_free_path(path); 4029 if (enospc_errors) { 4030 btrfs_info(fs_info, "%d enospc errors during balance", 4031 enospc_errors); 4032 if (!ret) 4033 ret = -ENOSPC; 4034 } 4035 4036 return ret; 4037 } 4038 4039 /** 4040 * alloc_profile_is_valid - see if a given profile is valid and reduced 4041 * @flags: profile to validate 4042 * @extended: if true @flags is treated as an extended profile 4043 */ 4044 static int alloc_profile_is_valid(u64 flags, int extended) 4045 { 4046 u64 mask = (extended ? 
BTRFS_EXTENDED_PROFILE_MASK : 4047 BTRFS_BLOCK_GROUP_PROFILE_MASK); 4048 4049 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK; 4050 4051 /* 1) check that all other bits are zeroed */ 4052 if (flags & ~mask) 4053 return 0; 4054 4055 /* 2) see if profile is reduced */ 4056 if (flags == 0) 4057 return !extended; /* "0" is valid for usual profiles */ 4058 4059 return has_single_bit_set(flags); 4060 } 4061 4062 static inline int balance_need_close(struct btrfs_fs_info *fs_info) 4063 { 4064 /* cancel requested || normal exit path */ 4065 return atomic_read(&fs_info->balance_cancel_req) || 4066 (atomic_read(&fs_info->balance_pause_req) == 0 && 4067 atomic_read(&fs_info->balance_cancel_req) == 0); 4068 } 4069 4070 /* 4071 * Validate target profile against allowed profiles and return true if it's OK. 4072 * Otherwise print the error message and return false. 4073 */ 4074 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info, 4075 const struct btrfs_balance_args *bargs, 4076 u64 allowed, const char *type) 4077 { 4078 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 4079 return true; 4080 4081 if (fs_info->sectorsize < PAGE_SIZE && 4082 bargs->target & BTRFS_BLOCK_GROUP_RAID56_MASK) { 4083 btrfs_err(fs_info, 4084 "RAID56 is not yet supported for sectorsize %u with page size %lu", 4085 fs_info->sectorsize, PAGE_SIZE); 4086 return false; 4087 } 4088 /* Profile is valid and does not have bits outside of the allowed set */ 4089 if (alloc_profile_is_valid(bargs->target, 1) && 4090 (bargs->target & ~allowed) == 0) 4091 return true; 4092 4093 btrfs_err(fs_info, "balance: invalid convert %s profile %s", 4094 type, btrfs_bg_type_to_raid_name(bargs->target)); 4095 return false; 4096 } 4097 4098 /* 4099 * Fill @buf with textual description of balance filter flags @bargs, up to 4100 * @size_buf including the terminating null. The output may be trimmed if it 4101 * does not fit into the provided buffer. 
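 *
 * For example, a filter with convert, soft and usage set might be
 * rendered as "convert=raid1,soft,usage=90" (the trailing comma of the
 * last printed item is stripped at the end).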
4102 */ 4103 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf, 4104 u32 size_buf) 4105 { 4106 int ret; 4107 u32 size_bp = size_buf; 4108 char *bp = buf; 4109 u64 flags = bargs->flags; 4110 char tmp_buf[128] = {'\0'}; 4111 4112 if (!flags) 4113 return; 4114 4115 #define CHECK_APPEND_NOARG(a) \ 4116 do { \ 4117 ret = snprintf(bp, size_bp, (a)); \ 4118 if (ret < 0 || ret >= size_bp) \ 4119 goto out_overflow; \ 4120 size_bp -= ret; \ 4121 bp += ret; \ 4122 } while (0) 4123 4124 #define CHECK_APPEND_1ARG(a, v1) \ 4125 do { \ 4126 ret = snprintf(bp, size_bp, (a), (v1)); \ 4127 if (ret < 0 || ret >= size_bp) \ 4128 goto out_overflow; \ 4129 size_bp -= ret; \ 4130 bp += ret; \ 4131 } while (0) 4132 4133 #define CHECK_APPEND_2ARG(a, v1, v2) \ 4134 do { \ 4135 ret = snprintf(bp, size_bp, (a), (v1), (v2)); \ 4136 if (ret < 0 || ret >= size_bp) \ 4137 goto out_overflow; \ 4138 size_bp -= ret; \ 4139 bp += ret; \ 4140 } while (0) 4141 4142 if (flags & BTRFS_BALANCE_ARGS_CONVERT) 4143 CHECK_APPEND_1ARG("convert=%s,", 4144 btrfs_bg_type_to_raid_name(bargs->target)); 4145 4146 if (flags & BTRFS_BALANCE_ARGS_SOFT) 4147 CHECK_APPEND_NOARG("soft,"); 4148 4149 if (flags & BTRFS_BALANCE_ARGS_PROFILES) { 4150 btrfs_describe_block_groups(bargs->profiles, tmp_buf, 4151 sizeof(tmp_buf)); 4152 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf); 4153 } 4154 4155 if (flags & BTRFS_BALANCE_ARGS_USAGE) 4156 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage); 4157 4158 if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) 4159 CHECK_APPEND_2ARG("usage=%u..%u,", 4160 bargs->usage_min, bargs->usage_max); 4161 4162 if (flags & BTRFS_BALANCE_ARGS_DEVID) 4163 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid); 4164 4165 if (flags & BTRFS_BALANCE_ARGS_DRANGE) 4166 CHECK_APPEND_2ARG("drange=%llu..%llu,", 4167 bargs->pstart, bargs->pend); 4168 4169 if (flags & BTRFS_BALANCE_ARGS_VRANGE) 4170 CHECK_APPEND_2ARG("vrange=%llu..%llu,", 4171 bargs->vstart, bargs->vend); 4172 4173 if (flags & BTRFS_BALANCE_ARGS_LIMIT) 4174 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit); 4175 4176 if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE) 4177 CHECK_APPEND_2ARG("limit=%u..%u,", 4178 bargs->limit_min, bargs->limit_max); 4179 4180 if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) 4181 CHECK_APPEND_2ARG("stripes=%u..%u,", 4182 bargs->stripes_min, bargs->stripes_max); 4183 4184 #undef CHECK_APPEND_2ARG 4185 #undef CHECK_APPEND_1ARG 4186 #undef CHECK_APPEND_NOARG 4187 4188 out_overflow: 4189 4190 if (size_bp < size_buf) 4191 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */ 4192 else 4193 buf[0] = '\0'; 4194 } 4195 4196 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info) 4197 { 4198 u32 size_buf = 1024; 4199 char tmp_buf[192] = {'\0'}; 4200 char *buf; 4201 char *bp; 4202 u32 size_bp = size_buf; 4203 int ret; 4204 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 4205 4206 buf = kzalloc(size_buf, GFP_KERNEL); 4207 if (!buf) 4208 return; 4209 4210 bp = buf; 4211 4212 #define CHECK_APPEND_1ARG(a, v1) \ 4213 do { \ 4214 ret = snprintf(bp, size_bp, (a), (v1)); \ 4215 if (ret < 0 || ret >= size_bp) \ 4216 goto out_overflow; \ 4217 size_bp -= ret; \ 4218 bp += ret; \ 4219 } while (0) 4220 4221 if (bctl->flags & BTRFS_BALANCE_FORCE) 4222 CHECK_APPEND_1ARG("%s", "-f "); 4223 4224 if (bctl->flags & BTRFS_BALANCE_DATA) { 4225 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf)); 4226 CHECK_APPEND_1ARG("-d%s ", tmp_buf); 4227 } 4228 4229 if (bctl->flags & BTRFS_BALANCE_METADATA) { 4230 describe_balance_args(&bctl->meta, 
tmp_buf, sizeof(tmp_buf)); 4231 CHECK_APPEND_1ARG("-m%s ", tmp_buf); 4232 } 4233 4234 if (bctl->flags & BTRFS_BALANCE_SYSTEM) { 4235 describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf)); 4236 CHECK_APPEND_1ARG("-s%s ", tmp_buf); 4237 } 4238 4239 #undef CHECK_APPEND_1ARG 4240 4241 out_overflow: 4242 4243 if (size_bp < size_buf) 4244 buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */ 4245 btrfs_info(fs_info, "balance: %s %s", 4246 (bctl->flags & BTRFS_BALANCE_RESUME) ? 4247 "resume" : "start", buf); 4248 4249 kfree(buf); 4250 } 4251 4252 /* 4253 * Should be called with balance mutex held 4254 */ 4255 int btrfs_balance(struct btrfs_fs_info *fs_info, 4256 struct btrfs_balance_control *bctl, 4257 struct btrfs_ioctl_balance_args *bargs) 4258 { 4259 u64 meta_target, data_target; 4260 u64 allowed; 4261 int mixed = 0; 4262 int ret; 4263 u64 num_devices; 4264 unsigned seq; 4265 bool reducing_redundancy; 4266 int i; 4267 4268 if (btrfs_fs_closing(fs_info) || 4269 atomic_read(&fs_info->balance_pause_req) || 4270 btrfs_should_cancel_balance(fs_info)) { 4271 ret = -EINVAL; 4272 goto out; 4273 } 4274 4275 allowed = btrfs_super_incompat_flags(fs_info->super_copy); 4276 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) 4277 mixed = 1; 4278 4279 /* 4280 * In case of mixed groups both data and meta should be picked, 4281 * and identical options should be given for both of them. 4282 */ 4283 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA; 4284 if (mixed && (bctl->flags & allowed)) { 4285 if (!(bctl->flags & BTRFS_BALANCE_DATA) || 4286 !(bctl->flags & BTRFS_BALANCE_METADATA) || 4287 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) { 4288 btrfs_err(fs_info, 4289 "balance: mixed groups data and metadata options must be the same"); 4290 ret = -EINVAL; 4291 goto out; 4292 } 4293 } 4294 4295 /* 4296 * rw_devices will not change at the moment, device add/delete/replace 4297 * are exclusive 4298 */ 4299 num_devices = fs_info->fs_devices->rw_devices; 4300 4301 /* 4302 * SINGLE profile on-disk has no profile bit, but in-memory we have a 4303 * special bit for it, to make it easier to distinguish. Thus we need 4304 * to set it manually, or balance would refuse the profile.
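 * The loop below then adds each profile whose minimum device count is
 * met, e.g. with two writable devices raid1 qualifies while raid1c3
 * (which needs three) does not.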
4305 */ 4306 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE; 4307 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) 4308 if (num_devices >= btrfs_raid_array[i].devs_min) 4309 allowed |= btrfs_raid_array[i].bg_flag; 4310 4311 if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") || 4312 !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") || 4313 !validate_convert_profile(fs_info, &bctl->sys, allowed, "system")) { 4314 ret = -EINVAL; 4315 goto out; 4316 } 4317 4318 /* 4319 * Allow to reduce metadata or system integrity only if force set for 4320 * profiles with redundancy (copies, parity) 4321 */ 4322 allowed = 0; 4323 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) { 4324 if (btrfs_raid_array[i].ncopies >= 2 || 4325 btrfs_raid_array[i].tolerated_failures >= 1) 4326 allowed |= btrfs_raid_array[i].bg_flag; 4327 } 4328 do { 4329 seq = read_seqbegin(&fs_info->profiles_lock); 4330 4331 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4332 (fs_info->avail_system_alloc_bits & allowed) && 4333 !(bctl->sys.target & allowed)) || 4334 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4335 (fs_info->avail_metadata_alloc_bits & allowed) && 4336 !(bctl->meta.target & allowed))) 4337 reducing_redundancy = true; 4338 else 4339 reducing_redundancy = false; 4340 4341 /* if we're not converting, the target field is uninitialized */ 4342 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4343 bctl->meta.target : fs_info->avail_metadata_alloc_bits; 4344 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4345 bctl->data.target : fs_info->avail_data_alloc_bits; 4346 } while (read_seqretry(&fs_info->profiles_lock, seq)); 4347 4348 if (reducing_redundancy) { 4349 if (bctl->flags & BTRFS_BALANCE_FORCE) { 4350 btrfs_info(fs_info, 4351 "balance: force reducing metadata redundancy"); 4352 } else { 4353 btrfs_err(fs_info, 4354 "balance: reduces metadata redundancy, use --force if you want this"); 4355 ret = -EINVAL; 4356 goto out; 4357 } 4358 } 4359 4360 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) < 4361 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) { 4362 btrfs_warn(fs_info, 4363 "balance: metadata profile %s has lower redundancy than data profile %s", 4364 btrfs_bg_type_to_raid_name(meta_target), 4365 btrfs_bg_type_to_raid_name(data_target)); 4366 } 4367 4368 ret = insert_balance_item(fs_info, bctl); 4369 if (ret && ret != -EEXIST) 4370 goto out; 4371 4372 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) { 4373 BUG_ON(ret == -EEXIST); 4374 BUG_ON(fs_info->balance_ctl); 4375 spin_lock(&fs_info->balance_lock); 4376 fs_info->balance_ctl = bctl; 4377 spin_unlock(&fs_info->balance_lock); 4378 } else { 4379 BUG_ON(ret != -EEXIST); 4380 spin_lock(&fs_info->balance_lock); 4381 update_balance_args(bctl); 4382 spin_unlock(&fs_info->balance_lock); 4383 } 4384 4385 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4386 set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4387 describe_balance_start_or_resume(fs_info); 4388 mutex_unlock(&fs_info->balance_mutex); 4389 4390 ret = __btrfs_balance(fs_info); 4391 4392 mutex_lock(&fs_info->balance_mutex); 4393 if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) 4394 btrfs_info(fs_info, "balance: paused"); 4395 /* 4396 * Balance can be canceled by: 4397 * 4398 * - Regular cancel request 4399 * Then ret == -ECANCELED and balance_cancel_req > 0 4400 * 4401 * - Fatal signal to "btrfs" process 4402 * Either the signal caught by wait_reserve_ticket() and callers 4403 * got 
-EINTR, or caught by btrfs_should_cancel_balance() and 4404 * got -ECANCELED. 4405 * Either way, in this case balance_cancel_req = 0, and 4406 * ret == -EINTR or ret == -ECANCELED. 4407 * 4408 * So here we only check the return value to catch canceled balance. 4409 */ 4410 else if (ret == -ECANCELED || ret == -EINTR) 4411 btrfs_info(fs_info, "balance: canceled"); 4412 else 4413 btrfs_info(fs_info, "balance: ended with status: %d", ret); 4414 4415 clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4416 4417 if (bargs) { 4418 memset(bargs, 0, sizeof(*bargs)); 4419 btrfs_update_ioctl_balance_args(fs_info, bargs); 4420 } 4421 4422 if ((ret && ret != -ECANCELED && ret != -ENOSPC) || 4423 balance_need_close(fs_info)) { 4424 reset_balance_state(fs_info); 4425 btrfs_exclop_finish(fs_info); 4426 } 4427 4428 wake_up(&fs_info->balance_wait_q); 4429 4430 return ret; 4431 out: 4432 if (bctl->flags & BTRFS_BALANCE_RESUME) 4433 reset_balance_state(fs_info); 4434 else 4435 kfree(bctl); 4436 btrfs_exclop_finish(fs_info); 4437 4438 return ret; 4439 } 4440 4441 static int balance_kthread(void *data) 4442 { 4443 struct btrfs_fs_info *fs_info = data; 4444 int ret = 0; 4445 4446 mutex_lock(&fs_info->balance_mutex); 4447 if (fs_info->balance_ctl) 4448 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL); 4449 mutex_unlock(&fs_info->balance_mutex); 4450 4451 return ret; 4452 } 4453 4454 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info) 4455 { 4456 struct task_struct *tsk; 4457 4458 mutex_lock(&fs_info->balance_mutex); 4459 if (!fs_info->balance_ctl) { 4460 mutex_unlock(&fs_info->balance_mutex); 4461 return 0; 4462 } 4463 mutex_unlock(&fs_info->balance_mutex); 4464 4465 if (btrfs_test_opt(fs_info, SKIP_BALANCE)) { 4466 btrfs_info(fs_info, "balance: resume skipped"); 4467 return 0; 4468 } 4469 4470 /* 4471 * A ro->rw remount sequence should continue with the paused balance 4472 * regardless of who pauses it, system or the user as of now, so set 4473 * the resume flag. 
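 * With BTRFS_BALANCE_RESUME set, btrfs_balance() expects the balance
 * item to already exist (the -EEXIST path) and logs "resume" instead
 * of "start".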
4474 */ 4475 spin_lock(&fs_info->balance_lock); 4476 fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME; 4477 spin_unlock(&fs_info->balance_lock); 4478 4479 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); 4480 return PTR_ERR_OR_ZERO(tsk); 4481 } 4482 4483 int btrfs_recover_balance(struct btrfs_fs_info *fs_info) 4484 { 4485 struct btrfs_balance_control *bctl; 4486 struct btrfs_balance_item *item; 4487 struct btrfs_disk_balance_args disk_bargs; 4488 struct btrfs_path *path; 4489 struct extent_buffer *leaf; 4490 struct btrfs_key key; 4491 int ret; 4492 4493 path = btrfs_alloc_path(); 4494 if (!path) 4495 return -ENOMEM; 4496 4497 key.objectid = BTRFS_BALANCE_OBJECTID; 4498 key.type = BTRFS_TEMPORARY_ITEM_KEY; 4499 key.offset = 0; 4500 4501 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4502 if (ret < 0) 4503 goto out; 4504 if (ret > 0) { /* ret = -ENOENT; */ 4505 ret = 0; 4506 goto out; 4507 } 4508 4509 bctl = kzalloc(sizeof(*bctl), GFP_NOFS); 4510 if (!bctl) { 4511 ret = -ENOMEM; 4512 goto out; 4513 } 4514 4515 leaf = path->nodes[0]; 4516 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 4517 4518 bctl->flags = btrfs_balance_flags(leaf, item); 4519 bctl->flags |= BTRFS_BALANCE_RESUME; 4520 4521 btrfs_balance_data(leaf, item, &disk_bargs); 4522 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs); 4523 btrfs_balance_meta(leaf, item, &disk_bargs); 4524 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs); 4525 btrfs_balance_sys(leaf, item, &disk_bargs); 4526 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs); 4527 4528 /* 4529 * This should never happen, as the paused balance state is recovered 4530 * during mount without any chance of other exclusive ops to collide. 4531 * 4532 * This gives the exclusive op status to balance and keeps in paused 4533 * state until user intervention (cancel or umount). If the ownership 4534 * cannot be assigned, show a message but do not fail. The balance 4535 * is in a paused state and must have fs_info::balance_ctl properly 4536 * set up. 
4537 */ 4538 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) 4539 btrfs_warn(fs_info, 4540 "balance: cannot set exclusive op status, resume manually"); 4541 4542 btrfs_release_path(path); 4543 4544 mutex_lock(&fs_info->balance_mutex); 4545 BUG_ON(fs_info->balance_ctl); 4546 spin_lock(&fs_info->balance_lock); 4547 fs_info->balance_ctl = bctl; 4548 spin_unlock(&fs_info->balance_lock); 4549 mutex_unlock(&fs_info->balance_mutex); 4550 out: 4551 btrfs_free_path(path); 4552 return ret; 4553 } 4554 4555 int btrfs_pause_balance(struct btrfs_fs_info *fs_info) 4556 { 4557 int ret = 0; 4558 4559 mutex_lock(&fs_info->balance_mutex); 4560 if (!fs_info->balance_ctl) { 4561 mutex_unlock(&fs_info->balance_mutex); 4562 return -ENOTCONN; 4563 } 4564 4565 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4566 atomic_inc(&fs_info->balance_pause_req); 4567 mutex_unlock(&fs_info->balance_mutex); 4568 4569 wait_event(fs_info->balance_wait_q, 4570 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4571 4572 mutex_lock(&fs_info->balance_mutex); 4573 /* we are good with balance_ctl ripped off from under us */ 4574 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4575 atomic_dec(&fs_info->balance_pause_req); 4576 } else { 4577 ret = -ENOTCONN; 4578 } 4579 4580 mutex_unlock(&fs_info->balance_mutex); 4581 return ret; 4582 } 4583 4584 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) 4585 { 4586 mutex_lock(&fs_info->balance_mutex); 4587 if (!fs_info->balance_ctl) { 4588 mutex_unlock(&fs_info->balance_mutex); 4589 return -ENOTCONN; 4590 } 4591 4592 /* 4593 * A paused balance with the item stored on disk can be resumed at 4594 * mount time if the mount is read-write. Otherwise it's still paused 4595 * and we must not allow cancelling as it deletes the item. 4596 */ 4597 if (sb_rdonly(fs_info->sb)) { 4598 mutex_unlock(&fs_info->balance_mutex); 4599 return -EROFS; 4600 } 4601 4602 atomic_inc(&fs_info->balance_cancel_req); 4603 /* 4604 * if we are running just wait and return, balance item is 4605 * deleted in btrfs_balance in this case 4606 */ 4607 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4608 mutex_unlock(&fs_info->balance_mutex); 4609 wait_event(fs_info->balance_wait_q, 4610 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4611 mutex_lock(&fs_info->balance_mutex); 4612 } else { 4613 mutex_unlock(&fs_info->balance_mutex); 4614 /* 4615 * Lock released to allow other waiters to continue, we'll 4616 * reexamine the status again. 
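 * The balance state may have changed while the mutex was dropped,
 * hence the fresh fs_info->balance_ctl check after it is reacquired
 * below.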
4617 */ 4618 mutex_lock(&fs_info->balance_mutex); 4619 4620 if (fs_info->balance_ctl) { 4621 reset_balance_state(fs_info); 4622 btrfs_exclop_finish(fs_info); 4623 btrfs_info(fs_info, "balance: canceled"); 4624 } 4625 } 4626 4627 BUG_ON(fs_info->balance_ctl || 4628 test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4629 atomic_dec(&fs_info->balance_cancel_req); 4630 mutex_unlock(&fs_info->balance_mutex); 4631 return 0; 4632 } 4633 4634 int btrfs_uuid_scan_kthread(void *data) 4635 { 4636 struct btrfs_fs_info *fs_info = data; 4637 struct btrfs_root *root = fs_info->tree_root; 4638 struct btrfs_key key; 4639 struct btrfs_path *path = NULL; 4640 int ret = 0; 4641 struct extent_buffer *eb; 4642 int slot; 4643 struct btrfs_root_item root_item; 4644 u32 item_size; 4645 struct btrfs_trans_handle *trans = NULL; 4646 bool closing = false; 4647 4648 path = btrfs_alloc_path(); 4649 if (!path) { 4650 ret = -ENOMEM; 4651 goto out; 4652 } 4653 4654 key.objectid = 0; 4655 key.type = BTRFS_ROOT_ITEM_KEY; 4656 key.offset = 0; 4657 4658 while (1) { 4659 if (btrfs_fs_closing(fs_info)) { 4660 closing = true; 4661 break; 4662 } 4663 ret = btrfs_search_forward(root, &key, path, 4664 BTRFS_OLDEST_GENERATION); 4665 if (ret) { 4666 if (ret > 0) 4667 ret = 0; 4668 break; 4669 } 4670 4671 if (key.type != BTRFS_ROOT_ITEM_KEY || 4672 (key.objectid < BTRFS_FIRST_FREE_OBJECTID && 4673 key.objectid != BTRFS_FS_TREE_OBJECTID) || 4674 key.objectid > BTRFS_LAST_FREE_OBJECTID) 4675 goto skip; 4676 4677 eb = path->nodes[0]; 4678 slot = path->slots[0]; 4679 item_size = btrfs_item_size(eb, slot); 4680 if (item_size < sizeof(root_item)) 4681 goto skip; 4682 4683 read_extent_buffer(eb, &root_item, 4684 btrfs_item_ptr_offset(eb, slot), 4685 (int)sizeof(root_item)); 4686 if (btrfs_root_refs(&root_item) == 0) 4687 goto skip; 4688 4689 if (!btrfs_is_empty_uuid(root_item.uuid) || 4690 !btrfs_is_empty_uuid(root_item.received_uuid)) { 4691 if (trans) 4692 goto update_tree; 4693 4694 btrfs_release_path(path); 4695 /* 4696 * 1 - subvol uuid item 4697 * 1 - received_subvol uuid item 4698 */ 4699 trans = btrfs_start_transaction(fs_info->uuid_root, 2); 4700 if (IS_ERR(trans)) { 4701 ret = PTR_ERR(trans); 4702 break; 4703 } 4704 continue; 4705 } else { 4706 goto skip; 4707 } 4708 update_tree: 4709 btrfs_release_path(path); 4710 if (!btrfs_is_empty_uuid(root_item.uuid)) { 4711 ret = btrfs_uuid_tree_add(trans, root_item.uuid, 4712 BTRFS_UUID_KEY_SUBVOL, 4713 key.objectid); 4714 if (ret < 0) { 4715 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4716 ret); 4717 break; 4718 } 4719 } 4720 4721 if (!btrfs_is_empty_uuid(root_item.received_uuid)) { 4722 ret = btrfs_uuid_tree_add(trans, 4723 root_item.received_uuid, 4724 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4725 key.objectid); 4726 if (ret < 0) { 4727 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4728 ret); 4729 break; 4730 } 4731 } 4732 4733 skip: 4734 btrfs_release_path(path); 4735 if (trans) { 4736 ret = btrfs_end_transaction(trans); 4737 trans = NULL; 4738 if (ret) 4739 break; 4740 } 4741 4742 if (key.offset < (u64)-1) { 4743 key.offset++; 4744 } else if (key.type < BTRFS_ROOT_ITEM_KEY) { 4745 key.offset = 0; 4746 key.type = BTRFS_ROOT_ITEM_KEY; 4747 } else if (key.objectid < (u64)-1) { 4748 key.offset = 0; 4749 key.type = BTRFS_ROOT_ITEM_KEY; 4750 key.objectid++; 4751 } else { 4752 break; 4753 } 4754 cond_resched(); 4755 } 4756 4757 out: 4758 btrfs_free_path(path); 4759 if (trans && !IS_ERR(trans)) 4760 btrfs_end_transaction(trans); 4761 if (ret) 4762 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread 
failed %d", ret); 4763 else if (!closing) 4764 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); 4765 up(&fs_info->uuid_tree_rescan_sem); 4766 return 0; 4767 } 4768 4769 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info) 4770 { 4771 struct btrfs_trans_handle *trans; 4772 struct btrfs_root *tree_root = fs_info->tree_root; 4773 struct btrfs_root *uuid_root; 4774 struct task_struct *task; 4775 int ret; 4776 4777 /* 4778 * 1 - root node 4779 * 1 - root item 4780 */ 4781 trans = btrfs_start_transaction(tree_root, 2); 4782 if (IS_ERR(trans)) 4783 return PTR_ERR(trans); 4784 4785 uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID); 4786 if (IS_ERR(uuid_root)) { 4787 ret = PTR_ERR(uuid_root); 4788 btrfs_abort_transaction(trans, ret); 4789 btrfs_end_transaction(trans); 4790 return ret; 4791 } 4792 4793 fs_info->uuid_root = uuid_root; 4794 4795 ret = btrfs_commit_transaction(trans); 4796 if (ret) 4797 return ret; 4798 4799 down(&fs_info->uuid_tree_rescan_sem); 4800 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid"); 4801 if (IS_ERR(task)) { 4802 /* fs_info->update_uuid_tree_gen remains 0 in all error cases */ 4803 btrfs_warn(fs_info, "failed to start uuid_scan task"); 4804 up(&fs_info->uuid_tree_rescan_sem); 4805 return PTR_ERR(task); 4806 } 4807 4808 return 0; 4809 } 4810 4811 /* 4812 * Shrinking a device means finding all of the device extents past 4813 * the new size, and then following the back refs to the chunks. 4814 * The chunk relocation code actually frees the device extent. 4815 */ 4816 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) 4817 { 4818 struct btrfs_fs_info *fs_info = device->fs_info; 4819 struct btrfs_root *root = fs_info->dev_root; 4820 struct btrfs_trans_handle *trans; 4821 struct btrfs_dev_extent *dev_extent = NULL; 4822 struct btrfs_path *path; 4823 u64 length; 4824 u64 chunk_offset; 4825 int ret; 4826 int slot; 4827 int failed = 0; 4828 bool retried = false; 4829 struct extent_buffer *l; 4830 struct btrfs_key key; 4831 struct btrfs_super_block *super_copy = fs_info->super_copy; 4832 u64 old_total = btrfs_super_total_bytes(super_copy); 4833 u64 old_size = btrfs_device_get_total_bytes(device); 4834 u64 diff; 4835 u64 start; 4836 4837 new_size = round_down(new_size, fs_info->sectorsize); 4838 start = new_size; 4839 diff = round_down(old_size - new_size, fs_info->sectorsize); 4840 4841 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 4842 return -EINVAL; 4843 4844 path = btrfs_alloc_path(); 4845 if (!path) 4846 return -ENOMEM; 4847 4848 path->reada = READA_BACK; 4849 4850 trans = btrfs_start_transaction(root, 0); 4851 if (IS_ERR(trans)) { 4852 btrfs_free_path(path); 4853 return PTR_ERR(trans); 4854 } 4855 4856 mutex_lock(&fs_info->chunk_mutex); 4857 4858 btrfs_device_set_total_bytes(device, new_size); 4859 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 4860 device->fs_devices->total_rw_bytes -= diff; 4861 atomic64_sub(diff, &fs_info->free_chunk_space); 4862 } 4863 4864 /* 4865 * Once the device's size has been set to the new size, ensure all 4866 * in-memory chunks are synced to disk so that the loop below sees them 4867 * and relocates them accordingly.
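 *
 * That is why the transaction is committed below when pending extents
 * exist in the shrunk range, and simply ended otherwise.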
4868 */ 4869 if (contains_pending_extent(device, &start, diff)) { 4870 mutex_unlock(&fs_info->chunk_mutex); 4871 ret = btrfs_commit_transaction(trans); 4872 if (ret) 4873 goto done; 4874 } else { 4875 mutex_unlock(&fs_info->chunk_mutex); 4876 btrfs_end_transaction(trans); 4877 } 4878 4879 again: 4880 key.objectid = device->devid; 4881 key.offset = (u64)-1; 4882 key.type = BTRFS_DEV_EXTENT_KEY; 4883 4884 do { 4885 mutex_lock(&fs_info->reclaim_bgs_lock); 4886 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4887 if (ret < 0) { 4888 mutex_unlock(&fs_info->reclaim_bgs_lock); 4889 goto done; 4890 } 4891 4892 ret = btrfs_previous_item(root, path, 0, key.type); 4893 if (ret) { 4894 mutex_unlock(&fs_info->reclaim_bgs_lock); 4895 if (ret < 0) 4896 goto done; 4897 ret = 0; 4898 btrfs_release_path(path); 4899 break; 4900 } 4901 4902 l = path->nodes[0]; 4903 slot = path->slots[0]; 4904 btrfs_item_key_to_cpu(l, &key, path->slots[0]); 4905 4906 if (key.objectid != device->devid) { 4907 mutex_unlock(&fs_info->reclaim_bgs_lock); 4908 btrfs_release_path(path); 4909 break; 4910 } 4911 4912 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 4913 length = btrfs_dev_extent_length(l, dev_extent); 4914 4915 if (key.offset + length <= new_size) { 4916 mutex_unlock(&fs_info->reclaim_bgs_lock); 4917 btrfs_release_path(path); 4918 break; 4919 } 4920 4921 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); 4922 btrfs_release_path(path); 4923 4924 /* 4925 * We may be relocating the only data chunk we have, 4926 * which could potentially end up with losing data's 4927 * raid profile, so lets allocate an empty one in 4928 * advance. 4929 */ 4930 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset); 4931 if (ret < 0) { 4932 mutex_unlock(&fs_info->reclaim_bgs_lock); 4933 goto done; 4934 } 4935 4936 ret = btrfs_relocate_chunk(fs_info, chunk_offset); 4937 mutex_unlock(&fs_info->reclaim_bgs_lock); 4938 if (ret == -ENOSPC) { 4939 failed++; 4940 } else if (ret) { 4941 if (ret == -ETXTBSY) { 4942 btrfs_warn(fs_info, 4943 "could not shrink block group %llu due to active swapfile", 4944 chunk_offset); 4945 } 4946 goto done; 4947 } 4948 } while (key.offset-- > 0); 4949 4950 if (failed && !retried) { 4951 failed = 0; 4952 retried = true; 4953 goto again; 4954 } else if (failed && retried) { 4955 ret = -ENOSPC; 4956 goto done; 4957 } 4958 4959 /* Shrinking succeeded, else we would be at "done". */ 4960 trans = btrfs_start_transaction(root, 0); 4961 if (IS_ERR(trans)) { 4962 ret = PTR_ERR(trans); 4963 goto done; 4964 } 4965 4966 mutex_lock(&fs_info->chunk_mutex); 4967 /* Clear all state bits beyond the shrunk device size */ 4968 clear_extent_bits(&device->alloc_state, new_size, (u64)-1, 4969 CHUNK_STATE_MASK); 4970 4971 btrfs_device_set_disk_total_bytes(device, new_size); 4972 if (list_empty(&device->post_commit_list)) 4973 list_add_tail(&device->post_commit_list, 4974 &trans->transaction->dev_update_list); 4975 4976 WARN_ON(diff > old_total); 4977 btrfs_set_super_total_bytes(super_copy, 4978 round_down(old_total - diff, fs_info->sectorsize)); 4979 mutex_unlock(&fs_info->chunk_mutex); 4980 4981 btrfs_reserve_chunk_metadata(trans, false); 4982 /* Now btrfs_update_device() will change the on-disk size. 
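 * The superblock's total_bytes was already adjusted above under the
 * chunk_mutex; the device item is the remaining on-disk record still
 * carrying the old size.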
*/ 4983 ret = btrfs_update_device(trans, device); 4984 btrfs_trans_release_chunk_metadata(trans); 4985 if (ret < 0) { 4986 btrfs_abort_transaction(trans, ret); 4987 btrfs_end_transaction(trans); 4988 } else { 4989 ret = btrfs_commit_transaction(trans); 4990 } 4991 done: 4992 btrfs_free_path(path); 4993 if (ret) { 4994 mutex_lock(&fs_info->chunk_mutex); 4995 btrfs_device_set_total_bytes(device, old_size); 4996 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 4997 device->fs_devices->total_rw_bytes += diff; 4998 atomic64_add(diff, &fs_info->free_chunk_space); 4999 mutex_unlock(&fs_info->chunk_mutex); 5000 } 5001 return ret; 5002 } 5003 5004 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info, 5005 struct btrfs_key *key, 5006 struct btrfs_chunk *chunk, int item_size) 5007 { 5008 struct btrfs_super_block *super_copy = fs_info->super_copy; 5009 struct btrfs_disk_key disk_key; 5010 u32 array_size; 5011 u8 *ptr; 5012 5013 lockdep_assert_held(&fs_info->chunk_mutex); 5014 5015 array_size = btrfs_super_sys_array_size(super_copy); 5016 if (array_size + item_size + sizeof(disk_key) 5017 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) 5018 return -EFBIG; 5019 5020 ptr = super_copy->sys_chunk_array + array_size; 5021 btrfs_cpu_key_to_disk(&disk_key, key); 5022 memcpy(ptr, &disk_key, sizeof(disk_key)); 5023 ptr += sizeof(disk_key); 5024 memcpy(ptr, chunk, item_size); 5025 item_size += sizeof(disk_key); 5026 btrfs_set_super_sys_array_size(super_copy, array_size + item_size); 5027 5028 return 0; 5029 } 5030 5031 /* 5032 * sort the devices in descending order by max_avail, total_avail 5033 */ 5034 static int btrfs_cmp_device_info(const void *a, const void *b) 5035 { 5036 const struct btrfs_device_info *di_a = a; 5037 const struct btrfs_device_info *di_b = b; 5038 5039 if (di_a->max_avail > di_b->max_avail) 5040 return -1; 5041 if (di_a->max_avail < di_b->max_avail) 5042 return 1; 5043 if (di_a->total_avail > di_b->total_avail) 5044 return -1; 5045 if (di_a->total_avail < di_b->total_avail) 5046 return 1; 5047 return 0; 5048 } 5049 5050 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type) 5051 { 5052 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK)) 5053 return; 5054 5055 btrfs_set_fs_incompat(info, RAID56); 5056 } 5057 5058 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type) 5059 { 5060 if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4))) 5061 return; 5062 5063 btrfs_set_fs_incompat(info, RAID1C34); 5064 } 5065 5066 /* 5067 * Structure used internally for btrfs_create_chunk() function. 5068 * Wraps needed parameters. 
5069 */ 5070 struct alloc_chunk_ctl { 5071 u64 start; 5072 u64 type; 5073 /* Total number of stripes to allocate */ 5074 int num_stripes; 5075 /* sub_stripes info for map */ 5076 int sub_stripes; 5077 /* Stripes per device */ 5078 int dev_stripes; 5079 /* Maximum number of devices to use */ 5080 int devs_max; 5081 /* Minimum number of devices to use */ 5082 int devs_min; 5083 /* ndevs has to be a multiple of this */ 5084 int devs_increment; 5085 /* Number of copies */ 5086 int ncopies; 5087 /* Number of stripes worth of bytes to store parity information */ 5088 int nparity; 5089 u64 max_stripe_size; 5090 u64 max_chunk_size; 5091 u64 dev_extent_min; 5092 u64 stripe_size; 5093 u64 chunk_size; 5094 int ndevs; 5095 }; 5096 5097 static void init_alloc_chunk_ctl_policy_regular( 5098 struct btrfs_fs_devices *fs_devices, 5099 struct alloc_chunk_ctl *ctl) 5100 { 5101 u64 type = ctl->type; 5102 5103 if (type & BTRFS_BLOCK_GROUP_DATA) { 5104 ctl->max_stripe_size = SZ_1G; 5105 ctl->max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE; 5106 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 5107 /* For larger filesystems, use larger metadata chunks */ 5108 if (fs_devices->total_rw_bytes > 50ULL * SZ_1G) 5109 ctl->max_stripe_size = SZ_1G; 5110 else 5111 ctl->max_stripe_size = SZ_256M; 5112 ctl->max_chunk_size = ctl->max_stripe_size; 5113 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 5114 ctl->max_stripe_size = SZ_32M; 5115 ctl->max_chunk_size = 2 * ctl->max_stripe_size; 5116 ctl->devs_max = min_t(int, ctl->devs_max, 5117 BTRFS_MAX_DEVS_SYS_CHUNK); 5118 } else { 5119 BUG(); 5120 } 5121 5122 /* We don't want a chunk larger than 10% of writable space */ 5123 ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1), 5124 ctl->max_chunk_size); 5125 ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes; 5126 } 5127 5128 static void init_alloc_chunk_ctl_policy_zoned( 5129 struct btrfs_fs_devices *fs_devices, 5130 struct alloc_chunk_ctl *ctl) 5131 { 5132 u64 zone_size = fs_devices->fs_info->zone_size; 5133 u64 limit; 5134 int min_num_stripes = ctl->devs_min * ctl->dev_stripes; 5135 int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies; 5136 u64 min_chunk_size = min_data_stripes * zone_size; 5137 u64 type = ctl->type; 5138 5139 ctl->max_stripe_size = zone_size; 5140 if (type & BTRFS_BLOCK_GROUP_DATA) { 5141 ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE, 5142 zone_size); 5143 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 5144 ctl->max_chunk_size = ctl->max_stripe_size; 5145 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 5146 ctl->max_chunk_size = 2 * ctl->max_stripe_size; 5147 ctl->devs_max = min_t(int, ctl->devs_max, 5148 BTRFS_MAX_DEVS_SYS_CHUNK); 5149 } else { 5150 BUG(); 5151 } 5152 5153 /* We don't want a chunk larger than 10% of writable space */ 5154 limit = max(round_down(div_factor(fs_devices->total_rw_bytes, 1), 5155 zone_size), 5156 min_chunk_size); 5157 ctl->max_chunk_size = min(limit, ctl->max_chunk_size); 5158 ctl->dev_extent_min = zone_size * ctl->dev_stripes; 5159 } 5160 5161 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices, 5162 struct alloc_chunk_ctl *ctl) 5163 { 5164 int index = btrfs_bg_flags_to_raid_index(ctl->type); 5165 5166 ctl->sub_stripes = btrfs_raid_array[index].sub_stripes; 5167 ctl->dev_stripes = btrfs_raid_array[index].dev_stripes; 5168 ctl->devs_max = btrfs_raid_array[index].devs_max; 5169 if (!ctl->devs_max) 5170 ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info); 5171 ctl->devs_min = btrfs_raid_array[index].devs_min; 5172 
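/*
 * A devs_max of 0 in btrfs_raid_array means "no limit", so the
 * BTRFS_MAX_DEVS() fallback above substitutes the largest stripe count
 * whose chunk item still fits in a tree leaf.
 */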
ctl->devs_increment = btrfs_raid_array[index].devs_increment; 5173 ctl->ncopies = btrfs_raid_array[index].ncopies; 5174 ctl->nparity = btrfs_raid_array[index].nparity; 5175 ctl->ndevs = 0; 5176 5177 switch (fs_devices->chunk_alloc_policy) { 5178 case BTRFS_CHUNK_ALLOC_REGULAR: 5179 init_alloc_chunk_ctl_policy_regular(fs_devices, ctl); 5180 break; 5181 case BTRFS_CHUNK_ALLOC_ZONED: 5182 init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl); 5183 break; 5184 default: 5185 BUG(); 5186 } 5187 } 5188 5189 static int gather_device_info(struct btrfs_fs_devices *fs_devices, 5190 struct alloc_chunk_ctl *ctl, 5191 struct btrfs_device_info *devices_info) 5192 { 5193 struct btrfs_fs_info *info = fs_devices->fs_info; 5194 struct btrfs_device *device; 5195 u64 total_avail; 5196 u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes; 5197 int ret; 5198 int ndevs = 0; 5199 u64 max_avail; 5200 u64 dev_offset; 5201 5202 /* 5203 * in the first pass through the devices list, we gather information 5204 * about the available holes on each device. 5205 */ 5206 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { 5207 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 5208 WARN(1, KERN_ERR 5209 "BTRFS: read-only device in alloc_list\n"); 5210 continue; 5211 } 5212 5213 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 5214 &device->dev_state) || 5215 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 5216 continue; 5217 5218 if (device->total_bytes > device->bytes_used) 5219 total_avail = device->total_bytes - device->bytes_used; 5220 else 5221 total_avail = 0; 5222 5223 /* If there is no space on this device, skip it. */ 5224 if (total_avail < ctl->dev_extent_min) 5225 continue; 5226 5227 ret = find_free_dev_extent(device, dev_extent_want, &dev_offset, 5228 &max_avail); 5229 if (ret && ret != -ENOSPC) 5230 return ret; 5231 5232 if (ret == 0) 5233 max_avail = dev_extent_want; 5234 5235 if (max_avail < ctl->dev_extent_min) { 5236 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5237 btrfs_debug(info, 5238 "%s: devid %llu has no free space, have=%llu want=%llu", 5239 __func__, device->devid, max_avail, 5240 ctl->dev_extent_min); 5241 continue; 5242 } 5243 5244 if (ndevs == fs_devices->rw_devices) { 5245 WARN(1, "%s: found more than %llu devices\n", 5246 __func__, fs_devices->rw_devices); 5247 break; 5248 } 5249 devices_info[ndevs].dev_offset = dev_offset; 5250 devices_info[ndevs].max_avail = max_avail; 5251 devices_info[ndevs].total_avail = total_avail; 5252 devices_info[ndevs].dev = device; 5253 ++ndevs; 5254 } 5255 ctl->ndevs = ndevs; 5256 5257 /* 5258 * now sort the devices by hole size / available space 5259 */ 5260 sort(devices_info, ndevs, sizeof(struct btrfs_device_info), 5261 btrfs_cmp_device_info, NULL); 5262 5263 return 0; 5264 } 5265 5266 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl, 5267 struct btrfs_device_info *devices_info) 5268 { 5269 /* Number of stripes that count for block group size */ 5270 int data_stripes; 5271 5272 /* 5273 * The primary goal is to maximize the number of stripes, so use as 5274 * many devices as possible, even if the stripes are not maximum sized. 5275 * 5276 * The DUP profile stores more than one stripe per device, the 5277 * max_avail is the total size so we have to adjust. 
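 * E.g. dup keeps two stripes on one device, so only half of max_avail
 * is usable for a single stripe.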
5278 */ 5279 ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail, 5280 ctl->dev_stripes); 5281 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5282 5283 /* This will have to be fixed for RAID1 and RAID10 over more drives */ 5284 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5285 5286 /* 5287 * Use the number of data stripes to figure out how big this chunk is 5288 * really going to be in terms of logical address space, and compare 5289 * that answer with the max chunk size. If it's higher, we try to 5290 * reduce stripe_size. 5291 */ 5292 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) { 5293 /* 5294 * Reduce stripe_size, round it up to a 16MB boundary again and 5295 * then use it, unless it ends up being even bigger than the 5296 * previous value we had already. 5297 */ 5298 ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size, 5299 data_stripes), SZ_16M), 5300 ctl->stripe_size); 5301 } 5302 5303 /* Align to BTRFS_STRIPE_LEN */ 5304 ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN); 5305 ctl->chunk_size = ctl->stripe_size * data_stripes; 5306 5307 return 0; 5308 } 5309 5310 static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl, 5311 struct btrfs_device_info *devices_info) 5312 { 5313 u64 zone_size = devices_info[0].dev->zone_info->zone_size; 5314 /* Number of stripes that count for block group size */ 5315 int data_stripes; 5316 5317 /* 5318 * This should hold because: 5319 * dev_extent_min == dev_extent_want == zone_size * dev_stripes 5320 */ 5321 ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min); 5322 5323 ctl->stripe_size = zone_size; 5324 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5325 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5326 5327 /* stripe_size is fixed in a zoned filesystem. Reduce ndevs instead. */ 5328 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) { 5329 ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies, 5330 ctl->stripe_size) + ctl->nparity, 5331 ctl->dev_stripes); 5332 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5333 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5334 ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size); 5335 } 5336 5337 ctl->chunk_size = ctl->stripe_size * data_stripes; 5338 5339 return 0; 5340 } 5341 5342 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices, 5343 struct alloc_chunk_ctl *ctl, 5344 struct btrfs_device_info *devices_info) 5345 { 5346 struct btrfs_fs_info *info = fs_devices->fs_info; 5347 5348 /* 5349 * Round down to the number of usable stripes, devs_increment can be 5350 * any number so we can't use round_down(), which requires a power of 5351 * 2, while rounddown() is safe.
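 * E.g. raid10 has a devs_increment of 2, so 5 usable devices are
 * rounded down to 4 here.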
5352 */ 5353 ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment); 5354 5355 if (ctl->ndevs < ctl->devs_min) { 5356 if (btrfs_test_opt(info, ENOSPC_DEBUG)) { 5357 btrfs_debug(info, 5358 "%s: not enough devices with free space: have=%d minimum required=%d", 5359 __func__, ctl->ndevs, ctl->devs_min); 5360 } 5361 return -ENOSPC; 5362 } 5363 5364 ctl->ndevs = min(ctl->ndevs, ctl->devs_max); 5365 5366 switch (fs_devices->chunk_alloc_policy) { 5367 case BTRFS_CHUNK_ALLOC_REGULAR: 5368 return decide_stripe_size_regular(ctl, devices_info); 5369 case BTRFS_CHUNK_ALLOC_ZONED: 5370 return decide_stripe_size_zoned(ctl, devices_info); 5371 default: 5372 BUG(); 5373 } 5374 } 5375 5376 static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans, 5377 struct alloc_chunk_ctl *ctl, 5378 struct btrfs_device_info *devices_info) 5379 { 5380 struct btrfs_fs_info *info = trans->fs_info; 5381 struct map_lookup *map = NULL; 5382 struct extent_map_tree *em_tree; 5383 struct btrfs_block_group *block_group; 5384 struct extent_map *em; 5385 u64 start = ctl->start; 5386 u64 type = ctl->type; 5387 int ret; 5388 int i; 5389 int j; 5390 5391 map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS); 5392 if (!map) 5393 return ERR_PTR(-ENOMEM); 5394 map->num_stripes = ctl->num_stripes; 5395 5396 for (i = 0; i < ctl->ndevs; ++i) { 5397 for (j = 0; j < ctl->dev_stripes; ++j) { 5398 int s = i * ctl->dev_stripes + j; 5399 map->stripes[s].dev = devices_info[i].dev; 5400 map->stripes[s].physical = devices_info[i].dev_offset + 5401 j * ctl->stripe_size; 5402 } 5403 } 5404 map->stripe_len = BTRFS_STRIPE_LEN; 5405 map->io_align = BTRFS_STRIPE_LEN; 5406 map->io_width = BTRFS_STRIPE_LEN; 5407 map->type = type; 5408 map->sub_stripes = ctl->sub_stripes; 5409 5410 trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size); 5411 5412 em = alloc_extent_map(); 5413 if (!em) { 5414 kfree(map); 5415 return ERR_PTR(-ENOMEM); 5416 } 5417 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 5418 em->map_lookup = map; 5419 em->start = start; 5420 em->len = ctl->chunk_size; 5421 em->block_start = 0; 5422 em->block_len = em->len; 5423 em->orig_block_len = ctl->stripe_size; 5424 5425 em_tree = &info->mapping_tree; 5426 write_lock(&em_tree->lock); 5427 ret = add_extent_mapping(em_tree, em, 0); 5428 if (ret) { 5429 write_unlock(&em_tree->lock); 5430 free_extent_map(em); 5431 return ERR_PTR(ret); 5432 } 5433 write_unlock(&em_tree->lock); 5434 5435 block_group = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size); 5436 if (IS_ERR(block_group)) 5437 goto error_del_extent; 5438 5439 for (i = 0; i < map->num_stripes; i++) { 5440 struct btrfs_device *dev = map->stripes[i].dev; 5441 5442 btrfs_device_set_bytes_used(dev, 5443 dev->bytes_used + ctl->stripe_size); 5444 if (list_empty(&dev->post_commit_list)) 5445 list_add_tail(&dev->post_commit_list, 5446 &trans->transaction->dev_update_list); 5447 } 5448 5449 atomic64_sub(ctl->stripe_size * map->num_stripes, 5450 &info->free_chunk_space); 5451 5452 free_extent_map(em); 5453 check_raid56_incompat_flag(info, type); 5454 check_raid1c34_incompat_flag(info, type); 5455 5456 return block_group; 5457 5458 error_del_extent: 5459 write_lock(&em_tree->lock); 5460 remove_extent_mapping(em_tree, em); 5461 write_unlock(&em_tree->lock); 5462 5463 /* One for our allocation */ 5464 free_extent_map(em); 5465 /* One for the tree reference */ 5466 free_extent_map(em); 5467 5468 return block_group; 5469 } 5470 5471 struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans, 5472 u64 
type) 5473 { 5474 struct btrfs_fs_info *info = trans->fs_info; 5475 struct btrfs_fs_devices *fs_devices = info->fs_devices; 5476 struct btrfs_device_info *devices_info = NULL; 5477 struct alloc_chunk_ctl ctl; 5478 struct btrfs_block_group *block_group; 5479 int ret; 5480 5481 lockdep_assert_held(&info->chunk_mutex); 5482 5483 if (!alloc_profile_is_valid(type, 0)) { 5484 ASSERT(0); 5485 return ERR_PTR(-EINVAL); 5486 } 5487 5488 if (list_empty(&fs_devices->alloc_list)) { 5489 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5490 btrfs_debug(info, "%s: no writable device", __func__); 5491 return ERR_PTR(-ENOSPC); 5492 } 5493 5494 if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { 5495 btrfs_err(info, "invalid chunk type 0x%llx requested", type); 5496 ASSERT(0); 5497 return ERR_PTR(-EINVAL); 5498 } 5499 5500 ctl.start = find_next_chunk(info); 5501 ctl.type = type; 5502 init_alloc_chunk_ctl(fs_devices, &ctl); 5503 5504 devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info), 5505 GFP_NOFS); 5506 if (!devices_info) 5507 return ERR_PTR(-ENOMEM); 5508 5509 ret = gather_device_info(fs_devices, &ctl, devices_info); 5510 if (ret < 0) { 5511 block_group = ERR_PTR(ret); 5512 goto out; 5513 } 5514 5515 ret = decide_stripe_size(fs_devices, &ctl, devices_info); 5516 if (ret < 0) { 5517 block_group = ERR_PTR(ret); 5518 goto out; 5519 } 5520 5521 block_group = create_chunk(trans, &ctl, devices_info); 5522 5523 out: 5524 kfree(devices_info); 5525 return block_group; 5526 } 5527 5528 /* 5529 * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to 5530 * phase 1 of chunk allocation. It belongs to phase 2 only when allocating system 5531 * chunks. 5532 * 5533 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 5534 * phases. 5535 */ 5536 int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans, 5537 struct btrfs_block_group *bg) 5538 { 5539 struct btrfs_fs_info *fs_info = trans->fs_info; 5540 struct btrfs_root *extent_root = fs_info->extent_root; 5541 struct btrfs_root *chunk_root = fs_info->chunk_root; 5542 struct btrfs_key key; 5543 struct btrfs_chunk *chunk; 5544 struct btrfs_stripe *stripe; 5545 struct extent_map *em; 5546 struct map_lookup *map; 5547 size_t item_size; 5548 int i; 5549 int ret; 5550 5551 /* 5552 * We take the chunk_mutex for 2 reasons: 5553 * 5554 * 1) Updates and insertions in the chunk btree must be done while holding 5555 * the chunk_mutex, as well as updating the system chunk array in the 5556 * superblock. See the comment on top of btrfs_chunk_alloc() for the 5557 * details; 5558 * 5559 * 2) To prevent races with the final phase of a device replace operation 5560 * that replaces the device object associated with the map's stripes, 5561 * because the device object's id can change at any time during that 5562 * final phase of the device replace operation 5563 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 5564 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID, 5565 * which would cause a failure when updating the device item, which does 5566 * not exist, or persisting a stripe of the chunk item with such ID. 5567 * Here we can't use the device_list_mutex because our caller already 5568 * has locked the chunk_mutex, and the final phase of device replace 5569 * acquires both mutexes - first the device_list_mutex and then the 5570 * chunk_mutex. Using either of those two mutexes protects us from a 5571 * concurrent device replace.
5572 */ 5573 lockdep_assert_held(&fs_info->chunk_mutex); 5574 5575 em = btrfs_get_chunk_map(fs_info, bg->start, bg->length); 5576 if (IS_ERR(em)) { 5577 ret = PTR_ERR(em); 5578 btrfs_abort_transaction(trans, ret); 5579 return ret; 5580 } 5581 5582 map = em->map_lookup; 5583 item_size = btrfs_chunk_item_size(map->num_stripes); 5584 5585 chunk = kzalloc(item_size, GFP_NOFS); 5586 if (!chunk) { 5587 ret = -ENOMEM; 5588 btrfs_abort_transaction(trans, ret); 5589 goto out; 5590 } 5591 5592 for (i = 0; i < map->num_stripes; i++) { 5593 struct btrfs_device *device = map->stripes[i].dev; 5594 5595 ret = btrfs_update_device(trans, device); 5596 if (ret) 5597 goto out; 5598 } 5599 5600 stripe = &chunk->stripe; 5601 for (i = 0; i < map->num_stripes; i++) { 5602 struct btrfs_device *device = map->stripes[i].dev; 5603 const u64 dev_offset = map->stripes[i].physical; 5604 5605 btrfs_set_stack_stripe_devid(stripe, device->devid); 5606 btrfs_set_stack_stripe_offset(stripe, dev_offset); 5607 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE); 5608 stripe++; 5609 } 5610 5611 btrfs_set_stack_chunk_length(chunk, bg->length); 5612 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid); 5613 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len); 5614 btrfs_set_stack_chunk_type(chunk, map->type); 5615 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes); 5616 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len); 5617 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len); 5618 btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize); 5619 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes); 5620 5621 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 5622 key.type = BTRFS_CHUNK_ITEM_KEY; 5623 key.offset = bg->start; 5624 5625 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); 5626 if (ret) 5627 goto out; 5628 5629 bg->chunk_item_inserted = 1; 5630 5631 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 5632 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size); 5633 if (ret) 5634 goto out; 5635 } 5636 5637 out: 5638 kfree(chunk); 5639 free_extent_map(em); 5640 return ret; 5641 } 5642 5643 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans) 5644 { 5645 struct btrfs_fs_info *fs_info = trans->fs_info; 5646 u64 alloc_profile; 5647 struct btrfs_block_group *meta_bg; 5648 struct btrfs_block_group *sys_bg; 5649 5650 /* 5651 * When adding a new device for sprouting, the seed device is read-only 5652 * so we must first allocate a metadata and a system chunk. But before 5653 * adding the block group items to the extent, device and chunk btrees, 5654 * we must first: 5655 * 5656 * 1) Create both chunks without doing any changes to the btrees, as 5657 * otherwise we would get -ENOSPC since the block groups from the 5658 * seed device are read-only; 5659 * 5660 * 2) Add the device item for the new sprout device - finishing the setup 5661 * of a new block group requires updating the device item in the chunk 5662 * btree, so it must exist when we attempt to do it. The previous step 5663 * ensures this does not fail with -ENOSPC. 5664 * 5665 * After that we can add the block group items to their btrees: 5666 * update existing device item in the chunk btree, add a new block group 5667 * item to the extent btree, add a new chunk item to the chunk btree and 5668 * finally add the new device extent items to the devices btree. 
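 *
 * The two btrfs_create_chunk() calls below implement step 1; the
 * remaining steps are completed later by the device add code.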
5669 */ 5670 5671 alloc_profile = btrfs_metadata_alloc_profile(fs_info); 5672 meta_bg = btrfs_create_chunk(trans, alloc_profile); 5673 if (IS_ERR(meta_bg)) 5674 return PTR_ERR(meta_bg); 5675 5676 alloc_profile = btrfs_system_alloc_profile(fs_info); 5677 sys_bg = btrfs_create_chunk(trans, alloc_profile); 5678 if (IS_ERR(sys_bg)) 5679 return PTR_ERR(sys_bg); 5680 5681 return 0; 5682 } 5683 5684 static inline int btrfs_chunk_max_errors(struct map_lookup *map) 5685 { 5686 const int index = btrfs_bg_flags_to_raid_index(map->type); 5687 5688 return btrfs_raid_array[index].tolerated_failures; 5689 } 5690 5691 bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset) 5692 { 5693 struct extent_map *em; 5694 struct map_lookup *map; 5695 int miss_ndevs = 0; 5696 int i; 5697 bool ret = true; 5698 5699 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 5700 if (IS_ERR(em)) 5701 return false; 5702 5703 map = em->map_lookup; 5704 for (i = 0; i < map->num_stripes; i++) { 5705 if (test_bit(BTRFS_DEV_STATE_MISSING, 5706 &map->stripes[i].dev->dev_state)) { 5707 miss_ndevs++; 5708 continue; 5709 } 5710 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, 5711 &map->stripes[i].dev->dev_state)) { 5712 ret = false; 5713 goto end; 5714 } 5715 } 5716 5717 /* 5718 * If the number of missing devices is larger than max errors, we can 5719 * not write the data into that chunk successfully. 5720 */ 5721 if (miss_ndevs > btrfs_chunk_max_errors(map)) 5722 ret = false; 5723 end: 5724 free_extent_map(em); 5725 return ret; 5726 } 5727 5728 void btrfs_mapping_tree_free(struct extent_map_tree *tree) 5729 { 5730 struct extent_map *em; 5731 5732 while (1) { 5733 write_lock(&tree->lock); 5734 em = lookup_extent_mapping(tree, 0, (u64)-1); 5735 if (em) 5736 remove_extent_mapping(tree, em); 5737 write_unlock(&tree->lock); 5738 if (!em) 5739 break; 5740 /* once for us */ 5741 free_extent_map(em); 5742 /* once for the tree */ 5743 free_extent_map(em); 5744 } 5745 } 5746 5747 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5748 { 5749 struct extent_map *em; 5750 struct map_lookup *map; 5751 int ret; 5752 5753 em = btrfs_get_chunk_map(fs_info, logical, len); 5754 if (IS_ERR(em)) 5755 /* 5756 * We could return errors for these cases, but that could get 5757 * ugly and we'd probably do the same thing which is just not do 5758 * anything else and exit, so return 1 so the callers don't try 5759 * to use other copies. 5760 */ 5761 return 1; 5762 5763 map = em->map_lookup; 5764 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK)) 5765 ret = map->num_stripes; 5766 else if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5767 ret = map->sub_stripes; 5768 else if (map->type & BTRFS_BLOCK_GROUP_RAID5) 5769 ret = 2; 5770 else if (map->type & BTRFS_BLOCK_GROUP_RAID6) 5771 /* 5772 * There could be two corrupted data stripes, we need 5773 * to loop retry in order to rebuild the correct data. 5774 * 5775 * Fail a stripe at a time on every retry except the 5776 * stripe under reconstruction. 
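 * Returning num_stripes here gives the read-retry loop enough mirror
 * numbers to exclude each stripe in turn.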
5777 */ 5778 ret = map->num_stripes; 5779 else 5780 ret = 1; 5781 free_extent_map(em); 5782 5783 down_read(&fs_info->dev_replace.rwsem); 5784 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) && 5785 fs_info->dev_replace.tgtdev) 5786 ret++; 5787 up_read(&fs_info->dev_replace.rwsem); 5788 5789 return ret; 5790 } 5791 5792 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info, 5793 u64 logical) 5794 { 5795 struct extent_map *em; 5796 struct map_lookup *map; 5797 unsigned long len = fs_info->sectorsize; 5798 5799 em = btrfs_get_chunk_map(fs_info, logical, len); 5800 5801 if (!WARN_ON(IS_ERR(em))) { 5802 map = em->map_lookup; 5803 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5804 len = map->stripe_len * nr_data_stripes(map); 5805 free_extent_map(em); 5806 } 5807 return len; 5808 } 5809 5810 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5811 { 5812 struct extent_map *em; 5813 struct map_lookup *map; 5814 int ret = 0; 5815 5816 em = btrfs_get_chunk_map(fs_info, logical, len); 5817 5818 if (!WARN_ON(IS_ERR(em))) { 5819 map = em->map_lookup; 5820 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5821 ret = 1; 5822 free_extent_map(em); 5823 } 5824 return ret; 5825 } 5826 5827 static int find_live_mirror(struct btrfs_fs_info *fs_info, 5828 struct map_lookup *map, int first, 5829 int dev_replace_is_ongoing) 5830 { 5831 int i; 5832 int num_stripes; 5833 int preferred_mirror; 5834 int tolerance; 5835 struct btrfs_device *srcdev; 5836 5837 ASSERT((map->type & 5838 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10))); 5839 5840 if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5841 num_stripes = map->sub_stripes; 5842 else 5843 num_stripes = map->num_stripes; 5844 5845 switch (fs_info->fs_devices->read_policy) { 5846 default: 5847 /* Shouldn't happen, just warn and use pid instead of failing */ 5848 btrfs_warn_rl(fs_info, 5849 "unknown read_policy type %u, reset to pid", 5850 fs_info->fs_devices->read_policy); 5851 fs_info->fs_devices->read_policy = BTRFS_READ_POLICY_PID; 5852 fallthrough; 5853 case BTRFS_READ_POLICY_PID: 5854 preferred_mirror = first + (current->pid % num_stripes); 5855 break; 5856 } 5857 5858 if (dev_replace_is_ongoing && 5859 fs_info->dev_replace.cont_reading_from_srcdev_mode == 5860 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID) 5861 srcdev = fs_info->dev_replace.srcdev; 5862 else 5863 srcdev = NULL; 5864 5865 /* 5866 * Try to avoid the drive that is the source drive for a 5867 * dev-replace procedure; only choose it if no other non-missing 5868 * mirror is available. 5869 */ 5870 for (tolerance = 0; tolerance < 2; tolerance++) { 5871 if (map->stripes[preferred_mirror].dev->bdev && 5872 (tolerance || map->stripes[preferred_mirror].dev != srcdev)) 5873 return preferred_mirror; 5874 for (i = first; i < first + num_stripes; i++) { 5875 if (map->stripes[i].dev->bdev && 5876 (tolerance || map->stripes[i].dev != srcdev)) 5877 return i; 5878 } 5879 } 5880 5881 /* We couldn't find one that doesn't fail.
Just return something 5882 * and the I/O error handling code will clean up eventually 5883 */ 5884 return preferred_mirror; 5885 } 5886 5887 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */ 5888 static void sort_parity_stripes(struct btrfs_io_context *bioc, int num_stripes) 5889 { 5890 int i; 5891 int again = 1; 5892 5893 while (again) { 5894 again = 0; 5895 for (i = 0; i < num_stripes - 1; i++) { 5896 /* Swap if parity is on a smaller index */ 5897 if (bioc->raid_map[i] > bioc->raid_map[i + 1]) { 5898 swap(bioc->stripes[i], bioc->stripes[i + 1]); 5899 swap(bioc->raid_map[i], bioc->raid_map[i + 1]); 5900 again = 1; 5901 } 5902 } 5903 } 5904 } 5905 5906 static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info, 5907 int total_stripes, 5908 int real_stripes) 5909 { 5910 struct btrfs_io_context *bioc = kzalloc( 5911 /* The size of btrfs_io_context */ 5912 sizeof(struct btrfs_io_context) + 5913 /* Plus the variable array for the stripes */ 5914 sizeof(struct btrfs_io_stripe) * (total_stripes) + 5915 /* Plus the variable array for the tgt dev */ 5916 sizeof(int) * (real_stripes) + 5917 /* 5918 * Plus the raid_map, which includes both the tgt dev 5919 * and the stripes. 5920 */ 5921 sizeof(u64) * (total_stripes), 5922 GFP_NOFS|__GFP_NOFAIL); 5923 5924 atomic_set(&bioc->error, 0); 5925 refcount_set(&bioc->refs, 1); 5926 5927 bioc->fs_info = fs_info; 5928 bioc->tgtdev_map = (int *)(bioc->stripes + total_stripes); 5929 bioc->raid_map = (u64 *)(bioc->tgtdev_map + real_stripes); 5930 5931 return bioc; 5932 } 5933 5934 void btrfs_get_bioc(struct btrfs_io_context *bioc) 5935 { 5936 WARN_ON(!refcount_read(&bioc->refs)); 5937 refcount_inc(&bioc->refs); 5938 } 5939 5940 void btrfs_put_bioc(struct btrfs_io_context *bioc) 5941 { 5942 if (!bioc) 5943 return; 5944 if (refcount_dec_and_test(&bioc->refs)) 5945 kfree(bioc); 5946 } 5947 5948 /* Can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */ 5949 /* 5950 * Please note that discard won't be sent to the target device of a device 5951 * replace. 5952 */ 5953 static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info, 5954 u64 logical, u64 *length_ret, 5955 struct btrfs_io_context **bioc_ret) 5956 { 5957 struct extent_map *em; 5958 struct map_lookup *map; 5959 struct btrfs_io_context *bioc; 5960 u64 length = *length_ret; 5961 u64 offset; 5962 u64 stripe_nr; 5963 u64 stripe_nr_end; 5964 u64 stripe_end_offset; 5965 u64 stripe_cnt; 5966 u64 stripe_len; 5967 u64 stripe_offset; 5968 u64 num_stripes; 5969 u32 stripe_index; 5970 u32 factor = 0; 5971 u32 sub_stripes = 0; 5972 u64 stripes_per_dev = 0; 5973 u32 remaining_stripes = 0; 5974 u32 last_stripe = 0; 5975 int ret = 0; 5976 int i; 5977 5978 /* Discard always returns a bioc.
*/ 5979 ASSERT(bioc_ret); 5980 5981 em = btrfs_get_chunk_map(fs_info, logical, length); 5982 if (IS_ERR(em)) 5983 return PTR_ERR(em); 5984 5985 map = em->map_lookup; 5986 /* we don't discard raid56 yet */ 5987 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 5988 ret = -EOPNOTSUPP; 5989 goto out; 5990 } 5991 5992 offset = logical - em->start; 5993 length = min_t(u64, em->start + em->len - logical, length); 5994 *length_ret = length; 5995 5996 stripe_len = map->stripe_len; 5997 /* 5998 * stripe_nr counts the total number of stripes we have to stride 5999 * to get to this block 6000 */ 6001 stripe_nr = div64_u64(offset, stripe_len); 6002 6003 /* stripe_offset is the offset of this block in its stripe */ 6004 stripe_offset = offset - stripe_nr * stripe_len; 6005 6006 stripe_nr_end = round_up(offset + length, map->stripe_len); 6007 stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len); 6008 stripe_cnt = stripe_nr_end - stripe_nr; 6009 stripe_end_offset = stripe_nr_end * map->stripe_len - 6010 (offset + length); 6011 /* 6012 * after this, stripe_nr is the number of stripes on this 6013 * device we have to walk to find the data, and stripe_index is 6014 * the number of our device in the stripe array 6015 */ 6016 num_stripes = 1; 6017 stripe_index = 0; 6018 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 6019 BTRFS_BLOCK_GROUP_RAID10)) { 6020 if (map->type & BTRFS_BLOCK_GROUP_RAID0) 6021 sub_stripes = 1; 6022 else 6023 sub_stripes = map->sub_stripes; 6024 6025 factor = map->num_stripes / sub_stripes; 6026 num_stripes = min_t(u64, map->num_stripes, 6027 sub_stripes * stripe_cnt); 6028 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 6029 stripe_index *= sub_stripes; 6030 stripes_per_dev = div_u64_rem(stripe_cnt, factor, 6031 &remaining_stripes); 6032 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe); 6033 last_stripe *= sub_stripes; 6034 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK | 6035 BTRFS_BLOCK_GROUP_DUP)) { 6036 num_stripes = map->num_stripes; 6037 } else { 6038 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6039 &stripe_index); 6040 } 6041 6042 bioc = alloc_btrfs_io_context(fs_info, num_stripes, 0); 6043 if (!bioc) { 6044 ret = -ENOMEM; 6045 goto out; 6046 } 6047 6048 for (i = 0; i < num_stripes; i++) { 6049 bioc->stripes[i].physical = 6050 map->stripes[stripe_index].physical + 6051 stripe_offset + stripe_nr * map->stripe_len; 6052 bioc->stripes[i].dev = map->stripes[stripe_index].dev; 6053 6054 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 6055 BTRFS_BLOCK_GROUP_RAID10)) { 6056 bioc->stripes[i].length = stripes_per_dev * 6057 map->stripe_len; 6058 6059 if (i / sub_stripes < remaining_stripes) 6060 bioc->stripes[i].length += map->stripe_len; 6061 6062 /* 6063 * Special for the first stripe and 6064 * the last stripe: 6065 * 6066 * |-------|...|-------| 6067 * |----------| 6068 * off end_off 6069 */ 6070 if (i < sub_stripes) 6071 bioc->stripes[i].length -= stripe_offset; 6072 6073 if (stripe_index >= last_stripe && 6074 stripe_index <= (last_stripe + 6075 sub_stripes - 1)) 6076 bioc->stripes[i].length -= stripe_end_offset; 6077 6078 if (i == sub_stripes - 1) 6079 stripe_offset = 0; 6080 } else { 6081 bioc->stripes[i].length = length; 6082 } 6083 6084 stripe_index++; 6085 if (stripe_index == map->num_stripes) { 6086 stripe_index = 0; 6087 stripe_nr++; 6088 } 6089 } 6090 6091 *bioc_ret = bioc; 6092 bioc->map_type = map->type; 6093 bioc->num_stripes = num_stripes; 6094 out: 6095 free_extent_map(em); 6096 return ret; 6097 } 6098 6099 /* 6100 * In dev-replace case, for 
repair case (that's the only case where the mirror 6101 * is selected explicitly when calling btrfs_map_block), blocks left of the 6102 * left cursor can also be read from the target drive. 6103 * 6104 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the 6105 * array of stripes. 6106 * For READ, it also needs to be supported using the same mirror number. 6107 * 6108 * If the requested block is not left of the left cursor, EIO is returned. This 6109 * can happen because btrfs_num_copies() returns one more in the dev-replace 6110 * case. 6111 */ 6112 static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info, 6113 u64 logical, u64 length, 6114 u64 srcdev_devid, int *mirror_num, 6115 u64 *physical) 6116 { 6117 struct btrfs_io_context *bioc = NULL; 6118 int num_stripes; 6119 int index_srcdev = 0; 6120 int found = 0; 6121 u64 physical_of_found = 0; 6122 int i; 6123 int ret = 0; 6124 6125 ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, 6126 logical, &length, &bioc, 0, 0); 6127 if (ret) { 6128 ASSERT(bioc == NULL); 6129 return ret; 6130 } 6131 6132 num_stripes = bioc->num_stripes; 6133 if (*mirror_num > num_stripes) { 6134 /* 6135 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror, 6136 * which means that the requested area is not left of the left 6137 * cursor 6138 */ 6139 btrfs_put_bioc(bioc); 6140 return -EIO; 6141 } 6142 6143 /* 6144 * Process the rest of the function using the mirror_num of the source 6145 * drive. Therefore look it up first. At the end, patch the device 6146 * pointer to the one of the target drive. 6147 */ 6148 for (i = 0; i < num_stripes; i++) { 6149 if (bioc->stripes[i].dev->devid != srcdev_devid) 6150 continue; 6151 6152 /* 6153 * In case of DUP, in order to keep it simple, only add the 6154 * mirror with the lowest physical address 6155 */ 6156 if (found && 6157 physical_of_found <= bioc->stripes[i].physical) 6158 continue; 6159 6160 index_srcdev = i; 6161 found = 1; 6162 physical_of_found = bioc->stripes[i].physical; 6163 } 6164 6165 btrfs_put_bioc(bioc); 6166 6167 ASSERT(found); 6168 if (!found) 6169 return -EIO; 6170 6171 *mirror_num = index_srcdev + 1; 6172 *physical = physical_of_found; 6173 return ret; 6174 } 6175 6176 static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical) 6177 { 6178 struct btrfs_block_group *cache; 6179 bool ret; 6180 6181 /* A non-zoned filesystem does not use the "to_copy" flag */ 6182 if (!btrfs_is_zoned(fs_info)) 6183 return false; 6184 6185 cache = btrfs_lookup_block_group(fs_info, logical); 6186 6187 spin_lock(&cache->lock); 6188 ret = cache->to_copy; 6189 spin_unlock(&cache->lock); 6190 6191 btrfs_put_block_group(cache); 6192 return ret; 6193 } 6194 6195 static void handle_ops_on_dev_replace(enum btrfs_map_op op, 6196 struct btrfs_io_context **bioc_ret, 6197 struct btrfs_dev_replace *dev_replace, 6198 u64 logical, 6199 int *num_stripes_ret, int *max_errors_ret) 6200 { 6201 struct btrfs_io_context *bioc = *bioc_ret; 6202 u64 srcdev_devid = dev_replace->srcdev->devid; 6203 int tgtdev_indexes = 0; 6204 int num_stripes = *num_stripes_ret; 6205 int max_errors = *max_errors_ret; 6206 int i; 6207 6208 if (op == BTRFS_MAP_WRITE) { 6209 int index_where_to_add; 6210 6211 /* 6212 * A block group which has "to_copy" set will eventually be 6213 * copied by the dev-replace process. We can avoid cloning IO here.
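 * (Illustrative: on a zoned filesystem the replace operation copies such block groups wholesale later, so duplicating the ongoing writes to the target device here would be redundant work.)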
6214 */ 6215 if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical)) 6216 return; 6217 6218 /* 6219 * duplicate the write operations while the dev replace 6220 * procedure is running. Since the copying of the old disk to 6221 * the new disk takes place at run time while the filesystem is 6222 * mounted writable, the regular write operations to the old 6223 * disk have to be duplicated to go to the new disk as well. 6224 * 6225 * Note that device->missing is handled by the caller, and that 6226 * the write to the old disk is already set up in the stripes 6227 * array. 6228 */ 6229 index_where_to_add = num_stripes; 6230 for (i = 0; i < num_stripes; i++) { 6231 if (bioc->stripes[i].dev->devid == srcdev_devid) { 6232 /* write to new disk, too */ 6233 struct btrfs_io_stripe *new = 6234 bioc->stripes + index_where_to_add; 6235 struct btrfs_io_stripe *old = 6236 bioc->stripes + i; 6237 6238 new->physical = old->physical; 6239 new->length = old->length; 6240 new->dev = dev_replace->tgtdev; 6241 bioc->tgtdev_map[i] = index_where_to_add; 6242 index_where_to_add++; 6243 max_errors++; 6244 tgtdev_indexes++; 6245 } 6246 } 6247 num_stripes = index_where_to_add; 6248 } else if (op == BTRFS_MAP_GET_READ_MIRRORS) { 6249 int index_srcdev = 0; 6250 int found = 0; 6251 u64 physical_of_found = 0; 6252 6253 /* 6254 * During the dev-replace procedure, the target drive can also 6255 * be used to read data in case it is needed to repair a corrupt 6256 * block elsewhere. This is possible if the requested area is 6257 * left of the left cursor. In this area, the target drive is a 6258 * full copy of the source drive. 6259 */ 6260 for (i = 0; i < num_stripes; i++) { 6261 if (bioc->stripes[i].dev->devid == srcdev_devid) { 6262 /* 6263 * In case of DUP, in order to keep it simple, 6264 * only add the mirror with the lowest physical 6265 * address 6266 */ 6267 if (found && 6268 physical_of_found <= bioc->stripes[i].physical) 6269 continue; 6270 index_srcdev = i; 6271 found = 1; 6272 physical_of_found = bioc->stripes[i].physical; 6273 } 6274 } 6275 if (found) { 6276 struct btrfs_io_stripe *tgtdev_stripe = 6277 bioc->stripes + num_stripes; 6278 6279 tgtdev_stripe->physical = physical_of_found; 6280 tgtdev_stripe->length = 6281 bioc->stripes[index_srcdev].length; 6282 tgtdev_stripe->dev = dev_replace->tgtdev; 6283 bioc->tgtdev_map[index_srcdev] = num_stripes; 6284 6285 tgtdev_indexes++; 6286 num_stripes++; 6287 } 6288 } 6289 6290 *num_stripes_ret = num_stripes; 6291 *max_errors_ret = max_errors; 6292 bioc->num_tgtdevs = tgtdev_indexes; 6293 *bioc_ret = bioc; 6294 } 6295 6296 static bool need_full_stripe(enum btrfs_map_op op) 6297 { 6298 return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS); 6299 } 6300 6301 /* 6302 * Calculate the geometry of a particular (address, len) tuple. This 6303 * information is used to calculate how big a particular bio can get before it 6304 * straddles a stripe. 6305 * 6306 * @fs_info: the filesystem 6307 * @em: mapping containing the logical extent 6308 * @op: type of operation - write or read 6309 * @logical: address that we want to figure out the geometry of 6310 * @io_geom: pointer used to return values 6311 * 6312 * Returns < 0 in case a chunk for the given logical address cannot be found, 6313 * usually shouldn't happen unless @logical is corrupted, 0 otherwise. 
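 * Worked example (assumed values, for illustration): with stripe_len = 64K and offset = 100K into the chunk, stripe_nr = 1 and stripe_offset = 36K, so for a striped non-RAID56 profile the bio may grow by at most 64K - 36K = 28K before straddling into the next stripe.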
6314 */ 6315 int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *em, 6316 enum btrfs_map_op op, u64 logical, 6317 struct btrfs_io_geometry *io_geom) 6318 { 6319 struct map_lookup *map; 6320 u64 len; 6321 u64 offset; 6322 u64 stripe_offset; 6323 u64 stripe_nr; 6324 u64 stripe_len; 6325 u64 raid56_full_stripe_start = (u64)-1; 6326 int data_stripes; 6327 6328 ASSERT(op != BTRFS_MAP_DISCARD); 6329 6330 map = em->map_lookup; 6331 /* Offset of this logical address in the chunk */ 6332 offset = logical - em->start; 6333 /* Len of a stripe in a chunk */ 6334 stripe_len = map->stripe_len; 6335 /* Stripe where this block falls in */ 6336 stripe_nr = div64_u64(offset, stripe_len); 6337 /* Offset of stripe in the chunk */ 6338 stripe_offset = stripe_nr * stripe_len; 6339 if (offset < stripe_offset) { 6340 btrfs_crit(fs_info, 6341 "stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu", 6342 stripe_offset, offset, em->start, logical, stripe_len); 6343 return -EINVAL; 6344 } 6345 6346 /* stripe_offset is the offset of this block in its stripe */ 6347 stripe_offset = offset - stripe_offset; 6348 data_stripes = nr_data_stripes(map); 6349 6350 if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { 6351 u64 max_len = stripe_len - stripe_offset; 6352 6353 /* 6354 * In case of raid56, we need to know the stripe aligned start 6355 */ 6356 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6357 unsigned long full_stripe_len = stripe_len * data_stripes; 6358 raid56_full_stripe_start = offset; 6359 6360 /* 6361 * Allow a write of a full stripe, but make sure we 6362 * don't allow straddling of stripes 6363 */ 6364 raid56_full_stripe_start = div64_u64(raid56_full_stripe_start, 6365 full_stripe_len); 6366 raid56_full_stripe_start *= full_stripe_len; 6367 6368 /* 6369 * For writes to RAID[56], allow a full stripeset across 6370 * all disks. For other RAID types and for RAID[56] 6371 * reads, just allow a single stripe (on a single disk). 
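 * (Illustrative: with 3 data stripes and stripe_len = 64K the full stripe is 192K wide, so a write starting at raid56_full_stripe_start may extend up to 192K before it must be split.)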
6372 */ 6373 if (op == BTRFS_MAP_WRITE) { 6374 max_len = stripe_len * data_stripes - 6375 (offset - raid56_full_stripe_start); 6376 } 6377 } 6378 len = min_t(u64, em->len - offset, max_len); 6379 } else { 6380 len = em->len - offset; 6381 } 6382 6383 io_geom->len = len; 6384 io_geom->offset = offset; 6385 io_geom->stripe_len = stripe_len; 6386 io_geom->stripe_nr = stripe_nr; 6387 io_geom->stripe_offset = stripe_offset; 6388 io_geom->raid56_stripe_offset = raid56_full_stripe_start; 6389 6390 return 0; 6391 } 6392 6393 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, 6394 enum btrfs_map_op op, 6395 u64 logical, u64 *length, 6396 struct btrfs_io_context **bioc_ret, 6397 int mirror_num, int need_raid_map) 6398 { 6399 struct extent_map *em; 6400 struct map_lookup *map; 6401 u64 stripe_offset; 6402 u64 stripe_nr; 6403 u64 stripe_len; 6404 u32 stripe_index; 6405 int data_stripes; 6406 int i; 6407 int ret = 0; 6408 int num_stripes; 6409 int max_errors = 0; 6410 int tgtdev_indexes = 0; 6411 struct btrfs_io_context *bioc = NULL; 6412 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; 6413 int dev_replace_is_ongoing = 0; 6414 int num_alloc_stripes; 6415 int patch_the_first_stripe_for_dev_replace = 0; 6416 u64 physical_to_patch_in_first_stripe = 0; 6417 u64 raid56_full_stripe_start = (u64)-1; 6418 struct btrfs_io_geometry geom; 6419 6420 ASSERT(bioc_ret); 6421 ASSERT(op != BTRFS_MAP_DISCARD); 6422 6423 em = btrfs_get_chunk_map(fs_info, logical, *length); 6424 ASSERT(!IS_ERR(em)); 6425 6426 ret = btrfs_get_io_geometry(fs_info, em, op, logical, &geom); 6427 if (ret < 0) 6428 return ret; 6429 6430 map = em->map_lookup; 6431 6432 *length = geom.len; 6433 stripe_len = geom.stripe_len; 6434 stripe_nr = geom.stripe_nr; 6435 stripe_offset = geom.stripe_offset; 6436 raid56_full_stripe_start = geom.raid56_stripe_offset; 6437 data_stripes = nr_data_stripes(map); 6438 6439 down_read(&dev_replace->rwsem); 6440 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace); 6441 /* 6442 * Hold the semaphore for read during the whole operation, write is 6443 * requested at commit time but must wait. 
6444 */ 6445 if (!dev_replace_is_ongoing) 6446 up_read(&dev_replace->rwsem); 6447 6448 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 && 6449 !need_full_stripe(op) && dev_replace->tgtdev != NULL) { 6450 ret = get_extra_mirror_from_replace(fs_info, logical, *length, 6451 dev_replace->srcdev->devid, 6452 &mirror_num, 6453 &physical_to_patch_in_first_stripe); 6454 if (ret) 6455 goto out; 6456 else 6457 patch_the_first_stripe_for_dev_replace = 1; 6458 } else if (mirror_num > map->num_stripes) { 6459 mirror_num = 0; 6460 } 6461 6462 num_stripes = 1; 6463 stripe_index = 0; 6464 if (map->type & BTRFS_BLOCK_GROUP_RAID0) { 6465 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6466 &stripe_index); 6467 if (!need_full_stripe(op)) 6468 mirror_num = 1; 6469 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) { 6470 if (need_full_stripe(op)) 6471 num_stripes = map->num_stripes; 6472 else if (mirror_num) 6473 stripe_index = mirror_num - 1; 6474 else { 6475 stripe_index = find_live_mirror(fs_info, map, 0, 6476 dev_replace_is_ongoing); 6477 mirror_num = stripe_index + 1; 6478 } 6479 6480 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { 6481 if (need_full_stripe(op)) { 6482 num_stripes = map->num_stripes; 6483 } else if (mirror_num) { 6484 stripe_index = mirror_num - 1; 6485 } else { 6486 mirror_num = 1; 6487 } 6488 6489 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { 6490 u32 factor = map->num_stripes / map->sub_stripes; 6491 6492 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 6493 stripe_index *= map->sub_stripes; 6494 6495 if (need_full_stripe(op)) 6496 num_stripes = map->sub_stripes; 6497 else if (mirror_num) 6498 stripe_index += mirror_num - 1; 6499 else { 6500 int old_stripe_index = stripe_index; 6501 stripe_index = find_live_mirror(fs_info, map, 6502 stripe_index, 6503 dev_replace_is_ongoing); 6504 mirror_num = stripe_index - old_stripe_index + 1; 6505 } 6506 6507 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6508 if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) { 6509 /* push stripe_nr back to the start of the full stripe */ 6510 stripe_nr = div64_u64(raid56_full_stripe_start, 6511 stripe_len * data_stripes); 6512 6513 /* RAID[56] write or recovery. Return all stripes */ 6514 num_stripes = map->num_stripes; 6515 max_errors = nr_parity_stripes(map); 6516 6517 *length = map->stripe_len; 6518 stripe_index = 0; 6519 stripe_offset = 0; 6520 } else { 6521 /* 6522 * Mirror #0 or #1 means the original data block. 6523 * Mirror #2 is RAID5 parity block. 6524 * Mirror #3 is RAID6 Q block. 
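 * (Example with assumed values: a 4-device RAID6 chunk has 2 data stripes, so mirror_num == 2 yields stripe_index = data_stripes + 0 (the P stripe) and mirror_num == 3 yields data_stripes + 1 (the Q stripe), both before the rotation applied just below.)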
6525 */ 6526 stripe_nr = div_u64_rem(stripe_nr, 6527 data_stripes, &stripe_index); 6528 if (mirror_num > 1) 6529 stripe_index = data_stripes + mirror_num - 2; 6530 6531 /* We distribute the parity blocks across stripes */ 6532 div_u64_rem(stripe_nr + stripe_index, map->num_stripes, 6533 &stripe_index); 6534 if (!need_full_stripe(op) && mirror_num <= 1) 6535 mirror_num = 1; 6536 } 6537 } else { 6538 /* 6539 * after this, stripe_nr is the number of stripes on this 6540 * device we have to walk to find the data, and stripe_index is 6541 * the number of our device in the stripe array 6542 */ 6543 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6544 &stripe_index); 6545 mirror_num = stripe_index + 1; 6546 } 6547 if (stripe_index >= map->num_stripes) { 6548 btrfs_crit(fs_info, 6549 "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u", 6550 stripe_index, map->num_stripes); 6551 ret = -EINVAL; 6552 goto out; 6553 } 6554 6555 num_alloc_stripes = num_stripes; 6556 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) { 6557 if (op == BTRFS_MAP_WRITE) 6558 num_alloc_stripes <<= 1; 6559 if (op == BTRFS_MAP_GET_READ_MIRRORS) 6560 num_alloc_stripes++; 6561 tgtdev_indexes = num_stripes; 6562 } 6563 6564 bioc = alloc_btrfs_io_context(fs_info, num_alloc_stripes, tgtdev_indexes); 6565 if (!bioc) { 6566 ret = -ENOMEM; 6567 goto out; 6568 } 6569 6570 for (i = 0; i < num_stripes; i++) { 6571 bioc->stripes[i].physical = map->stripes[stripe_index].physical + 6572 stripe_offset + stripe_nr * map->stripe_len; 6573 bioc->stripes[i].dev = map->stripes[stripe_index].dev; 6574 stripe_index++; 6575 } 6576 6577 /* Build raid_map */ 6578 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map && 6579 (need_full_stripe(op) || mirror_num > 1)) { 6580 u64 tmp; 6581 unsigned rot; 6582 6583 /* Work out the disk rotation on this stripe-set */ 6584 div_u64_rem(stripe_nr, num_stripes, &rot); 6585 6586 /* Fill in the logical address of each stripe */ 6587 tmp = stripe_nr * data_stripes; 6588 for (i = 0; i < data_stripes; i++) 6589 bioc->raid_map[(i + rot) % num_stripes] = 6590 em->start + (tmp + i) * map->stripe_len; 6591 6592 bioc->raid_map[(i + rot) % map->num_stripes] = RAID5_P_STRIPE; 6593 if (map->type & BTRFS_BLOCK_GROUP_RAID6) 6594 bioc->raid_map[(i + rot + 1) % num_stripes] = 6595 RAID6_Q_STRIPE; 6596 6597 sort_parity_stripes(bioc, num_stripes); 6598 } 6599 6600 if (need_full_stripe(op)) 6601 max_errors = btrfs_chunk_max_errors(map); 6602 6603 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL && 6604 need_full_stripe(op)) { 6605 handle_ops_on_dev_replace(op, &bioc, dev_replace, logical, 6606 &num_stripes, &max_errors); 6607 } 6608 6609 *bioc_ret = bioc; 6610 bioc->map_type = map->type; 6611 bioc->num_stripes = num_stripes; 6612 bioc->max_errors = max_errors; 6613 bioc->mirror_num = mirror_num; 6614 6615 /* 6616 * this is the case that REQ_READ && dev_replace_is_ongoing && 6617 * mirror_num == num_stripes + 1 && dev_replace target drive is 6618 * available as a mirror 6619 */ 6620 if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) { 6621 WARN_ON(num_stripes > 1); 6622 bioc->stripes[0].dev = dev_replace->tgtdev; 6623 bioc->stripes[0].physical = physical_to_patch_in_first_stripe; 6624 bioc->mirror_num = map->num_stripes + 1; 6625 } 6626 out: 6627 if (dev_replace_is_ongoing) { 6628 lockdep_assert_held(&dev_replace->rwsem); 6629 /* Unlock and let waiting writers proceed */ 6630 up_read(&dev_replace->rwsem); 6631 } 6632 free_extent_map(em); 6633 return ret; 6634 } 
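/* Map a logical range to its physical stripes: discard is routed to the dedicated helper above, everything else goes through __btrfs_map_block() without building a raid_map (need_raid_map == 0). */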
6635 6636 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 6637 u64 logical, u64 *length, 6638 struct btrfs_io_context **bioc_ret, int mirror_num) 6639 { 6640 if (op == BTRFS_MAP_DISCARD) 6641 return __btrfs_map_block_for_discard(fs_info, logical, 6642 length, bioc_ret); 6643 6644 return __btrfs_map_block(fs_info, op, logical, length, bioc_ret, 6645 mirror_num, 0); 6646 } 6647 6648 /* For Scrub/replace */ 6649 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 6650 u64 logical, u64 *length, 6651 struct btrfs_io_context **bioc_ret) 6652 { 6653 return __btrfs_map_block(fs_info, op, logical, length, bioc_ret, 0, 1); 6654 } 6655 6656 static inline void btrfs_end_bioc(struct btrfs_io_context *bioc, struct bio *bio) 6657 { 6658 bio->bi_private = bioc->private; 6659 bio->bi_end_io = bioc->end_io; 6660 bio_endio(bio); 6661 6662 btrfs_put_bioc(bioc); 6663 } 6664 6665 static void btrfs_end_bio(struct bio *bio) 6666 { 6667 struct btrfs_io_context *bioc = bio->bi_private; 6668 int is_orig_bio = 0; 6669 6670 if (bio->bi_status) { 6671 atomic_inc(&bioc->error); 6672 if (bio->bi_status == BLK_STS_IOERR || 6673 bio->bi_status == BLK_STS_TARGET) { 6674 struct btrfs_device *dev = btrfs_bio(bio)->device; 6675 6676 ASSERT(dev->bdev); 6677 if (btrfs_op(bio) == BTRFS_MAP_WRITE) 6678 btrfs_dev_stat_inc_and_print(dev, 6679 BTRFS_DEV_STAT_WRITE_ERRS); 6680 else if (!(bio->bi_opf & REQ_RAHEAD)) 6681 btrfs_dev_stat_inc_and_print(dev, 6682 BTRFS_DEV_STAT_READ_ERRS); 6683 if (bio->bi_opf & REQ_PREFLUSH) 6684 btrfs_dev_stat_inc_and_print(dev, 6685 BTRFS_DEV_STAT_FLUSH_ERRS); 6686 } 6687 } 6688 6689 if (bio == bioc->orig_bio) 6690 is_orig_bio = 1; 6691 6692 btrfs_bio_counter_dec(bioc->fs_info); 6693 6694 if (atomic_dec_and_test(&bioc->stripes_pending)) { 6695 if (!is_orig_bio) { 6696 bio_put(bio); 6697 bio = bioc->orig_bio; 6698 } 6699 6700 btrfs_bio(bio)->mirror_num = bioc->mirror_num; 6701 /* Only send an error to the higher layers if it is 6702 * beyond the tolerance of the btrfs bio 6703 */ 6704 if (atomic_read(&bioc->error) > bioc->max_errors) { 6705 bio->bi_status = BLK_STS_IOERR; 6706 } else { 6707 /* 6708 * This bio is actually up to date, we didn't 6709 * go over the max number of errors 6710 */ 6711 bio->bi_status = BLK_STS_OK; 6712 } 6713 6714 btrfs_end_bioc(bioc, bio); 6715 } else if (!is_orig_bio) { 6716 bio_put(bio); 6717 } 6718 } 6719 6720 static void submit_stripe_bio(struct btrfs_io_context *bioc, struct bio *bio, 6721 u64 physical, struct btrfs_device *dev) 6722 { 6723 struct btrfs_fs_info *fs_info = bioc->fs_info; 6724 6725 bio->bi_private = bioc; 6726 btrfs_bio(bio)->device = dev; 6727 bio->bi_end_io = btrfs_end_bio; 6728 bio->bi_iter.bi_sector = physical >> 9; 6729 /* 6730 * For zone append writing, bi_sector must point to the beginning of the 6731 * zone 6732 */ 6733 if (bio_op(bio) == REQ_OP_ZONE_APPEND) { 6734 if (btrfs_dev_is_sequential(dev, physical)) { 6735 u64 zone_start = round_down(physical, fs_info->zone_size); 6736 6737 bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT; 6738 } else { 6739 bio->bi_opf &= ~REQ_OP_ZONE_APPEND; 6740 bio->bi_opf |= REQ_OP_WRITE; 6741 } 6742 } 6743 btrfs_debug_in_rcu(fs_info, 6744 "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u", 6745 bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector, 6746 (unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name), 6747 dev->devid, bio->bi_iter.bi_size); 6748 bio_set_dev(bio, dev->bdev); 6749 6750 btrfs_bio_counter_inc_noblocked(fs_info); 6751 6752 
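/* btrfsic_submit_bio() wraps submit_bio(); the extra integrity checks are only compiled in with CONFIG_BTRFS_FS_CHECK_INTEGRITY. */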
btrfsic_submit_bio(bio); 6753 } 6754 6755 static void bioc_error(struct btrfs_io_context *bioc, struct bio *bio, u64 logical) 6756 { 6757 atomic_inc(&bioc->error); 6758 if (atomic_dec_and_test(&bioc->stripes_pending)) { 6759 /* Should be the original bio. */ 6760 WARN_ON(bio != bioc->orig_bio); 6761 6762 btrfs_bio(bio)->mirror_num = bioc->mirror_num; 6763 bio->bi_iter.bi_sector = logical >> 9; 6764 if (atomic_read(&bioc->error) > bioc->max_errors) 6765 bio->bi_status = BLK_STS_IOERR; 6766 else 6767 bio->bi_status = BLK_STS_OK; 6768 btrfs_end_bioc(bioc, bio); 6769 } 6770 } 6771 6772 blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, 6773 int mirror_num) 6774 { 6775 struct btrfs_device *dev; 6776 struct bio *first_bio = bio; 6777 u64 logical = bio->bi_iter.bi_sector << 9; 6778 u64 length = 0; 6779 u64 map_length; 6780 int ret; 6781 int dev_nr; 6782 int total_devs; 6783 struct btrfs_io_context *bioc = NULL; 6784 6785 length = bio->bi_iter.bi_size; 6786 map_length = length; 6787 6788 btrfs_bio_counter_inc_blocked(fs_info); 6789 ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical, 6790 &map_length, &bioc, mirror_num, 1); 6791 if (ret) { 6792 btrfs_bio_counter_dec(fs_info); 6793 return errno_to_blk_status(ret); 6794 } 6795 6796 total_devs = bioc->num_stripes; 6797 bioc->orig_bio = first_bio; 6798 bioc->private = first_bio->bi_private; 6799 bioc->end_io = first_bio->bi_end_io; 6800 atomic_set(&bioc->stripes_pending, bioc->num_stripes); 6801 6802 if ((bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) && 6803 ((btrfs_op(bio) == BTRFS_MAP_WRITE) || (mirror_num > 1))) { 6804 /* In this case, map_length has been set to the length of 6805 a single stripe; not the whole write */ 6806 if (btrfs_op(bio) == BTRFS_MAP_WRITE) { 6807 ret = raid56_parity_write(bio, bioc, map_length); 6808 } else { 6809 ret = raid56_parity_recover(bio, bioc, map_length, 6810 mirror_num, 1); 6811 } 6812 6813 btrfs_bio_counter_dec(fs_info); 6814 return errno_to_blk_status(ret); 6815 } 6816 6817 if (map_length < length) { 6818 btrfs_crit(fs_info, 6819 "mapping failed logical %llu bio len %llu len %llu", 6820 logical, length, map_length); 6821 BUG(); 6822 } 6823 6824 for (dev_nr = 0; dev_nr < total_devs; dev_nr++) { 6825 dev = bioc->stripes[dev_nr].dev; 6826 if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING, 6827 &dev->dev_state) || 6828 (btrfs_op(first_bio) == BTRFS_MAP_WRITE && 6829 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) { 6830 bioc_error(bioc, first_bio, logical); 6831 continue; 6832 } 6833 6834 if (dev_nr < total_devs - 1) 6835 bio = btrfs_bio_clone(first_bio); 6836 else 6837 bio = first_bio; 6838 6839 submit_stripe_bio(bioc, bio, bioc->stripes[dev_nr].physical, dev); 6840 } 6841 btrfs_bio_counter_dec(fs_info); 6842 return BLK_STS_OK; 6843 } 6844 6845 static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args, 6846 const struct btrfs_fs_devices *fs_devices) 6847 { 6848 if (args->fsid == NULL) 6849 return true; 6850 if (memcmp(fs_devices->metadata_uuid, args->fsid, BTRFS_FSID_SIZE) == 0) 6851 return true; 6852 return false; 6853 } 6854 6855 static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args, 6856 const struct btrfs_device *device) 6857 { 6858 ASSERT((args->devid != (u64)-1) || args->missing); 6859 6860 if ((args->devid != (u64)-1) && device->devid != args->devid) 6861 return false; 6862 if (args->uuid && memcmp(device->uuid, args->uuid, BTRFS_UUID_SIZE) != 0) 6863 return false; 6864 if (!args->missing) 6865 return true; 6866 if 
(test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) && 6867 !device->bdev) 6868 return true; 6869 return false; 6870 } 6871 6872 /* 6873 * Find a device specified by @devid or @uuid in the list of @fs_devices, or 6874 * return NULL. 6875 * 6876 * If devid and uuid are both specified, the match must be exact, otherwise 6877 * only devid is used. 6878 */ 6879 struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices, 6880 const struct btrfs_dev_lookup_args *args) 6881 { 6882 struct btrfs_device *device; 6883 struct btrfs_fs_devices *seed_devs; 6884 6885 if (dev_args_match_fs_devices(args, fs_devices)) { 6886 list_for_each_entry(device, &fs_devices->devices, dev_list) { 6887 if (dev_args_match_device(args, device)) 6888 return device; 6889 } 6890 } 6891 6892 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 6893 if (!dev_args_match_fs_devices(args, seed_devs)) 6894 continue; 6895 list_for_each_entry(device, &seed_devs->devices, dev_list) { 6896 if (dev_args_match_device(args, device)) 6897 return device; 6898 } 6899 } 6900 6901 return NULL; 6902 } 6903 6904 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices, 6905 u64 devid, u8 *dev_uuid) 6906 { 6907 struct btrfs_device *device; 6908 unsigned int nofs_flag; 6909 6910 /* 6911 * We call this under the chunk_mutex, so we want to use NOFS for this 6912 * allocation, however we don't want to change btrfs_alloc_device() to 6913 * always do NOFS because we use it in a lot of other GFP_KERNEL safe 6914 * places. 6915 */ 6916 nofs_flag = memalloc_nofs_save(); 6917 device = btrfs_alloc_device(NULL, &devid, dev_uuid); 6918 memalloc_nofs_restore(nofs_flag); 6919 if (IS_ERR(device)) 6920 return device; 6921 6922 list_add(&device->dev_list, &fs_devices->devices); 6923 device->fs_devices = fs_devices; 6924 fs_devices->num_devices++; 6925 6926 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 6927 fs_devices->missing_devices++; 6928 6929 return device; 6930 } 6931 6932 /** 6933 * btrfs_alloc_device - allocate struct btrfs_device 6934 * @fs_info: used only for generating a new devid, can be NULL if 6935 * devid is provided (i.e. @devid != NULL). 6936 * @devid: a pointer to devid for this device. If NULL a new devid 6937 * is generated. 6938 * @uuid: a pointer to UUID for this device. If NULL a new UUID 6939 * is generated. 6940 * 6941 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR() 6942 * on error. Returned struct is not linked onto any lists and must be 6943 * destroyed with btrfs_free_device. 
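 * Example caller from this file: add_missing_dev() passes a NULL @fs_info together with an explicit devid and uuid, so neither a new devid nor a new UUID is generated.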
6944 */ 6945 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, 6946 const u64 *devid, 6947 const u8 *uuid) 6948 { 6949 struct btrfs_device *dev; 6950 u64 tmp; 6951 6952 if (WARN_ON(!devid && !fs_info)) 6953 return ERR_PTR(-EINVAL); 6954 6955 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 6956 if (!dev) 6957 return ERR_PTR(-ENOMEM); 6958 6959 /* 6960 * Preallocate a bio that's always going to be used for flushing device 6961 * barriers and matches the device lifespan 6962 */ 6963 dev->flush_bio = bio_kmalloc(GFP_KERNEL, 0); 6964 if (!dev->flush_bio) { 6965 kfree(dev); 6966 return ERR_PTR(-ENOMEM); 6967 } 6968 6969 INIT_LIST_HEAD(&dev->dev_list); 6970 INIT_LIST_HEAD(&dev->dev_alloc_list); 6971 INIT_LIST_HEAD(&dev->post_commit_list); 6972 6973 atomic_set(&dev->reada_in_flight, 0); 6974 atomic_set(&dev->dev_stats_ccnt, 0); 6975 btrfs_device_data_ordered_init(dev); 6976 INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM); 6977 INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM); 6978 extent_io_tree_init(fs_info, &dev->alloc_state, 6979 IO_TREE_DEVICE_ALLOC_STATE, NULL); 6980 6981 if (devid) 6982 tmp = *devid; 6983 else { 6984 int ret; 6985 6986 ret = find_next_devid(fs_info, &tmp); 6987 if (ret) { 6988 btrfs_free_device(dev); 6989 return ERR_PTR(ret); 6990 } 6991 } 6992 dev->devid = tmp; 6993 6994 if (uuid) 6995 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE); 6996 else 6997 generate_random_uuid(dev->uuid); 6998 6999 return dev; 7000 } 7001 7002 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info, 7003 u64 devid, u8 *uuid, bool error) 7004 { 7005 if (error) 7006 btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing", 7007 devid, uuid); 7008 else 7009 btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing", 7010 devid, uuid); 7011 } 7012 7013 static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes) 7014 { 7015 const int data_stripes = calc_data_stripes(type, num_stripes); 7016 7017 return div_u64(chunk_len, data_stripes); 7018 } 7019 7020 #if BITS_PER_LONG == 32 7021 /* 7022 * Due to the page cache limit, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE 7023 * can't be accessed on 32bit systems. 7024 * 7025 * This function does a mount-time check to reject the fs if it already has 7026 * a metadata chunk beyond that limit. 7027 */ 7028 static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info, 7029 u64 logical, u64 length, u64 type) 7030 { 7031 if (!(type & BTRFS_BLOCK_GROUP_METADATA)) 7032 return 0; 7033 7034 if (logical + length < MAX_LFS_FILESIZE) 7035 return 0; 7036 7037 btrfs_err_32bit_limit(fs_info); 7038 return -EOVERFLOW; 7039 } 7040 7041 /* 7042 * This is to give an early warning for any metadata chunk reaching 7043 * BTRFS_32BIT_EARLY_WARN_THRESHOLD. 7044 * Although we can still access the metadata, it's not going to be possible 7045 * once the limit is reached.
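 * (Background, stated as an assumption: on 32-bit systems the page cache index is an unsigned long, so with 4K pages only the first 2^32 * 4K = 16T of the metadata address space is reachable.)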
7046 */ 7047 static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info, 7048 u64 logical, u64 length, u64 type) 7049 { 7050 if (!(type & BTRFS_BLOCK_GROUP_METADATA)) 7051 return; 7052 7053 if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD) 7054 return; 7055 7056 btrfs_warn_32bit_limit(fs_info); 7057 } 7058 #endif 7059 7060 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf, 7061 struct btrfs_chunk *chunk) 7062 { 7063 BTRFS_DEV_LOOKUP_ARGS(args); 7064 struct btrfs_fs_info *fs_info = leaf->fs_info; 7065 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 7066 struct map_lookup *map; 7067 struct extent_map *em; 7068 u64 logical; 7069 u64 length; 7070 u64 devid; 7071 u64 type; 7072 u8 uuid[BTRFS_UUID_SIZE]; 7073 int num_stripes; 7074 int ret; 7075 int i; 7076 7077 logical = key->offset; 7078 length = btrfs_chunk_length(leaf, chunk); 7079 type = btrfs_chunk_type(leaf, chunk); 7080 num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 7081 7082 #if BITS_PER_LONG == 32 7083 ret = check_32bit_meta_chunk(fs_info, logical, length, type); 7084 if (ret < 0) 7085 return ret; 7086 warn_32bit_meta_chunk(fs_info, logical, length, type); 7087 #endif 7088 7089 /* 7090 * Only need to verify chunk item if we're reading from sys chunk array, 7091 * as chunk item in tree block is already verified by tree-checker. 7092 */ 7093 if (leaf->start == BTRFS_SUPER_INFO_OFFSET) { 7094 ret = btrfs_check_chunk_valid(leaf, chunk, logical); 7095 if (ret) 7096 return ret; 7097 } 7098 7099 read_lock(&map_tree->lock); 7100 em = lookup_extent_mapping(map_tree, logical, 1); 7101 read_unlock(&map_tree->lock); 7102 7103 /* already mapped? */ 7104 if (em && em->start <= logical && em->start + em->len > logical) { 7105 free_extent_map(em); 7106 return 0; 7107 } else if (em) { 7108 free_extent_map(em); 7109 } 7110 7111 em = alloc_extent_map(); 7112 if (!em) 7113 return -ENOMEM; 7114 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); 7115 if (!map) { 7116 free_extent_map(em); 7117 return -ENOMEM; 7118 } 7119 7120 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 7121 em->map_lookup = map; 7122 em->start = logical; 7123 em->len = length; 7124 em->orig_start = 0; 7125 em->block_start = 0; 7126 em->block_len = em->len; 7127 7128 map->num_stripes = num_stripes; 7129 map->io_width = btrfs_chunk_io_width(leaf, chunk); 7130 map->io_align = btrfs_chunk_io_align(leaf, chunk); 7131 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk); 7132 map->type = type; 7133 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); 7134 map->verified_stripes = 0; 7135 em->orig_block_len = calc_stripe_length(type, em->len, 7136 map->num_stripes); 7137 for (i = 0; i < num_stripes; i++) { 7138 map->stripes[i].physical = 7139 btrfs_stripe_offset_nr(leaf, chunk, i); 7140 devid = btrfs_stripe_devid_nr(leaf, chunk, i); 7141 args.devid = devid; 7142 read_extent_buffer(leaf, uuid, (unsigned long) 7143 btrfs_stripe_dev_uuid_nr(chunk, i), 7144 BTRFS_UUID_SIZE); 7145 args.uuid = uuid; 7146 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args); 7147 if (!map->stripes[i].dev && 7148 !btrfs_test_opt(fs_info, DEGRADED)) { 7149 free_extent_map(em); 7150 btrfs_report_missing_device(fs_info, devid, uuid, true); 7151 return -ENOENT; 7152 } 7153 if (!map->stripes[i].dev) { 7154 map->stripes[i].dev = 7155 add_missing_dev(fs_info->fs_devices, devid, 7156 uuid); 7157 if (IS_ERR(map->stripes[i].dev)) { 7158 free_extent_map(em); 7159 btrfs_err(fs_info, 7160 "failed to init missing dev %llu: %ld", 7161 devid, 
PTR_ERR(map->stripes[i].dev)); 7162 return PTR_ERR(map->stripes[i].dev); 7163 } 7164 btrfs_report_missing_device(fs_info, devid, uuid, false); 7165 } 7166 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 7167 &(map->stripes[i].dev->dev_state)); 7168 7169 } 7170 7171 write_lock(&map_tree->lock); 7172 ret = add_extent_mapping(map_tree, em, 0); 7173 write_unlock(&map_tree->lock); 7174 if (ret < 0) { 7175 btrfs_err(fs_info, 7176 "failed to add chunk map, start=%llu len=%llu: %d", 7177 em->start, em->len, ret); 7178 } 7179 free_extent_map(em); 7180 7181 return ret; 7182 } 7183 7184 static void fill_device_from_item(struct extent_buffer *leaf, 7185 struct btrfs_dev_item *dev_item, 7186 struct btrfs_device *device) 7187 { 7188 unsigned long ptr; 7189 7190 device->devid = btrfs_device_id(leaf, dev_item); 7191 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item); 7192 device->total_bytes = device->disk_total_bytes; 7193 device->commit_total_bytes = device->disk_total_bytes; 7194 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item); 7195 device->commit_bytes_used = device->bytes_used; 7196 device->type = btrfs_device_type(leaf, dev_item); 7197 device->io_align = btrfs_device_io_align(leaf, dev_item); 7198 device->io_width = btrfs_device_io_width(leaf, dev_item); 7199 device->sector_size = btrfs_device_sector_size(leaf, dev_item); 7200 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID); 7201 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 7202 7203 ptr = btrfs_device_uuid(dev_item); 7204 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 7205 } 7206 7207 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info, 7208 u8 *fsid) 7209 { 7210 struct btrfs_fs_devices *fs_devices; 7211 int ret; 7212 7213 lockdep_assert_held(&uuid_mutex); 7214 ASSERT(fsid); 7215 7216 /* This will match only for multi-device seed fs */ 7217 list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list) 7218 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE)) 7219 return fs_devices; 7220 7221 7222 fs_devices = find_fsid(fsid, NULL); 7223 if (!fs_devices) { 7224 if (!btrfs_test_opt(fs_info, DEGRADED)) 7225 return ERR_PTR(-ENOENT); 7226 7227 fs_devices = alloc_fs_devices(fsid, NULL); 7228 if (IS_ERR(fs_devices)) 7229 return fs_devices; 7230 7231 fs_devices->seeding = true; 7232 fs_devices->opened = 1; 7233 return fs_devices; 7234 } 7235 7236 /* 7237 * Upon first call for a seed fs fsid, just create a private copy of the 7238 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list 7239 */ 7240 fs_devices = clone_fs_devices(fs_devices); 7241 if (IS_ERR(fs_devices)) 7242 return fs_devices; 7243 7244 ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder); 7245 if (ret) { 7246 free_fs_devices(fs_devices); 7247 return ERR_PTR(ret); 7248 } 7249 7250 if (!fs_devices->seeding) { 7251 close_fs_devices(fs_devices); 7252 free_fs_devices(fs_devices); 7253 return ERR_PTR(-EINVAL); 7254 } 7255 7256 list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list); 7257 7258 return fs_devices; 7259 } 7260 7261 static int read_one_dev(struct extent_buffer *leaf, 7262 struct btrfs_dev_item *dev_item) 7263 { 7264 BTRFS_DEV_LOOKUP_ARGS(args); 7265 struct btrfs_fs_info *fs_info = leaf->fs_info; 7266 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7267 struct btrfs_device *device; 7268 u64 devid; 7269 int ret; 7270 u8 fs_uuid[BTRFS_FSID_SIZE]; 7271 u8 dev_uuid[BTRFS_UUID_SIZE]; 7272 7273 devid = args.devid = btrfs_device_id(leaf, 
dev_item); 7274 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 7275 BTRFS_UUID_SIZE); 7276 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 7277 BTRFS_FSID_SIZE); 7278 args.uuid = dev_uuid; 7279 args.fsid = fs_uuid; 7280 7281 if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) { 7282 fs_devices = open_seed_devices(fs_info, fs_uuid); 7283 if (IS_ERR(fs_devices)) 7284 return PTR_ERR(fs_devices); 7285 } 7286 7287 device = btrfs_find_device(fs_info->fs_devices, &args); 7288 if (!device) { 7289 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7290 btrfs_report_missing_device(fs_info, devid, 7291 dev_uuid, true); 7292 return -ENOENT; 7293 } 7294 7295 device = add_missing_dev(fs_devices, devid, dev_uuid); 7296 if (IS_ERR(device)) { 7297 btrfs_err(fs_info, 7298 "failed to add missing dev %llu: %ld", 7299 devid, PTR_ERR(device)); 7300 return PTR_ERR(device); 7301 } 7302 btrfs_report_missing_device(fs_info, devid, dev_uuid, false); 7303 } else { 7304 if (!device->bdev) { 7305 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7306 btrfs_report_missing_device(fs_info, 7307 devid, dev_uuid, true); 7308 return -ENOENT; 7309 } 7310 btrfs_report_missing_device(fs_info, devid, 7311 dev_uuid, false); 7312 } 7313 7314 if (!device->bdev && 7315 !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 7316 /* 7317 * This happens when a device that was properly set up 7318 * in the device info lists suddenly goes bad. 7319 * device->bdev is NULL, and so we have to mark the 7320 * device as missing here 7321 */ 7322 device->fs_devices->missing_devices++; 7323 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 7324 } 7325 7326 /* Move the device to its own fs_devices */ 7327 if (device->fs_devices != fs_devices) { 7328 ASSERT(test_bit(BTRFS_DEV_STATE_MISSING, 7329 &device->dev_state)); 7330 7331 list_move(&device->dev_list, &fs_devices->devices); 7332 device->fs_devices->num_devices--; 7333 fs_devices->num_devices++; 7334 7335 device->fs_devices->missing_devices--; 7336 fs_devices->missing_devices++; 7337 7338 device->fs_devices = fs_devices; 7339 } 7340 } 7341 7342 if (device->fs_devices != fs_info->fs_devices) { 7343 BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)); 7344 if (device->generation != 7345 btrfs_device_generation(leaf, dev_item)) 7346 return -EINVAL; 7347 } 7348 7349 fill_device_from_item(leaf, dev_item, device); 7350 if (device->bdev) { 7351 u64 max_total_bytes = bdev_nr_bytes(device->bdev); 7352 7353 if (device->total_bytes > max_total_bytes) { 7354 btrfs_err(fs_info, 7355 "device total_bytes should be at most %llu but found %llu", 7356 max_total_bytes, device->total_bytes); 7357 return -EINVAL; 7358 } 7359 } 7360 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 7361 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 7362 !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 7363 device->fs_devices->total_rw_bytes += device->total_bytes; 7364 atomic64_add(device->total_bytes - device->bytes_used, 7365 &fs_info->free_chunk_space); 7366 } 7367 ret = 0; 7368 return ret; 7369 } 7370 7371 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info) 7372 { 7373 struct btrfs_root *root = fs_info->tree_root; 7374 struct btrfs_super_block *super_copy = fs_info->super_copy; 7375 struct extent_buffer *sb; 7376 struct btrfs_disk_key *disk_key; 7377 struct btrfs_chunk *chunk; 7378 u8 *array_ptr; 7379 unsigned long sb_array_offset; 7380 int ret = 0; 7381 u32 num_stripes; 7382 u32 array_size; 7383 u32 len = 0; 7384 u32 cur_offset; 7385 u64 
type; 7386 struct btrfs_key key; 7387 7388 ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize); 7389 /* 7390 * This will create an extent buffer of nodesize; the superblock size is 7391 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will 7392 * overallocate but we can keep it as-is, only the first page is used. 7393 */ 7394 sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET, 7395 root->root_key.objectid, 0); 7396 if (IS_ERR(sb)) 7397 return PTR_ERR(sb); 7398 set_extent_buffer_uptodate(sb); 7399 /* 7400 * The sb extent buffer is artificial and just used to read the system array. 7401 * The set_extent_buffer_uptodate() call does not properly mark all its 7402 * pages up-to-date when the page is larger: the extent does not cover the 7403 * whole page and consequently check_page_uptodate does not find all 7404 * the page's extents up-to-date (the hole beyond sb), so 7405 * write_extent_buffer then triggers a WARN_ON. 7406 * 7407 * Regular short extents go through the mark_extent_buffer_dirty/writeback cycle, 7408 * but sb spans only this function. Add an explicit SetPageUptodate call 7409 * to silence the warning eg. on PowerPC 64. 7410 */ 7411 if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE) 7412 SetPageUptodate(sb->pages[0]); 7413 7414 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); 7415 array_size = btrfs_super_sys_array_size(super_copy); 7416 7417 array_ptr = super_copy->sys_chunk_array; 7418 sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array); 7419 cur_offset = 0; 7420 7421 while (cur_offset < array_size) { 7422 disk_key = (struct btrfs_disk_key *)array_ptr; 7423 len = sizeof(*disk_key); 7424 if (cur_offset + len > array_size) 7425 goto out_short_read; 7426 7427 btrfs_disk_key_to_cpu(&key, disk_key); 7428 7429 array_ptr += len; 7430 sb_array_offset += len; 7431 cur_offset += len; 7432 7433 if (key.type != BTRFS_CHUNK_ITEM_KEY) { 7434 btrfs_err(fs_info, 7435 "unexpected item type %u in sys_array at offset %u", 7436 (u32)key.type, cur_offset); 7437 ret = -EIO; 7438 break; 7439 } 7440 7441 chunk = (struct btrfs_chunk *)sb_array_offset; 7442 /* 7443 * At least one btrfs_chunk with one stripe must be present; 7444 * the exact stripe count check comes afterwards 7445 */ 7446 len = btrfs_chunk_item_size(1); 7447 if (cur_offset + len > array_size) 7448 goto out_short_read; 7449 7450 num_stripes = btrfs_chunk_num_stripes(sb, chunk); 7451 if (!num_stripes) { 7452 btrfs_err(fs_info, 7453 "invalid number of stripes %u in sys_array at offset %u", 7454 num_stripes, cur_offset); 7455 ret = -EIO; 7456 break; 7457 } 7458 7459 type = btrfs_chunk_type(sb, chunk); 7460 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) { 7461 btrfs_err(fs_info, 7462 "invalid chunk type %llu in sys_array at offset %u", 7463 type, cur_offset); 7464 ret = -EIO; 7465 break; 7466 } 7467 7468 len = btrfs_chunk_item_size(num_stripes); 7469 if (cur_offset + len > array_size) 7470 goto out_short_read; 7471 7472 ret = read_one_chunk(&key, sb, chunk); 7473 if (ret) 7474 break; 7475 7476 array_ptr += len; 7477 sb_array_offset += len; 7478 cur_offset += len; 7479 } 7480 clear_extent_buffer_uptodate(sb); 7481 free_extent_buffer_stale(sb); 7482 return ret; 7483 7484 out_short_read: 7485 btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u", 7486 len, cur_offset); 7487 clear_extent_buffer_uptodate(sb); 7488 free_extent_buffer_stale(sb); 7489 return -EIO; 7490 } 7491 7492 /* 7493 * Check if all chunks in the fs are OK for read-write degraded mount 7494 * 7495 * If the @failing_dev is specified, it's 
accounted as missing. 7496 * 7497 * Return true if all chunks meet the minimal RW mount requirements. 7498 * Return false if any chunk doesn't meet the minimal RW mount requirements. 7499 */ 7500 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info, 7501 struct btrfs_device *failing_dev) 7502 { 7503 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 7504 struct extent_map *em; 7505 u64 next_start = 0; 7506 bool ret = true; 7507 7508 read_lock(&map_tree->lock); 7509 em = lookup_extent_mapping(map_tree, 0, (u64)-1); 7510 read_unlock(&map_tree->lock); 7511 /* No chunk at all? Return false anyway */ 7512 if (!em) { 7513 ret = false; 7514 goto out; 7515 } 7516 while (em) { 7517 struct map_lookup *map; 7518 int missing = 0; 7519 int max_tolerated; 7520 int i; 7521 7522 map = em->map_lookup; 7523 max_tolerated = 7524 btrfs_get_num_tolerated_disk_barrier_failures( 7525 map->type); 7526 for (i = 0; i < map->num_stripes; i++) { 7527 struct btrfs_device *dev = map->stripes[i].dev; 7528 7529 if (!dev || !dev->bdev || 7530 test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) || 7531 dev->last_flush_error) 7532 missing++; 7533 else if (failing_dev && failing_dev == dev) 7534 missing++; 7535 } 7536 if (missing > max_tolerated) { 7537 if (!failing_dev) 7538 btrfs_warn(fs_info, 7539 "chunk %llu missing %d devices, max tolerance is %d for writable mount", 7540 em->start, missing, max_tolerated); 7541 free_extent_map(em); 7542 ret = false; 7543 goto out; 7544 } 7545 next_start = extent_map_end(em); 7546 free_extent_map(em); 7547 7548 read_lock(&map_tree->lock); 7549 em = lookup_extent_mapping(map_tree, next_start, 7550 (u64)(-1) - next_start); 7551 read_unlock(&map_tree->lock); 7552 } 7553 out: 7554 return ret; 7555 } 7556 7557 static void readahead_tree_node_children(struct extent_buffer *node) 7558 { 7559 int i; 7560 const int nr_items = btrfs_header_nritems(node); 7561 7562 for (i = 0; i < nr_items; i++) 7563 btrfs_readahead_node_child(node, i); 7564 } 7565 7566 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) 7567 { 7568 struct btrfs_root *root = fs_info->chunk_root; 7569 struct btrfs_path *path; 7570 struct extent_buffer *leaf; 7571 struct btrfs_key key; 7572 struct btrfs_key found_key; 7573 int ret; 7574 int slot; 7575 u64 total_dev = 0; 7576 u64 last_ra_node = 0; 7577 7578 path = btrfs_alloc_path(); 7579 if (!path) 7580 return -ENOMEM; 7581 7582 /* 7583 * uuid_mutex is needed only if we are mounting a sprout FS, 7584 * otherwise we don't need it. 7585 */ 7586 mutex_lock(&uuid_mutex); 7587 7588 /* 7589 * It is possible for mount and umount to race in such a way that 7590 * we execute this code path, but open_fs_devices failed to clear 7591 * total_rw_bytes. We certainly want it cleared before reading the 7592 * device items, so clear it here. 7593 */ 7594 fs_info->fs_devices->total_rw_bytes = 0; 7595 7596 /* 7597 * Lockdep complains about possible circular locking dependency between 7598 * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores 7599 * used for freeze protection of a fs (struct super_block.s_writers), 7600 * which we take when starting a transaction, and extent buffers of the 7601 * chunk tree if we call read_one_dev() while holding a lock on an 7602 * extent buffer of the chunk tree. Since we are mounting the filesystem 7603 * and at this point there can't be any concurrent task modifying the 7604 * chunk tree, to keep it simple, just skip locking on the chunk tree. 
7605 */ 7606 ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags)); 7607 path->skip_locking = 1; 7608 7609 /* 7610 * Read all device items, and then all the chunk items. All 7611 * device items are found before any chunk item (their object id 7612 * is smaller than the lowest possible object id for a chunk 7613 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID). 7614 */ 7615 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 7616 key.offset = 0; 7617 key.type = 0; 7618 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 7619 if (ret < 0) 7620 goto error; 7621 while (1) { 7622 struct extent_buffer *node; 7623 7624 leaf = path->nodes[0]; 7625 slot = path->slots[0]; 7626 if (slot >= btrfs_header_nritems(leaf)) { 7627 ret = btrfs_next_leaf(root, path); 7628 if (ret == 0) 7629 continue; 7630 if (ret < 0) 7631 goto error; 7632 break; 7633 } 7634 node = path->nodes[1]; 7635 if (node) { 7636 if (last_ra_node != node->start) { 7637 readahead_tree_node_children(node); 7638 last_ra_node = node->start; 7639 } 7640 } 7641 btrfs_item_key_to_cpu(leaf, &found_key, slot); 7642 if (found_key.type == BTRFS_DEV_ITEM_KEY) { 7643 struct btrfs_dev_item *dev_item; 7644 dev_item = btrfs_item_ptr(leaf, slot, 7645 struct btrfs_dev_item); 7646 ret = read_one_dev(leaf, dev_item); 7647 if (ret) 7648 goto error; 7649 total_dev++; 7650 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { 7651 struct btrfs_chunk *chunk; 7652 7653 /* 7654 * We are only called at mount time, so no need to take 7655 * fs_info->chunk_mutex. Plus, to avoid lockdep warnings, 7656 * we always lock first fs_info->chunk_mutex before 7657 * acquiring any locks on the chunk tree. This is a 7658 * requirement for chunk allocation, see the comment on 7659 * top of btrfs_chunk_alloc() for details. 7660 */ 7661 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 7662 ret = read_one_chunk(&found_key, leaf, chunk); 7663 if (ret) 7664 goto error; 7665 } 7666 path->slots[0]++; 7667 } 7668 7669 /* 7670 * After loading chunk tree, we've got all device information, 7671 * do another round of validation checks. 

void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	struct btrfs_device *device;

	fs_devices->fs_info = fs_info;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list)
		device->fs_info = fs_info;

	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		list_for_each_entry(device, &seed_devs->devices, dev_list)
			device->fs_info = fs_info;

		seed_devs->fs_info = fs_info;
	}
	mutex_unlock(&fs_devices->device_list_mutex);
}

static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
				 const struct btrfs_dev_stats_item *ptr,
				 int index)
{
	u64 val;

	read_extent_buffer(eb, &val,
			   offsetof(struct btrfs_dev_stats_item, values) +
			    ((unsigned long)ptr) + (index * sizeof(u64)),
			   sizeof(val));
	return val;
}

static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
				      struct btrfs_dev_stats_item *ptr,
				      int index, u64 val)
{
	write_extent_buffer(eb, &val,
			    offsetof(struct btrfs_dev_stats_item, values) +
			     ((unsigned long)ptr) + (index * sizeof(u64)),
			    sizeof(val));
}
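
/*
 * Note: the two helpers above compute the byte offset of values[index]
 * manually because @ptr is an offset within the extent buffer, not a
 * dereferenceable pointer. The on-disk item is simply an array of
 * BTRFS_DEV_STAT_VALUES_MAX little-endian u64 counters; items smaller than
 * the current structure (e.g. from older filesystems) are tolerated by the
 * item_size checks below.
 */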

static int btrfs_device_init_dev_stats(struct btrfs_device *device,
				       struct btrfs_path *path)
{
	struct btrfs_dev_stats_item *ptr;
	struct extent_buffer *eb;
	struct btrfs_key key;
	int item_size;
	int i, ret, slot;

	if (!device->fs_info->dev_root)
		return 0;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;
	ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0);
	if (ret) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			btrfs_dev_stat_set(device, i, 0);
		device->dev_stats_valid = 1;
		btrfs_release_path(path);
		return ret < 0 ? ret : 0;
	}
	slot = path->slots[0];
	eb = path->nodes[0];
	item_size = btrfs_item_size(eb, slot);

	ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item);

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
		if (item_size >= (1 + i) * sizeof(__le64))
			btrfs_dev_stat_set(device, i,
					   btrfs_dev_stats_value(eb, ptr, i));
		else
			btrfs_dev_stat_set(device, i, 0);
	}

	device->dev_stats_valid = 1;
	btrfs_dev_stat_print_on_load(device);
	btrfs_release_path(path);

	return 0;
}

int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		ret = btrfs_device_init_dev_stats(device, path);
		if (ret)
			goto out;
	}
	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		list_for_each_entry(device, &seed_devs->devices, dev_list) {
			ret = btrfs_device_init_dev_stats(device, path);
			if (ret)
				goto out;
		}
	}
out:
	mutex_unlock(&fs_devices->device_list_mutex);

	btrfs_free_path(path);
	return ret;
}

static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn_in_rcu(fs_info,
			"error %d while searching for dev_stats item for device %s",
			ret, rcu_str_deref(device->name));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* Need to delete the old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn_in_rcu(fs_info,
				"delete too small dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* Need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn_in_rcu(fs_info,
				"insert dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}
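
/*
 * Note on update_dev_stat_item() above: btrfs_search_slot() returns 1 when
 * the key was not found, which is reused here as "need to insert". An
 * existing item smaller than sizeof(struct btrfs_dev_stats_item) (an older
 * format) is deleted and ret is set to 1 so the same insert path recreates
 * it at full size.
 */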

/*
 * Called from commit_transaction. Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		stats_cnt = atomic_read(&device->dev_stats_ccnt);
		if (!device->dev_stats_valid || stats_cnt == 0)
			continue;

		/*
		 * There is a LOAD-LOAD control dependency between the value of
		 * dev_stats_ccnt and updating the on-disk values which requires
		 * reading the in-memory counters. Such control dependencies
		 * require explicit read memory barriers.
		 *
		 * This memory barrier pairs with smp_mb__before_atomic in
		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
		 * barrier implied by atomic_xchg in
		 * btrfs_dev_stats_read_and_reset.
		 */
		smp_rmb();

		ret = update_dev_stat_item(trans, device);
		if (!ret)
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}
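
/*
 * Note: subtracting the sampled @stats_cnt instead of resetting
 * dev_stats_ccnt to zero means increments that race in while the item is
 * being written are preserved, so the next transaction commit will write
 * them out.
 */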

void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);
	btrfs_dev_stat_print_on_error(dev);
}

static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
	if (!dev->dev_stats_valid)
		return;
	btrfs_err_rl_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			   rcu_str_deref(dev->name),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* All values == 0, suppress message */

	btrfs_info_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
		   rcu_str_deref(dev->name),
		   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
		   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
		   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
		   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
		   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	BTRFS_DEV_LOOKUP_ARGS(args);
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	args.devid = stats->devid;
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_set(dev, i, 0);
		}
		btrfs_info(fs_info, "device stats zeroed by %s (%d)",
			   current->comm, task_pid_nr(current));
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}

/*
 * Update the size and bytes used for each device where it changed. This is
 * delayed since we would otherwise get errors while writing out the
 * superblocks.
 *
 * Must be invoked during transaction commit.
 */
void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
{
	struct btrfs_device *curr, *next;

	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);

	if (list_empty(&trans->dev_update_list))
		return;

	/*
	 * We don't need the device_list_mutex here. This list is owned by the
	 * transaction and the transaction must complete before the device is
	 * released.
	 */
	mutex_lock(&trans->fs_info->chunk_mutex);
	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
				 post_commit_list) {
		list_del_init(&curr->post_commit_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
		curr->commit_bytes_used = curr->bytes_used;
	}
	mutex_unlock(&trans->fs_info->chunk_mutex);
}

/*
 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
 */
int btrfs_bg_type_to_factor(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	return btrfs_raid_array[index].ncopies;
}
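
/*
 * Per btrfs_raid_array, SINGLE and RAID0 map to a factor of 1, DUP, RAID1
 * and RAID10 to 2, and RAID1C3/RAID1C4 to 3 and 4 respectively. RAID5 and
 * RAID6 also report 1 here since their redundancy comes from parity
 * (nparity), not extra copies.
 */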

static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
				 u64 chunk_offset, u64 devid,
				 u64 physical_offset, u64 physical_len)
{
	struct btrfs_dev_lookup_args args = { .devid = devid };
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *dev;
	u64 stripe_len;
	bool found = false;
	int ret = 0;
	int i;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
		goto out;
	}

	map = em->map_lookup;
	stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
	if (physical_len != stripe_len) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
			  physical_offset, devid, em->start, physical_len,
			  stripe_len);
		ret = -EUCLEAN;
		goto out;
	}

	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->devid == devid &&
		    map->stripes[i].physical == physical_offset) {
			found = true;
			if (map->verified_stripes >= map->num_stripes) {
				btrfs_err(fs_info,
				"too many dev extents for chunk %llu found",
					  em->start);
				ret = -EUCLEAN;
				goto out;
			}
			map->verified_stripes++;
			break;
		}
	}
	if (!found) {
		btrfs_err(fs_info,
	"dev extent physical offset %llu devid %llu has no corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
	}

	/* Make sure no dev extent is beyond device boundary */
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	if (!dev) {
		btrfs_err(fs_info, "failed to find devid %llu", devid);
		ret = -EUCLEAN;
		goto out;
	}

	if (physical_offset + physical_len > dev->disk_total_bytes) {
		btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
			  devid, physical_offset, physical_len,
			  dev->disk_total_bytes);
		ret = -EUCLEAN;
		goto out;
	}

	if (dev->zone_info) {
		u64 zone_size = dev->zone_info->zone_size;

		if (!IS_ALIGNED(physical_offset, zone_size) ||
		    !IS_ALIGNED(physical_len, zone_size)) {
			btrfs_err(fs_info,
"zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
				  devid, physical_offset, physical_len);
			ret = -EUCLEAN;
			goto out;
		}
	}

out:
	free_extent_map(em);
	return ret;
}

static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct rb_node *node;
	int ret = 0;

	read_lock(&em_tree->lock);
	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
		em = rb_entry(node, struct extent_map, rb_node);
		if (em->map_lookup->num_stripes !=
		    em->map_lookup->verified_stripes) {
			btrfs_err(fs_info,
			"chunk %llu has missing dev extent, have %d expect %d",
				  em->start, em->map_lookup->verified_stripes,
				  em->map_lookup->num_stripes);
			ret = -EUCLEAN;
			goto out;
		}
	}
out:
	read_unlock(&em_tree->lock);
	return ret;
}
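
/*
 * The two helpers above cross-check in both directions:
 * verify_one_dev_extent() bumps map->verified_stripes for every dev extent
 * that matches a chunk stripe, and verify_chunk_dev_extent_mapping() then
 * confirms every chunk had all of its stripes accounted for, catching both
 * orphaned dev extents and chunks with missing dev extents.
 */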

/*
 * Ensure that all dev extents are mapped to the correct chunk, otherwise
 * later chunk allocation/free would cause unexpected behavior.
 *
 * NOTE: This will iterate through the whole device tree, which should be
 * about the same size as the chunk tree. This slightly increases mount time.
 */
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	u64 prev_devid = 0;
	u64 prev_dev_ext_end = 0;
	int ret = 0;

	/*
	 * We don't have a dev_root if we mounted with ignorebadroots and
	 * failed to load the root, so we want to skip the verification in
	 * this case for sure.
	 *
	 * However if the dev root is fine, but the tree itself is corrupted,
	 * we'd still fail to mount. This verification is only to make sure
	 * writes can happen safely, so instead just bypass this check
	 * completely in the case of IGNOREBADROOTS.
	 */
	if (btrfs_test_opt(fs_info, IGNOREBADROOTS))
		return 0;

	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	while (1) {
		struct extent_buffer *leaf = path->nodes[0];
		struct btrfs_dev_extent *dext;
		int slot = path->slots[0];
		u64 chunk_offset;
		u64 physical_offset;
		u64 physical_len;
		u64 devid;

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			break;
		devid = key.objectid;
		physical_offset = key.offset;

		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
		physical_len = btrfs_dev_extent_length(leaf, dext);

		/* Check if this dev extent overlaps with the previous one */
		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
			btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
				  devid, physical_offset, prev_dev_ext_end);
			ret = -EUCLEAN;
			goto out;
		}

		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
					    physical_offset, physical_len);
		if (ret < 0)
			goto out;
		prev_devid = devid;
		prev_dev_ext_end = physical_offset + physical_len;

		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}

	/* Ensure all chunks have corresponding dev extents */
	ret = verify_chunk_dev_extent_mapping(fs_info);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Check whether the given block group or device is pinned by any inode being
 * used as a swapfile.
 */
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
{
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = fs_info->swapfile_pins.rb_node;
	while (node) {
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (ptr < sp->ptr)
			node = node->rb_left;
		else if (ptr > sp->ptr)
			node = node->rb_right;
		else
			break;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
	return node != NULL;
}

static int relocating_repair_kthread(void *data)
{
	struct btrfs_block_group *cache = (struct btrfs_block_group *)data;
	struct btrfs_fs_info *fs_info = cache->fs_info;
	u64 target;
	int ret = 0;

	target = cache->start;
	btrfs_put_block_group(cache);

	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
		btrfs_info(fs_info,
			   "zoned: skip relocating block group %llu to repair: EBUSY",
			   target);
		return -EBUSY;
	}

	mutex_lock(&fs_info->reclaim_bgs_lock);

	/* Ensure the block group still exists */
	cache = btrfs_lookup_block_group(fs_info, target);
	if (!cache)
		goto out;

	if (!cache->relocating_repair)
		goto out;

	ret = btrfs_may_alloc_data_chunk(fs_info, target);
	if (ret < 0)
		goto out;

	btrfs_info(fs_info,
		   "zoned: relocating block group %llu to repair IO failure",
		   target);
	ret = btrfs_relocate_chunk(fs_info, target);

out:
	if (cache)
		btrfs_put_block_group(cache);
	mutex_unlock(&fs_info->reclaim_bgs_lock);
	btrfs_exclop_finish(fs_info);

	return ret;
}
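
/*
 * Note: the kthread above takes the BTRFS_EXCLOP_BALANCE exclusive op
 * because chunk relocation must not run concurrently with balance or
 * device add/remove/replace; if another exclusive operation is already
 * running, the repair attempt is skipped with -EBUSY.
 */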

int btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
{
	struct btrfs_block_group *cache;

	/* Do not attempt to repair in degraded state */
	if (btrfs_test_opt(fs_info, DEGRADED))
		return 0;

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return 0;

	spin_lock(&cache->lock);
	if (cache->relocating_repair) {
		spin_unlock(&cache->lock);
		btrfs_put_block_group(cache);
		return 0;
	}
	cache->relocating_repair = 1;
	spin_unlock(&cache->lock);

	kthread_run(relocating_repair_kthread, cache,
		    "btrfs-relocating-repair");

	return 0;
}