// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include <linux/namei.h>
#include "misc.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"
#include "discard.h"
#include "zoned.h"

#define BTRFS_BLOCK_GROUP_STRIPE_MASK	(BTRFS_BLOCK_GROUP_RAID0 | \
					 BTRFS_BLOCK_GROUP_RAID10 | \
					 BTRFS_BLOCK_GROUP_RAID56_MASK)

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes = 2,
		.dev_stripes = 1,
		.devs_max = 0,	/* 0 == as many as possible */
		.devs_min = 2,
		.tolerated_failures = 1,
		.devs_increment = 2,
		.ncopies = 2,
		.nparity = 0,
		.raid_name = "raid10",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 2,
		.devs_min = 2,
		.tolerated_failures = 1,
		.devs_increment = 2,
		.ncopies = 2,
		.nparity = 0,
		.raid_name = "raid1",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C3] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 3,
		.devs_min = 3,
		.tolerated_failures = 2,
		.devs_increment = 3,
		.ncopies = 3,
		.nparity = 0,
		.raid_name = "raid1c3",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID1C3,
		.mindev_error = BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C4] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 4,
		.devs_min = 4,
		.tolerated_failures = 3,
		.devs_increment = 4,
		.ncopies = 4,
		.nparity = 0,
		.raid_name = "raid1c4",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID1C4,
		.mindev_error = BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes = 1,
		.dev_stripes = 2,
		.devs_max = 1,
		.devs_min = 1,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 2,
		.nparity = 0,
		.raid_name = "dup",
		.bg_flag = BTRFS_BLOCK_GROUP_DUP,
		.mindev_error = 0,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 1,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 0,
		.raid_name = "raid0",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error = 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 1,
		.devs_min = 1,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 0,
		.raid_name = "single",
		.bg_flag = 0,
		.mindev_error = 0,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 2,
		.tolerated_failures = 1,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 1,
		.raid_name = "raid5",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 3,
		.tolerated_failures = 2,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 2,
		.raid_name = "raid6",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};
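
/*
 * Illustrative use of the table above (added example, not part of the
 * original source): btrfs_bg_flags_to_raid_index() below maps block group
 * flags to an index into btrfs_raid_array[], e.g.:
 *
 *	enum btrfs_raid_types idx;
 *
 *	idx = btrfs_bg_flags_to_raid_index(BTRFS_BLOCK_GROUP_RAID1);
 *	// btrfs_raid_array[idx].ncopies == 2, .devs_min == 2 for "raid1"
 */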

/*
 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
 * can be used as index to access btrfs_raid_array[].
 */
enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags)
{
	if (flags & BTRFS_BLOCK_GROUP_RAID10)
		return BTRFS_RAID_RAID10;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
		return BTRFS_RAID_RAID1;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1C3)
		return BTRFS_RAID_RAID1C3;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1C4)
		return BTRFS_RAID_RAID1C4;
	else if (flags & BTRFS_BLOCK_GROUP_DUP)
		return BTRFS_RAID_DUP;
	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
		return BTRFS_RAID_RAID0;
	else if (flags & BTRFS_BLOCK_GROUP_RAID5)
		return BTRFS_RAID_RAID5;
	else if (flags & BTRFS_BLOCK_GROUP_RAID6)
		return BTRFS_RAID_RAID6;

	return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
}

const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}

/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

#define DESCRIBE_FLAG(flag, desc)					\
	do {								\
		if (flags & (flag)) {					\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)			\
				goto out_overflow;			\
			size_bp -= ret;					\
			bp += ret;					\
			flags &= ~(flag);				\
		}							\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide a
	 * sufficiently large buffer
	 */
out_overflow:;
}
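
/*
 * Example (illustrative, not from the original source): for
 * bg_flags == (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1) the
 * function above fills @buf with "data|raid1". With a buffer that is too
 * small, the output is silently truncated at the last flag that still fit.
 */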

static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_io_context **bioc_ret,
			     int mirror_num, int need_raid_map);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list in general
 * but in mount context it could be used to exclude list modifications by eg.
 * scan ioctl
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, ie. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * Is not required at mount and close times, because our device list is
 * protected by the uuid_mutex at that point.
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   device_list_mutex
 *     chunk_mutex
 *   balance_mutex
 *
 *
 * Exclusive operations
 * ====================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * The status of exclusive operation is set and cleared atomically.
 * During the course of Paused state, fs_info::exclusive_operation remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * The exclusive status is cleared when the device operation is canceled or
 * completed.
 */
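
/*
 * A minimal sketch (illustrative only, not a real call path from this file)
 * of the nesting order documented above; code that needs both the global
 * lock and the per-fs list lock must take them outermost-first:
 *
 *	mutex_lock(&uuid_mutex);
 *	mutex_lock(&fs_devices->device_list_mutex);
 *	// ... manipulate fs_devices->devices ...
 *	mutex_unlock(&fs_devices->device_list_mutex);
 *	mutex_unlock(&uuid_mutex);
 */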

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:		if not NULL, copy the UUID to fs_devices::fsid
 * @metadata_fsid:	if not NULL, copy the UUID to fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
						 const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	INIT_LIST_HEAD(&fs_devs->seed_list);
	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);

	if (metadata_fsid)
		memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
	else if (fsid)
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);

	return fs_devs;
}
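
/*
 * Usage sketch (illustrative; mirrors how device_list_add() below calls this
 * helper when a scanned device does not match any known filesystem):
 *
 *	fs_devs = alloc_fs_devices(disk_super->fsid, NULL);
 *	if (IS_ERR(fs_devs))
 *		return ERR_CAST(fs_devs);
 *	// fs_devs is not linked anywhere yet; add it to fs_uuids under
 *	// the appropriate locks before it becomes visible
 */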

void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	extent_io_tree_release(&device->alloc_state);
	bio_put(device->flush_bio);
	btrfs_destroy_dev_zone_info(device);
	kfree(device);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}

void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}

static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (metadata_fsid) {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
			    && memcmp(metadata_fsid, fs_devices->metadata_uuid,
				      BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		} else {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		}
	}
	return NULL;
}

static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
				struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by first scanning
	 * a device which didn't have its fsid/metadata_uuid changed
	 * at all and the CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(disk_super->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}
	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by a device that
	 * has an outdated pair of fsid/metadata_uuid and
	 * CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(fs_devices->metadata_uuid,
			   fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
		    memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
}

static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct btrfs_super_block **disk_super)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		sync_blockdev(*bdev);
	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*disk_super = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*disk_super)) {
		ret = PTR_ERR(*disk_super);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	return ret;
}
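
/*
 * Typical call pattern (illustrative; this mirrors how
 * btrfs_open_one_device() below uses the helper):
 *
 *	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
 *				    &bdev, &disk_super);
 *	if (ret)
 *		return ret;
 *	// on success the caller owns both references:
 *	btrfs_release_disk_super(disk_super);
 *	blkdev_put(bdev, flags);
 */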

/*
 * Check if the device with the given @path matches the device described by
 * the given struct btrfs_device.
 *
 * Returns:
 *   true  If it is the same device.
 *   false If it is not the same device or on error.
 */
static bool device_matched(const struct btrfs_device *device, const char *path)
{
	char *device_name;
	dev_t dev_old;
	dev_t dev_new;
	int ret;

	/*
	 * If we are looking for a device with the matching dev_t, then skip
	 * device without a name (a missing device).
	 */
	if (!device->name)
		return false;

	device_name = kzalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
	if (!device_name)
		return false;

	rcu_read_lock();
	scnprintf(device_name, BTRFS_PATH_NAME_MAX, "%s", rcu_str_deref(device->name));
	rcu_read_unlock();

	ret = lookup_bdev(device_name, &dev_old);
	kfree(device_name);
	if (ret)
		return false;

	ret = lookup_bdev(path, &dev_new);
	if (ret)
		return false;

	if (dev_old == dev_new)
		return true;

	return false;
}

/*
 * Search and remove all stale devices (devices which are not mounted).
 * When both inputs are NULL, it will search and release all stale devices.
 *
 * path:	Optional. When provided, it will release all unmounted devices
 *		matching this path only.
 * skip_device:	Optional. Will skip this device when searching for the stale
 *		devices.
 *
 * Return:	0 for success or if @path is NULL.
 *		-EBUSY if @path is a mounted device.
 *		-ENOENT if @path does not match any device in the list.
 */
static int btrfs_free_stale_devices(const char *path,
				    struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	if (path)
		ret = -ENOENT;

	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (path && !device_matched(device, path))
				continue;
			if (fs_devices->opened) {
				/* for an already deleted device return 0 */
				if (path && ret != 0)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			ret = 0;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	return ret;
}

/*
 * This is only used on mount, and we are protected from competing things
 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 * fs_devices->device_list_mutex here.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
				 struct btrfs_device *device, fmode_t flags,
				 void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev, &disk_super);
	if (ret)
		return ret;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_free_page;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_free_page;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_free_page;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = true;
	} else {
		if (bdev_read_only(bdev))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	q = bdev_get_queue(bdev);
	if (!blk_queue_nonrot(q))
		fs_devices->rotating = true;

	device->bdev = bdev;
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	device->mode = flags;

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	btrfs_release_disk_super(disk_super);

	return 0;

error_free_page:
	btrfs_release_disk_super(disk_super);
	blkdev_put(bdev, flags);

	return -EINVAL;
}

/*
 * Handle scanned device having its CHANGING_FSID_V2 flag set and the
 * fs_devices being created with a disk that has already completed its fsid
 * change. Such disk can belong to an fs which has its FSID changed or to one
 * which doesn't. Handle both cases here.
 */
static struct btrfs_fs_devices *find_fsid_inprogress(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, NULL);
}

static struct btrfs_fs_devices *find_fsid_changed(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handles the case where the scanned device is part of an fs that had
	 * multiple successful changes of FSID but the current device didn't
	 * observe it. Meaning our fsid will be different than theirs. We need
	 * to handle two subcases:
	 *  1 - The fs still continues to have different METADATA/FSID uuids.
	 *  2 - The fs is switched back to its original FSID (METADATA/FSID
	 *  are equal).
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		/* Changed UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->fsid,
			   BTRFS_FSID_SIZE) != 0)
			return fs_devices;

		/* Unchanged UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}

	return NULL;
}

static struct btrfs_fs_devices *find_fsid_reverted_metadata(
				struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle the case where the scanned device is part of an fs whose last
	 * metadata UUID change reverted it to the original FSID. At the same
	 * time fs_devices was first created by another constituent device
	 * which didn't fully observe the operation. This results in a
	 * btrfs_fs_devices created with metadata/fsid different AND
	 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
	 * fs_devices equal to the FSID of the disk.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    fs_devices->fsid_change)
			return fs_devices;
	}

	return NULL;
}
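
/*
 * Summary of the fsid-change lookup helpers above (added for illustration;
 * it restates the dispatch done at the top of device_list_add() below):
 *
 *	CHANGING_FSID_V2 set,  no METADATA_UUID  -> find_fsid_inprogress()
 *	CHANGING_FSID_V2 set,  METADATA_UUID set -> find_fsid_changed()
 *	flag clear,            METADATA_UUID set -> find_fsid_with_metadata_uuid()
 *	flag clear,            no METADATA_UUID  -> find_fsid_reverted_metadata(),
 *	                                            falling back to find_fsid()
 */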

/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
					BTRFS_SUPER_FLAG_CHANGING_FSID_V2);

	if (fsid_change_in_progress) {
		if (!has_metadata_uuid)
			fs_devices = find_fsid_inprogress(disk_super);
		else
			fs_devices = find_fsid_changed(disk_super);
	} else if (has_metadata_uuid) {
		fs_devices = find_fsid_with_metadata_uuid(disk_super);
	} else {
		fs_devices = find_fsid_reverted_metadata(disk_super);
		if (!fs_devices)
			fs_devices = find_fsid(disk_super->fsid, NULL);
	}

	if (!fs_devices) {
		if (has_metadata_uuid)
			fs_devices = alloc_fs_devices(disk_super->fsid,
						      disk_super->metadata_uuid);
		else
			fs_devices = alloc_fs_devices(disk_super->fsid, NULL);

		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		fs_devices->fsid_change = fsid_change_in_progress;

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		struct btrfs_dev_lookup_args args = {
			.devid = devid,
			.uuid = disk_super->dev_item.uuid,
		};

		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, &args);

		/*
		 * If this disk has been pulled into an fs devices created by
		 * a device which had the CHANGING_FSID_V2 flag then replace the
		 * metadata_uuid/fsid values of the fs_devices.
		 */
		if (fs_devices->fsid_change &&
		    found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
			       BTRFS_FSID_SIZE);

			if (has_metadata_uuid)
				memcpy(fs_devices->metadata_uuid,
				       disk_super->metadata_uuid,
				       BTRFS_FSID_SIZE);
			else
				memcpy(fs_devices->metadata_uuid,
				       disk_super->fsid, BTRFS_FSID_SIZE);

			fs_devices->fsid_change = false;
		}
	}

	if (!device) {
		if (fs_devices->opened) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			btrfs_free_device(device);
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_assign_pointer(device->name, name);

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info(
	"BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->label, devid, found_transid, path,
				current->comm, task_pid_nr(current));
		else
			pr_info(
	"BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->fsid, devid, found_transid, path,
				current->comm, task_pid_nr(current));

	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *	a. The same device disappeared and reappeared with
		 *	   different name. or
		 *	b. The missing-disk-which-was-replaced, has
		 *	   reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transaction when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted. We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with same uuid and devid. We keep the one
			 * with larger generation number or the last-in if
			 * generations are equal.
			 */
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EEXIST);
		}

		/*
		 * We are going to replace the device path for a given devid,
		 * make sure it's the same device if the device is mounted
		 */
		if (device->bdev) {
			int error;
			dev_t path_dev;

			error = lookup_bdev(path, &path_dev);
			if (error) {
				mutex_unlock(&fs_devices->device_list_mutex);
				return ERR_PTR(error);
			}

			if (device->bdev->bd_dev != path_dev) {
				mutex_unlock(&fs_devices->device_list_mutex);
				/*
				 * device->fs_info may not be reliable here, so
				 * pass in a NULL instead. This avoids a
				 * possible use-after-free when the fs_info and
				 * fs_info->sb are already torn down.
				 */
				btrfs_warn_in_rcu(NULL,
	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
						  path, devid, found_transid,
						  current->comm,
						  task_pid_nr(current));
				return ERR_PTR(-EEXIST);
			}
			btrfs_info_in_rcu(device->fs_info,
	"devid %llu device path %s changed to %s scanned by %s (%d)",
					  devid, rcu_str_deref(device->name),
					  path, current->comm,
					  task_pid_nr(current));
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
	}
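
	/*
	 * Illustrative scenario for case 2a above (added example, not from
	 * the original source): a disk registered as /dev/sdb is unplugged
	 * and re-enumerated as /dev/sdc; a rescan reaches the rcu string
	 * swap above and device->name is updated from "/dev/sdb" to
	 * "/dev/sdc", while the devid/uuid and generation checks guarantee
	 * it is still the same physical device.
	 */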

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);
	return device;
}

static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	fs_devices = alloc_fs_devices(orig->fsid, NULL);
	if (IS_ERR(fs_devices))
		return fs_devices;

	fs_devices->total_devices = orig->total_devices;

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device)) {
			ret = PTR_ERR(device);
			goto error;
		}

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str,
						 GFP_KERNEL);
			if (!name) {
				btrfs_free_device(device);
				ret = -ENOMEM;
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(ret);
}

static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
				      struct btrfs_device **latest_dev)
{
	struct btrfs_device *device, *next;

	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			    !test_bit(BTRFS_DEV_STATE_MISSING,
				      &device->dev_state) &&
			    (!*latest_dev ||
			     device->generation > (*latest_dev)->generation)) {
				*latest_dev = device;
			}
			continue;
		}

		/*
		 * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID,
		 * in btrfs_init_dev_replace() so just continue.
		 */
		if (device->devid == BTRFS_DEV_REPLACE_DEVID)
			continue;

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}
}

/*
 * After we have read the system tree and know devids belonging to this
 * filesystem, remove the device which does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_fs_devices *seed_dev;

	mutex_lock(&uuid_mutex);
	__btrfs_free_extra_devids(fs_devices, &latest_dev);

	list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
		__btrfs_free_extra_devids(seed_dev, &latest_dev);

	fs_devices->latest_dev = latest_dev;

	mutex_unlock(&uuid_mutex);
}

static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	blkdev_put(device->bdev, device->mode);
}

static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->devid == BTRFS_DEV_REPLACE_DEVID)
		clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
		clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		fs_devices->missing_devices--;
	}

	btrfs_close_bdev(device);
	if (device->bdev) {
		fs_devices->open_devices--;
		device->bdev = NULL;
	}
	clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	btrfs_destroy_dev_zone_info(device);

	device->fs_info = NULL;
	atomic_set(&device->dev_stats_ccnt, 0);
	extent_io_tree_release(&device->alloc_state);

	/*
	 * Reset the flush error record. We might have a transient flush error
	 * in this mount, and if so we aborted the current transaction and set
	 * the fs to an error state, guaranteeing no super blocks can be further
	 * committed. However that error might be transient and if we unmount the
	 * filesystem and mount it again, we should allow the mount to succeed
	 * (btrfs_check_rw_degradable() should not fail) - if after mounting the
	 * filesystem again we still get flush errors, then we will again abort
	 * any transaction and set the error state, guaranteeing no commits of
	 * unsafe super blocks.
	 */
	device->last_flush_error = 0;

	/* Verify the device is back in a pristine state */
	ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
	ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	ASSERT(list_empty(&device->dev_alloc_list));
	ASSERT(list_empty(&device->post_commit_list));
}

static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	lockdep_assert_held(&uuid_mutex);

	if (--fs_devices->opened > 0)
		return;

	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
		btrfs_close_one_device(device);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = false;
	fs_devices->fs_info = NULL;
}

void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	LIST_HEAD(list);
	struct btrfs_fs_devices *tmp;

	mutex_lock(&uuid_mutex);
	close_fs_devices(fs_devices);
	if (!fs_devices->opened)
		list_splice_init(&fs_devices->seed_list, &list);

	list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
		close_fs_devices(fs_devices);
		list_del(&fs_devices->seed_list);
		free_fs_devices(fs_devices);
	}
	mutex_unlock(&uuid_mutex);
}
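
/*
 * Note on the opened counter (added for illustration): fs_devices->opened
 * acts as a plain open count paired between btrfs_open_devices() below and
 * btrfs_close_devices() above, with uuid_mutex held by the callers:
 *
 *	btrfs_open_devices(fs_devices, flags, holder);	// opened == 1
 *	btrfs_open_devices(fs_devices, flags, holder);	// opened == 2
 *	btrfs_close_devices(fs_devices);	// opened == 1, stays open
 *	btrfs_close_devices(fs_devices);	// opened == 0, devices closed
 */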

static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
			   fmode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_device *tmp_device;

	flags |= FMODE_EXCL;

	list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
				 dev_list) {
		int ret;

		ret = btrfs_open_one_device(fs_devices, device, flags, holder);
		if (ret == 0 &&
		    (!latest_dev || device->generation > latest_dev->generation)) {
			latest_dev = device;
		} else if (ret == -ENODATA) {
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);
		}
	}
	if (fs_devices->open_devices == 0)
		return -EINVAL;

	fs_devices->opened = 1;
	fs_devices->latest_dev = latest_dev;
	fs_devices->total_rw_bytes = 0;
	fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
	fs_devices->read_policy = BTRFS_READ_POLICY_PID;

	return 0;
}

static int devid_cmp(void *priv, const struct list_head *a,
		     const struct list_head *b)
{
	const struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);
	/*
	 * The device_list_mutex cannot be taken here in case opening the
	 * underlying device takes further locks like open_mutex.
	 *
	 * We also don't need the lock here as this is called during mount and
	 * exclusion is provided by uuid_mutex
	 */

	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}

	return ret;
}

void btrfs_release_disk_super(struct btrfs_super_block *super)
{
	struct page *page = virt_to_page(super);

	put_page(page);
}

static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
						       u64 bytenr, u64 bytenr_orig)
{
	struct btrfs_super_block *disk_super;
	struct page *page;
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= bdev_nr_bytes(bdev))
		return ERR_PTR(-EINVAL);

	/* make sure our super fits in the page */
	if (sizeof(*disk_super) > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
		return ERR_PTR(-EINVAL);

	/* pull in the page with our super */
	page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);

	if (IS_ERR(page))
		return ERR_CAST(page);

	p = page_address(page);

	/* align our pointer to the offset of the super block */
	disk_super = p + offset_in_page(bytenr);

	if (btrfs_super_bytenr(disk_super) != bytenr_orig ||
	    btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(p);
		return ERR_PTR(-EINVAL);
	}

	if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
		disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;

	return disk_super;
}

int btrfs_forget_devices(const char *path)
{
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_free_stale_devices(strlen(path) ? path : NULL, NULL);
	mutex_unlock(&uuid_mutex);

	return ret;
}

/*
 * Look for a btrfs signature on a device. This may be called out of the mount
 * path and we are not allowed to call set_blocksize during the scan. The
 * superblock is read via pagecache
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
					   void *holder)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct block_device *bdev;
	u64 bytenr, bytenr_orig;
	int ret;

	lockdep_assert_held(&uuid_mutex);

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	flags |= FMODE_EXCL;

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	bytenr_orig = btrfs_sb_offset(0);
	ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr);
	if (ret) {
		device = ERR_PTR(ret);
		goto error_bdev_put;
	}

	disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig);
	if (IS_ERR(disk_super)) {
		device = ERR_CAST(disk_super);
		goto error_bdev_put;
	}

	device = device_list_add(path, disk_super, &new_device_added);
	if (!IS_ERR(device)) {
		if (new_device_added)
			btrfs_free_stale_devices(path, device);
	}

	btrfs_release_disk_super(disk_super);

error_bdev_put:
	blkdev_put(bdev, flags);

	return device;
}

/*
 * Try to find a chunk that intersects [start, start + len] range and when one
 * such is found, record the end of it in *start
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{
	u64 physical_start, physical_end;

	lockdep_assert_held(&device->fs_info->chunk_mutex);

	if (!find_first_extent_bit(&device->alloc_state, *start,
				   &physical_start, &physical_end,
				   CHUNK_ALLOCATED, NULL)) {

		if (in_range(physical_start, *start, len) ||
		    in_range(*start, physical_start,
			     physical_end - physical_start)) {
			*start = physical_end + 1;
			return true;
		}
	}
	return false;
}

static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
{
	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		/*
		 * We don't want to overwrite the superblock on the drive nor
		 * any area used by the boot loader (grub for example), so we
		 * make sure to start at an offset of at least 1MB.
		 */
		return max_t(u64, start, SZ_1M);
	case BTRFS_CHUNK_ALLOC_ZONED:
		/*
		 * We don't care about the starting region like regular
		 * allocator, because we anyway use/reserve the first two zones
		 * for superblock logging.
		 */
		return ALIGN(start, device->zone_info->zone_size);
	default:
		BUG();
	}
}
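
/*
 * Worked example for the helper above (illustrative numbers, not from the
 * original source): with the regular policy, dev_extent_search_start(device, 0)
 * returns SZ_1M, keeping allocations clear of the superblock and boot loader
 * area; with the zoned policy on a device with a 256M zone size, a start of
 * 100M is rounded up to the zone boundary at 256M.
 */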

static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
					u64 *hole_start, u64 *hole_size,
					u64 num_bytes)
{
	u64 zone_size = device->zone_info->zone_size;
	u64 pos;
	int ret;
	bool changed = false;

	ASSERT(IS_ALIGNED(*hole_start, zone_size));

	while (*hole_size > 0) {
		pos = btrfs_find_allocatable_zones(device, *hole_start,
						   *hole_start + *hole_size,
						   num_bytes);
		if (pos != *hole_start) {
			*hole_size = *hole_start + *hole_size - pos;
			*hole_start = pos;
			changed = true;
			if (*hole_size < num_bytes)
				break;
		}

		ret = btrfs_ensure_empty_zones(device, pos, num_bytes);

		/* Range is ensured to be empty */
		if (!ret)
			return changed;

		/* Given hole range was invalid (outside of device) */
		if (ret == -ERANGE) {
			*hole_start += *hole_size;
			*hole_size = 0;
			return true;
		}

		*hole_start += zone_size;
		*hole_size -= zone_size;
		changed = true;
	}

	return changed;
}

/**
 * dev_extent_hole_check - check if specified hole is suitable for allocation
 * @device:	the device which we have the hole
 * @hole_start: starting position of the hole
 * @hole_size:	the size of the hole
 * @num_bytes:	the size of the free space that we need
 *
 * This function may modify @hole_start and @hole_size to reflect the suitable
 * position for allocation. Returns true if the hole position is updated,
 * false otherwise.
 */
static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
				  u64 *hole_size, u64 num_bytes)
{
	bool changed = false;
	u64 hole_end = *hole_start + *hole_size;

	for (;;) {
		/*
		 * Check before we set max_hole_start, otherwise we could end up
		 * sending back this offset anyway.
		 */
		if (contains_pending_extent(device, hole_start, *hole_size)) {
			if (hole_end >= *hole_start)
				*hole_size = hole_end - *hole_start;
			else
				*hole_size = 0;
			changed = true;
		}

		switch (device->fs_devices->chunk_alloc_policy) {
		case BTRFS_CHUNK_ALLOC_REGULAR:
			/* No extra check */
			break;
		case BTRFS_CHUNK_ALLOC_ZONED:
			if (dev_extent_hole_check_zoned(device, hole_start,
							hole_size, num_bytes)) {
				changed = true;
				/*
				 * The changed hole can contain pending extent.
				 * Loop again to check that.
				 */
				continue;
			}
			break;
		default:
			BUG();
		}

		break;
	}

	return changed;
}

/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find one. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 *
 * NOTE: This function will search *commit* root of device tree, and does extra
 * check to ensure dev extents are not double allocated.
 * This makes the function safe to allocate dev extents but may not report
 * correct usable device space, as device extent freed in current transaction
 * is not reported as available.
 */
static int find_free_dev_extent_start(struct btrfs_device *device,
				      u64 num_bytes, u64 search_start,
				      u64 *start, u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	search_start = dev_extent_search_start(device, search_start);

	WARN_ON(device->zone_info &&
		!IS_ALIGNED(num_bytes, device->zone_info->zone_size));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_backwards(root, &key, path);
	if (ret < 0)
		goto out;

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;
			dev_extent_hole_check(device, &search_start, &hole_size,
					      num_bytes);

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;
		if (dev_extent_hole_check(device, &search_start, &hole_size,
					  num_bytes)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(device, num_bytes, 0, start, len);
}
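
/*
 * Usage sketch (illustrative; this mirrors how a caller can probe a device
 * for room, with hypothetical sizes):
 *
 *	u64 start, len;
 *	int ret;
 *
 *	ret = find_free_dev_extent(device, SZ_1G, &start, &len);
 *	if (ret == -ENOSPC)
 *		// no 1G hole; start/len describe the largest hole found
 *	else if (ret == 0)
 *		// [start, start + 1G) is free as seen in the commit root
 */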

static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start, u64 *dev_extent_len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret == 0)
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
out:
	btrfs_free_path(path);
	return ret;
}

static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;

	em_tree = &fs_info->mapping_tree;
	read_lock(&em_tree->lock);
	n = rb_last(&em_tree->map.rb_root);
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
	}
	read_unlock(&em_tree->lock);

	return ret;
}

static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	if (ret == 0) {
		/* Corruption */
		btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
matched"); 1847 ret = -EUCLEAN; 1848 goto error; 1849 } 1850 1851 ret = btrfs_previous_item(fs_info->chunk_root, path, 1852 BTRFS_DEV_ITEMS_OBJECTID, 1853 BTRFS_DEV_ITEM_KEY); 1854 if (ret) { 1855 *devid_ret = 1; 1856 } else { 1857 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 1858 path->slots[0]); 1859 *devid_ret = found_key.offset + 1; 1860 } 1861 ret = 0; 1862 error: 1863 btrfs_free_path(path); 1864 return ret; 1865 } 1866 1867 /* 1868 * the device information is stored in the chunk root 1869 * the btrfs_device struct should be fully filled in 1870 */ 1871 static int btrfs_add_dev_item(struct btrfs_trans_handle *trans, 1872 struct btrfs_device *device) 1873 { 1874 int ret; 1875 struct btrfs_path *path; 1876 struct btrfs_dev_item *dev_item; 1877 struct extent_buffer *leaf; 1878 struct btrfs_key key; 1879 unsigned long ptr; 1880 1881 path = btrfs_alloc_path(); 1882 if (!path) 1883 return -ENOMEM; 1884 1885 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1886 key.type = BTRFS_DEV_ITEM_KEY; 1887 key.offset = device->devid; 1888 1889 btrfs_reserve_chunk_metadata(trans, true); 1890 ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path, 1891 &key, sizeof(*dev_item)); 1892 btrfs_trans_release_chunk_metadata(trans); 1893 if (ret) 1894 goto out; 1895 1896 leaf = path->nodes[0]; 1897 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 1898 1899 btrfs_set_device_id(leaf, dev_item, device->devid); 1900 btrfs_set_device_generation(leaf, dev_item, 0); 1901 btrfs_set_device_type(leaf, dev_item, device->type); 1902 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 1903 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 1904 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 1905 btrfs_set_device_total_bytes(leaf, dev_item, 1906 btrfs_device_get_disk_total_bytes(device)); 1907 btrfs_set_device_bytes_used(leaf, dev_item, 1908 btrfs_device_get_bytes_used(device)); 1909 btrfs_set_device_group(leaf, dev_item, 0); 1910 btrfs_set_device_seek_speed(leaf, dev_item, 0); 1911 btrfs_set_device_bandwidth(leaf, dev_item, 0); 1912 btrfs_set_device_start_offset(leaf, dev_item, 0); 1913 1914 ptr = btrfs_device_uuid(dev_item); 1915 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 1916 ptr = btrfs_device_fsid(dev_item); 1917 write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid, 1918 ptr, BTRFS_FSID_SIZE); 1919 btrfs_mark_buffer_dirty(leaf); 1920 1921 ret = 0; 1922 out: 1923 btrfs_free_path(path); 1924 return ret; 1925 } 1926 1927 /* 1928 * Function to update ctime/mtime for a given device path. 1929 * Mainly used for ctime/mtime based probe like libblkid. 1930 * 1931 * We don't care about errors here, this is just to be kind to userspace. 
1932  */
1933 static void update_dev_time(const char *device_path)
1934 {
1935 	struct path path;
1936 	struct timespec64 now;
1937 	int ret;
1938 
1939 	ret = kern_path(device_path, LOOKUP_FOLLOW, &path);
1940 	if (ret)
1941 		return;
1942 
1943 	now = current_time(d_inode(path.dentry));
1944 	inode_update_time(d_inode(path.dentry), &now, S_MTIME | S_CTIME);
1945 	path_put(&path);
1946 }
1947 
1948 static int btrfs_rm_dev_item(struct btrfs_device *device)
1949 {
1950 	struct btrfs_root *root = device->fs_info->chunk_root;
1951 	int ret;
1952 	struct btrfs_path *path;
1953 	struct btrfs_key key;
1954 	struct btrfs_trans_handle *trans;
1955 
1956 	path = btrfs_alloc_path();
1957 	if (!path)
1958 		return -ENOMEM;
1959 
1960 	trans = btrfs_start_transaction(root, 0);
1961 	if (IS_ERR(trans)) {
1962 		btrfs_free_path(path);
1963 		return PTR_ERR(trans);
1964 	}
1965 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1966 	key.type = BTRFS_DEV_ITEM_KEY;
1967 	key.offset = device->devid;
1968 
1969 	btrfs_reserve_chunk_metadata(trans, false);
1970 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1971 	btrfs_trans_release_chunk_metadata(trans);
1972 	if (ret) {
1973 		if (ret > 0)
1974 			ret = -ENOENT;
1975 		btrfs_abort_transaction(trans, ret);
1976 		btrfs_end_transaction(trans);
1977 		goto out;
1978 	}
1979 
1980 	ret = btrfs_del_item(trans, root, path);
1981 	if (ret) {
1982 		btrfs_abort_transaction(trans, ret);
1983 		btrfs_end_transaction(trans);
1984 	}
1985 
1986 out:
1987 	btrfs_free_path(path);
1988 	if (!ret)
1989 		ret = btrfs_commit_transaction(trans);
1990 	return ret;
1991 }
1992 
1993 /*
1994  * Verify that @num_devices satisfies the RAID profile constraints in the whole
1995  * filesystem. It's up to the caller to adjust that number regarding eg. device
1996  * replace.
1997  */
1998 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
1999 					u64 num_devices)
2000 {
2001 	u64 all_avail;
2002 	unsigned seq;
2003 	int i;
2004 
2005 	do {
2006 		seq = read_seqbegin(&fs_info->profiles_lock);
2007 
2008 		all_avail = fs_info->avail_data_alloc_bits |
2009 			    fs_info->avail_system_alloc_bits |
2010 			    fs_info->avail_metadata_alloc_bits;
2011 	} while (read_seqretry(&fs_info->profiles_lock, seq));
2012 
2013 	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
2014 		if (!(all_avail & btrfs_raid_array[i].bg_flag))
2015 			continue;
2016 
2017 		if (num_devices < btrfs_raid_array[i].devs_min)
2018 			return btrfs_raid_array[i].mindev_error;
2019 	}
2020 
2021 	return 0;
2022 }
2023 
2024 static struct btrfs_device * btrfs_find_next_active_device(
2025 		struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
2026 {
2027 	struct btrfs_device *next_device;
2028 
2029 	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
2030 		if (next_device != device &&
2031 		    !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
2032 		    && next_device->bdev)
2033 			return next_device;
2034 	}
2035 
2036 	return NULL;
2037 }
2038 
2039 /*
2040  * Helper function to check if the given device is part of s_bdev / latest_dev
2041  * and replace it with the provided or the next active device. In the context
2042  * where this function is called, there should always be another device (or
2043  * this_dev) which is active.
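 *
 * The existence of such a device is guaranteed by the callers: device
 * removal, for instance, refuses to remove the last writeable device (see
 * BTRFS_ERROR_DEV_ONLY_WRITABLE in btrfs_rm_device()), which is why this
 * function only ASSERT()s on the next device instead of failing.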
2044  */
2045 void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
2046 					    struct btrfs_device *next_device)
2047 {
2048 	struct btrfs_fs_info *fs_info = device->fs_info;
2049 
2050 	if (!next_device)
2051 		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
2052 							    device);
2053 	ASSERT(next_device);
2054 
2055 	if (fs_info->sb->s_bdev &&
2056 			(fs_info->sb->s_bdev == device->bdev))
2057 		fs_info->sb->s_bdev = next_device->bdev;
2058 
2059 	if (fs_info->fs_devices->latest_dev->bdev == device->bdev)
2060 		fs_info->fs_devices->latest_dev = next_device;
2061 }
2062 
2063 /*
2064  * Return btrfs_fs_devices::num_devices excluding the device that's being
2065  * currently replaced.
2066  */
2067 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
2068 {
2069 	u64 num_devices = fs_info->fs_devices->num_devices;
2070 
2071 	down_read(&fs_info->dev_replace.rwsem);
2072 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
2073 		ASSERT(num_devices > 1);
2074 		num_devices--;
2075 	}
2076 	up_read(&fs_info->dev_replace.rwsem);
2077 
2078 	return num_devices;
2079 }
2080 
2081 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
2082 			       struct block_device *bdev,
2083 			       const char *device_path)
2084 {
2085 	struct btrfs_super_block *disk_super;
2086 	int copy_num;
2087 
2088 	if (!bdev)
2089 		return;
2090 
2091 	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
2092 		struct page *page;
2093 		int ret;
2094 
2095 		disk_super = btrfs_read_dev_one_super(bdev, copy_num);
2096 		if (IS_ERR(disk_super))
2097 			continue;
2098 
2099 		if (bdev_is_zoned(bdev)) {
2100 			btrfs_reset_sb_log_zones(bdev, copy_num);
2101 			continue;
2102 		}
2103 
2104 		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
2105 
2106 		page = virt_to_page(disk_super);
2107 		set_page_dirty(page);
2108 		lock_page(page);
2109 		/* write_one_page() unlocks the page */
2110 		ret = write_one_page(page);
2111 		if (ret)
2112 			btrfs_warn(fs_info,
2113 				"error clearing superblock number %d (%d)",
2114 				copy_num, ret);
2115 		btrfs_release_disk_super(disk_super);
2116 
2117 	}
2118 
2119 	/* Notify udev that device has changed */
2120 	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
2121 
2122 	/* Update ctime/mtime for device path for libblkid */
2123 	update_dev_time(device_path);
2124 }
2125 
2126 int btrfs_rm_device(struct btrfs_fs_info *fs_info,
2127 		    struct btrfs_dev_lookup_args *args,
2128 		    struct block_device **bdev, fmode_t *mode)
2129 {
2130 	struct btrfs_device *device;
2131 	struct btrfs_fs_devices *cur_devices;
2132 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2133 	u64 num_devices;
2134 	int ret = 0;
2135 
2136 	/*
2137 	 * The device list in fs_devices is accessed without locks (neither
2138 	 * uuid_mutex nor device_list_mutex) as it won't change on a mounted
2139 	 * filesystem and another device rm cannot run.
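	 * Concurrent device add/remove/replace/resize are excluded by the
	 * exclusive operation lock (btrfs_exclop_start()) taken by the ioctl
	 * layer before this function is reached.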
2140 */ 2141 num_devices = btrfs_num_devices(fs_info); 2142 2143 ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1); 2144 if (ret) 2145 goto out; 2146 2147 device = btrfs_find_device(fs_info->fs_devices, args); 2148 if (!device) { 2149 if (args->missing) 2150 ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND; 2151 else 2152 ret = -ENOENT; 2153 goto out; 2154 } 2155 2156 if (btrfs_pinned_by_swapfile(fs_info, device)) { 2157 btrfs_warn_in_rcu(fs_info, 2158 "cannot remove device %s (devid %llu) due to active swapfile", 2159 rcu_str_deref(device->name), device->devid); 2160 ret = -ETXTBSY; 2161 goto out; 2162 } 2163 2164 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 2165 ret = BTRFS_ERROR_DEV_TGT_REPLACE; 2166 goto out; 2167 } 2168 2169 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 2170 fs_info->fs_devices->rw_devices == 1) { 2171 ret = BTRFS_ERROR_DEV_ONLY_WRITABLE; 2172 goto out; 2173 } 2174 2175 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2176 mutex_lock(&fs_info->chunk_mutex); 2177 list_del_init(&device->dev_alloc_list); 2178 device->fs_devices->rw_devices--; 2179 mutex_unlock(&fs_info->chunk_mutex); 2180 } 2181 2182 ret = btrfs_shrink_device(device, 0); 2183 if (ret) 2184 goto error_undo; 2185 2186 /* 2187 * TODO: the superblock still includes this device in its num_devices 2188 * counter although write_all_supers() is not locked out. This 2189 * could give a filesystem state which requires a degraded mount. 2190 */ 2191 ret = btrfs_rm_dev_item(device); 2192 if (ret) 2193 goto error_undo; 2194 2195 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2196 btrfs_scrub_cancel_dev(device); 2197 2198 /* 2199 * the device list mutex makes sure that we don't change 2200 * the device list while someone else is writing out all 2201 * the device supers. Whoever is writing all supers, should 2202 * lock the device list mutex before getting the number of 2203 * devices in the super block (super_copy). Conversely, 2204 * whoever updates the number of devices in the super block 2205 * (super_copy) should hold the device list mutex. 2206 */ 2207 2208 /* 2209 * In normal cases the cur_devices == fs_devices. But in case 2210 * of deleting a seed device, the cur_devices should point to 2211 * its own fs_devices listed under the fs_devices->seed_list. 2212 */ 2213 cur_devices = device->fs_devices; 2214 mutex_lock(&fs_devices->device_list_mutex); 2215 list_del_rcu(&device->dev_list); 2216 2217 cur_devices->num_devices--; 2218 cur_devices->total_devices--; 2219 /* Update total_devices of the parent fs_devices if it's seed */ 2220 if (cur_devices != fs_devices) 2221 fs_devices->total_devices--; 2222 2223 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) 2224 cur_devices->missing_devices--; 2225 2226 btrfs_assign_next_active_device(device, NULL); 2227 2228 if (device->bdev) { 2229 cur_devices->open_devices--; 2230 /* remove sysfs entry */ 2231 btrfs_sysfs_remove_device(device); 2232 } 2233 2234 num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1; 2235 btrfs_set_super_num_devices(fs_info->super_copy, num_devices); 2236 mutex_unlock(&fs_devices->device_list_mutex); 2237 2238 /* 2239 * At this point, the device is zero sized and detached from the 2240 * devices list. All that's left is to zero out the old supers and 2241 * free the device. 
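	 * Note that "zero out" means clearing only the superblock magic of
	 * each copy (see btrfs_scratch_superblocks()), which is enough for
	 * the device to no longer be recognized as btrfs on scan.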
2242 	 *
2243 	 * We cannot call btrfs_close_bdev() here because we're holding the sb
2244 	 * write lock, and blkdev_put() will pull in the ->open_mutex on the
2245 	 * block device and its dependencies. Instead just flush the device
2246 	 * and let the caller do the final blkdev_put.
2247 	 */
2248 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2249 		btrfs_scratch_superblocks(fs_info, device->bdev,
2250 					  device->name->str);
2251 		if (device->bdev) {
2252 			sync_blockdev(device->bdev);
2253 			invalidate_bdev(device->bdev);
2254 		}
2255 	}
2256 
2257 	*bdev = device->bdev;
2258 	*mode = device->mode;
2259 	synchronize_rcu();
2260 	btrfs_free_device(device);
2261 
2262 	/*
2263 	 * This can happen if cur_devices is the private seed devices list. We
2264 	 * cannot call close_fs_devices() here because it expects the uuid_mutex
2265 	 * to be held, but in fact we don't need that for the private
2266 	 * seed_devices; we can simply decrement cur_devices->opened and then
2267 	 * remove it from our list and free the fs_devices.
2268 	 */
2269 	if (cur_devices->num_devices == 0) {
2270 		list_del_init(&cur_devices->seed_list);
2271 		ASSERT(cur_devices->opened == 1);
2272 		cur_devices->opened--;
2273 		free_fs_devices(cur_devices);
2274 	}
2275 
2276 out:
2277 	return ret;
2278 
2279 error_undo:
2280 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2281 		mutex_lock(&fs_info->chunk_mutex);
2282 		list_add(&device->dev_alloc_list,
2283 			 &fs_devices->alloc_list);
2284 		device->fs_devices->rw_devices++;
2285 		mutex_unlock(&fs_info->chunk_mutex);
2286 	}
2287 	goto out;
2288 }
2289 
2290 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
2291 {
2292 	struct btrfs_fs_devices *fs_devices;
2293 
2294 	lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);
2295 
2296 	/*
2297 	 * In case of an fs with no seed, srcdev->fs_devices will point to the
2298 	 * fs_devices of fs_info. However, when the dev being replaced is a
2299 	 * seed dev it will point to the seed's local fs_devices. In short,
2300 	 * srcdev will have its correct fs_devices in both cases.
2301 	 */
2302 	fs_devices = srcdev->fs_devices;
2303 
2304 	list_del_rcu(&srcdev->dev_list);
2305 	list_del(&srcdev->dev_alloc_list);
2306 	fs_devices->num_devices--;
2307 	if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
2308 		fs_devices->missing_devices--;
2309 
2310 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
2311 		fs_devices->rw_devices--;
2312 
2313 	if (srcdev->bdev)
2314 		fs_devices->open_devices--;
2315 }
2316 
2317 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
2318 {
2319 	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2320 
2321 	mutex_lock(&uuid_mutex);
2322 
2323 	btrfs_close_bdev(srcdev);
2324 	synchronize_rcu();
2325 	btrfs_free_device(srcdev);
2326 
2327 	/* If there are no devices left we'd rather delete the fs_devices */
2328 	if (!fs_devices->num_devices) {
2329 		/*
2330 		 * On a mounted FS, num_devices can't be zero unless it's a
2331 		 * seed. In case of a seed device being replaced, the replace
2332 		 * target is added to the sprout FS, so there will be no more
2333 		 * device left under the seed FS.
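		 * (compare btrfs_rm_device(), which drops an emptied seed
		 * fs_devices from the seed_list in the same way).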
2334 		 */
2335 		ASSERT(fs_devices->seeding);
2336 
2337 		list_del_init(&fs_devices->seed_list);
2338 		close_fs_devices(fs_devices);
2339 		free_fs_devices(fs_devices);
2340 	}
2341 	mutex_unlock(&uuid_mutex);
2342 }
2343 
2344 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
2345 {
2346 	struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;
2347 
2348 	mutex_lock(&fs_devices->device_list_mutex);
2349 
2350 	btrfs_sysfs_remove_device(tgtdev);
2351 
2352 	if (tgtdev->bdev)
2353 		fs_devices->open_devices--;
2354 
2355 	fs_devices->num_devices--;
2356 
2357 	btrfs_assign_next_active_device(tgtdev, NULL);
2358 
2359 	list_del_rcu(&tgtdev->dev_list);
2360 
2361 	mutex_unlock(&fs_devices->device_list_mutex);
2362 
2363 	btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev,
2364 				  tgtdev->name->str);
2365 
2366 	btrfs_close_bdev(tgtdev);
2367 	synchronize_rcu();
2368 	btrfs_free_device(tgtdev);
2369 }
2370 
2371 /**
2372  * Populate args from device at path
2373  *
2374  * @fs_info:	the filesystem
2375  * @args:	the args to populate
2376  * @path:	the path to the device
2377  *
2378  * This will read the super block of the device at @path and populate @args with
2379  * the devid, fsid, and uuid. This is meant to be used for ioctls that need to
2380  * look up a device to operate on, but need to do it before we take any locks.
2381  * This properly handles the special case of "missing" that a user may pass in,
2382  * and does some basic sanity checks. The caller must make sure that @path is
2383  * properly NUL terminated before calling in, and must call
2384  * btrfs_put_dev_args_from_path() in order to free up the temporary fsid and
2385  * uuid buffers.
2386  *
2387  * Return: 0 for success, -errno for failure
2388  */
2389 int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
2390 				 struct btrfs_dev_lookup_args *args,
2391 				 const char *path)
2392 {
2393 	struct btrfs_super_block *disk_super;
2394 	struct block_device *bdev;
2395 	int ret;
2396 
2397 	if (!path || !path[0])
2398 		return -EINVAL;
2399 	if (!strcmp(path, "missing")) {
2400 		args->missing = true;
2401 		return 0;
2402 	}
2403 
2404 	args->uuid = kzalloc(BTRFS_UUID_SIZE, GFP_KERNEL);
2405 	args->fsid = kzalloc(BTRFS_FSID_SIZE, GFP_KERNEL);
2406 	if (!args->uuid || !args->fsid) {
2407 		btrfs_put_dev_args_from_path(args);
2408 		return -ENOMEM;
2409 	}
2410 
2411 	ret = btrfs_get_bdev_and_sb(path, FMODE_READ, fs_info->bdev_holder, 0,
2412 				    &bdev, &disk_super);
2413 	if (ret) {
		btrfs_put_dev_args_from_path(args);
2414 		return ret;
	}
2415 	args->devid = btrfs_stack_device_id(&disk_super->dev_item);
2416 	memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE);
2417 	if (btrfs_fs_incompat(fs_info, METADATA_UUID))
2418 		memcpy(args->fsid, disk_super->metadata_uuid, BTRFS_FSID_SIZE);
2419 	else
2420 		memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
2421 	btrfs_release_disk_super(disk_super);
2422 	blkdev_put(bdev, FMODE_READ);
2423 	return 0;
2424 }
2425 
2426 /*
2427  * Only use this jointly with btrfs_get_dev_args_from_path() because we will
2428  * allocate our ->uuid and ->fsid pointers; everybody else uses local variables
2429  * that don't need to be freed.
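 *
 * A typical pairing, as done in btrfs_find_device_by_devspec() below:
 *
 *	BTRFS_DEV_LOOKUP_ARGS(args);
 *
 *	ret = btrfs_get_dev_args_from_path(fs_info, &args, path);
 *	if (ret)
 *		return ERR_PTR(ret);
 *	device = btrfs_find_device(fs_info->fs_devices, &args);
 *	btrfs_put_dev_args_from_path(&args);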
2430 */ 2431 void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args) 2432 { 2433 kfree(args->uuid); 2434 kfree(args->fsid); 2435 args->uuid = NULL; 2436 args->fsid = NULL; 2437 } 2438 2439 struct btrfs_device *btrfs_find_device_by_devspec( 2440 struct btrfs_fs_info *fs_info, u64 devid, 2441 const char *device_path) 2442 { 2443 BTRFS_DEV_LOOKUP_ARGS(args); 2444 struct btrfs_device *device; 2445 int ret; 2446 2447 if (devid) { 2448 args.devid = devid; 2449 device = btrfs_find_device(fs_info->fs_devices, &args); 2450 if (!device) 2451 return ERR_PTR(-ENOENT); 2452 return device; 2453 } 2454 2455 ret = btrfs_get_dev_args_from_path(fs_info, &args, device_path); 2456 if (ret) 2457 return ERR_PTR(ret); 2458 device = btrfs_find_device(fs_info->fs_devices, &args); 2459 btrfs_put_dev_args_from_path(&args); 2460 if (!device) 2461 return ERR_PTR(-ENOENT); 2462 return device; 2463 } 2464 2465 static struct btrfs_fs_devices *btrfs_init_sprout(struct btrfs_fs_info *fs_info) 2466 { 2467 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2468 struct btrfs_fs_devices *old_devices; 2469 struct btrfs_fs_devices *seed_devices; 2470 2471 lockdep_assert_held(&uuid_mutex); 2472 if (!fs_devices->seeding) 2473 return ERR_PTR(-EINVAL); 2474 2475 /* 2476 * Private copy of the seed devices, anchored at 2477 * fs_info->fs_devices->seed_list 2478 */ 2479 seed_devices = alloc_fs_devices(NULL, NULL); 2480 if (IS_ERR(seed_devices)) 2481 return seed_devices; 2482 2483 /* 2484 * It's necessary to retain a copy of the original seed fs_devices in 2485 * fs_uuids so that filesystems which have been seeded can successfully 2486 * reference the seed device from open_seed_devices. This also supports 2487 * multiple fs seed. 2488 */ 2489 old_devices = clone_fs_devices(fs_devices); 2490 if (IS_ERR(old_devices)) { 2491 kfree(seed_devices); 2492 return old_devices; 2493 } 2494 2495 list_add(&old_devices->fs_list, &fs_uuids); 2496 2497 memcpy(seed_devices, fs_devices, sizeof(*seed_devices)); 2498 seed_devices->opened = 1; 2499 INIT_LIST_HEAD(&seed_devices->devices); 2500 INIT_LIST_HEAD(&seed_devices->alloc_list); 2501 mutex_init(&seed_devices->device_list_mutex); 2502 2503 return seed_devices; 2504 } 2505 2506 /* 2507 * Splice seed devices into the sprout fs_devices. 2508 * Generate a new fsid for the sprouted read-write filesystem. 2509 */ 2510 static void btrfs_setup_sprout(struct btrfs_fs_info *fs_info, 2511 struct btrfs_fs_devices *seed_devices) 2512 { 2513 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2514 struct btrfs_super_block *disk_super = fs_info->super_copy; 2515 struct btrfs_device *device; 2516 u64 super_flags; 2517 2518 /* 2519 * We are updating the fsid, the thread leading to device_list_add() 2520 * could race, so uuid_mutex is needed. 2521 */ 2522 lockdep_assert_held(&uuid_mutex); 2523 2524 /* 2525 * The threads listed below may traverse dev_list but can do that without 2526 * device_list_mutex: 2527 * - All device ops and balance - as we are in btrfs_exclop_start. 2528 * - Various dev_list readers - are using RCU. 2529 * - btrfs_ioctl_fitrim() - is using RCU. 
2530 * 2531 * For-read threads as below are using device_list_mutex: 2532 * - Readonly scrub btrfs_scrub_dev() 2533 * - Readonly scrub btrfs_scrub_progress() 2534 * - btrfs_get_dev_stats() 2535 */ 2536 lockdep_assert_held(&fs_devices->device_list_mutex); 2537 2538 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices, 2539 synchronize_rcu); 2540 list_for_each_entry(device, &seed_devices->devices, dev_list) 2541 device->fs_devices = seed_devices; 2542 2543 fs_devices->seeding = false; 2544 fs_devices->num_devices = 0; 2545 fs_devices->open_devices = 0; 2546 fs_devices->missing_devices = 0; 2547 fs_devices->rotating = false; 2548 list_add(&seed_devices->seed_list, &fs_devices->seed_list); 2549 2550 generate_random_uuid(fs_devices->fsid); 2551 memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE); 2552 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); 2553 2554 super_flags = btrfs_super_flags(disk_super) & 2555 ~BTRFS_SUPER_FLAG_SEEDING; 2556 btrfs_set_super_flags(disk_super, super_flags); 2557 } 2558 2559 /* 2560 * Store the expected generation for seed devices in device items. 2561 */ 2562 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans) 2563 { 2564 BTRFS_DEV_LOOKUP_ARGS(args); 2565 struct btrfs_fs_info *fs_info = trans->fs_info; 2566 struct btrfs_root *root = fs_info->chunk_root; 2567 struct btrfs_path *path; 2568 struct extent_buffer *leaf; 2569 struct btrfs_dev_item *dev_item; 2570 struct btrfs_device *device; 2571 struct btrfs_key key; 2572 u8 fs_uuid[BTRFS_FSID_SIZE]; 2573 u8 dev_uuid[BTRFS_UUID_SIZE]; 2574 int ret; 2575 2576 path = btrfs_alloc_path(); 2577 if (!path) 2578 return -ENOMEM; 2579 2580 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2581 key.offset = 0; 2582 key.type = BTRFS_DEV_ITEM_KEY; 2583 2584 while (1) { 2585 btrfs_reserve_chunk_metadata(trans, false); 2586 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2587 btrfs_trans_release_chunk_metadata(trans); 2588 if (ret < 0) 2589 goto error; 2590 2591 leaf = path->nodes[0]; 2592 next_slot: 2593 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 2594 ret = btrfs_next_leaf(root, path); 2595 if (ret > 0) 2596 break; 2597 if (ret < 0) 2598 goto error; 2599 leaf = path->nodes[0]; 2600 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2601 btrfs_release_path(path); 2602 continue; 2603 } 2604 2605 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2606 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID || 2607 key.type != BTRFS_DEV_ITEM_KEY) 2608 break; 2609 2610 dev_item = btrfs_item_ptr(leaf, path->slots[0], 2611 struct btrfs_dev_item); 2612 args.devid = btrfs_device_id(leaf, dev_item); 2613 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 2614 BTRFS_UUID_SIZE); 2615 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 2616 BTRFS_FSID_SIZE); 2617 args.uuid = dev_uuid; 2618 args.fsid = fs_uuid; 2619 device = btrfs_find_device(fs_info->fs_devices, &args); 2620 BUG_ON(!device); /* Logic error */ 2621 2622 if (device->fs_devices->seeding) { 2623 btrfs_set_device_generation(leaf, dev_item, 2624 device->generation); 2625 btrfs_mark_buffer_dirty(leaf); 2626 } 2627 2628 path->slots[0]++; 2629 goto next_slot; 2630 } 2631 ret = 0; 2632 error: 2633 btrfs_free_path(path); 2634 return ret; 2635 } 2636 2637 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path) 2638 { 2639 struct btrfs_root *root = fs_info->dev_root; 2640 struct request_queue *q; 2641 struct btrfs_trans_handle *trans; 2642 struct btrfs_device *device; 2643 struct 
block_device *bdev; 2644 struct super_block *sb = fs_info->sb; 2645 struct rcu_string *name; 2646 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2647 struct btrfs_fs_devices *seed_devices; 2648 u64 orig_super_total_bytes; 2649 u64 orig_super_num_devices; 2650 int ret = 0; 2651 bool seeding_dev = false; 2652 bool locked = false; 2653 2654 if (sb_rdonly(sb) && !fs_devices->seeding) 2655 return -EROFS; 2656 2657 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, 2658 fs_info->bdev_holder); 2659 if (IS_ERR(bdev)) 2660 return PTR_ERR(bdev); 2661 2662 if (!btrfs_check_device_zone_type(fs_info, bdev)) { 2663 ret = -EINVAL; 2664 goto error; 2665 } 2666 2667 if (fs_devices->seeding) { 2668 seeding_dev = true; 2669 down_write(&sb->s_umount); 2670 mutex_lock(&uuid_mutex); 2671 locked = true; 2672 } 2673 2674 sync_blockdev(bdev); 2675 2676 rcu_read_lock(); 2677 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) { 2678 if (device->bdev == bdev) { 2679 ret = -EEXIST; 2680 rcu_read_unlock(); 2681 goto error; 2682 } 2683 } 2684 rcu_read_unlock(); 2685 2686 device = btrfs_alloc_device(fs_info, NULL, NULL); 2687 if (IS_ERR(device)) { 2688 /* we can safely leave the fs_devices entry around */ 2689 ret = PTR_ERR(device); 2690 goto error; 2691 } 2692 2693 name = rcu_string_strdup(device_path, GFP_KERNEL); 2694 if (!name) { 2695 ret = -ENOMEM; 2696 goto error_free_device; 2697 } 2698 rcu_assign_pointer(device->name, name); 2699 2700 device->fs_info = fs_info; 2701 device->bdev = bdev; 2702 2703 ret = btrfs_get_dev_zone_info(device, false); 2704 if (ret) 2705 goto error_free_device; 2706 2707 trans = btrfs_start_transaction(root, 0); 2708 if (IS_ERR(trans)) { 2709 ret = PTR_ERR(trans); 2710 goto error_free_zone; 2711 } 2712 2713 q = bdev_get_queue(bdev); 2714 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 2715 device->generation = trans->transid; 2716 device->io_width = fs_info->sectorsize; 2717 device->io_align = fs_info->sectorsize; 2718 device->sector_size = fs_info->sectorsize; 2719 device->total_bytes = 2720 round_down(bdev_nr_bytes(bdev), fs_info->sectorsize); 2721 device->disk_total_bytes = device->total_bytes; 2722 device->commit_total_bytes = device->total_bytes; 2723 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2724 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 2725 device->mode = FMODE_EXCL; 2726 device->dev_stats_valid = 1; 2727 set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE); 2728 2729 if (seeding_dev) { 2730 btrfs_clear_sb_rdonly(sb); 2731 2732 /* GFP_KERNEL allocation must not be under device_list_mutex */ 2733 seed_devices = btrfs_init_sprout(fs_info); 2734 if (IS_ERR(seed_devices)) { 2735 ret = PTR_ERR(seed_devices); 2736 btrfs_abort_transaction(trans, ret); 2737 goto error_trans; 2738 } 2739 } 2740 2741 mutex_lock(&fs_devices->device_list_mutex); 2742 if (seeding_dev) { 2743 btrfs_setup_sprout(fs_info, seed_devices); 2744 btrfs_assign_next_active_device(fs_info->fs_devices->latest_dev, 2745 device); 2746 } 2747 2748 device->fs_devices = fs_devices; 2749 2750 mutex_lock(&fs_info->chunk_mutex); 2751 list_add_rcu(&device->dev_list, &fs_devices->devices); 2752 list_add(&device->dev_alloc_list, &fs_devices->alloc_list); 2753 fs_devices->num_devices++; 2754 fs_devices->open_devices++; 2755 fs_devices->rw_devices++; 2756 fs_devices->total_devices++; 2757 fs_devices->total_rw_bytes += device->total_bytes; 2758 2759 atomic64_add(device->total_bytes, &fs_info->free_chunk_space); 2760 2761 if (!blk_queue_nonrot(q)) 2762 
		fs_devices->rotating = true;
2763 
2764 	orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
2765 	btrfs_set_super_total_bytes(fs_info->super_copy,
2766 		round_down(orig_super_total_bytes + device->total_bytes,
2767 			   fs_info->sectorsize));
2768 
2769 	orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
2770 	btrfs_set_super_num_devices(fs_info->super_copy,
2771 				    orig_super_num_devices + 1);
2772 
2773 	/*
2774 	 * we've got more storage, clear any full flags on the space
2775 	 * infos
2776 	 */
2777 	btrfs_clear_space_info_full(fs_info);
2778 
2779 	mutex_unlock(&fs_info->chunk_mutex);
2780 
2781 	/* Add sysfs device entry */
2782 	btrfs_sysfs_add_device(device);
2783 
2784 	mutex_unlock(&fs_devices->device_list_mutex);
2785 
2786 	if (seeding_dev) {
2787 		mutex_lock(&fs_info->chunk_mutex);
2788 		ret = init_first_rw_device(trans);
2789 		mutex_unlock(&fs_info->chunk_mutex);
2790 		if (ret) {
2791 			btrfs_abort_transaction(trans, ret);
2792 			goto error_sysfs;
2793 		}
2794 	}
2795 
2796 	ret = btrfs_add_dev_item(trans, device);
2797 	if (ret) {
2798 		btrfs_abort_transaction(trans, ret);
2799 		goto error_sysfs;
2800 	}
2801 
2802 	if (seeding_dev) {
2803 		ret = btrfs_finish_sprout(trans);
2804 		if (ret) {
2805 			btrfs_abort_transaction(trans, ret);
2806 			goto error_sysfs;
2807 		}
2808 
2809 		/*
2810 		 * fs_devices now represents the newly sprouted filesystem and
2811 		 * its fsid has been changed by btrfs_setup_sprout().
2812 		 */
2813 		btrfs_sysfs_update_sprout_fsid(fs_devices);
2814 	}
2815 
2816 	ret = btrfs_commit_transaction(trans);
2817 
2818 	if (seeding_dev) {
2819 		mutex_unlock(&uuid_mutex);
2820 		up_write(&sb->s_umount);
2821 		locked = false;
2822 
2823 		if (ret) /* transaction commit */
2824 			return ret;
2825 
2826 		ret = btrfs_relocate_sys_chunks(fs_info);
2827 		if (ret < 0)
2828 			btrfs_handle_fs_error(fs_info, ret,
2829 		    "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2830 		trans = btrfs_attach_transaction(root);
2831 		if (IS_ERR(trans)) {
2832 			if (PTR_ERR(trans) == -ENOENT)
2833 				return 0;
2834 			ret = PTR_ERR(trans);
2835 			trans = NULL;
2836 			goto error_sysfs;
2837 		}
2838 		ret = btrfs_commit_transaction(trans);
2839 	}
2840 
2841 	/*
2842 	 * Now that we have written a new super block to this device, check all
2843 	 * other fs_devices lists to see if device_path alienates any other
2844 	 * scanned device.
2845 	 * We can ignore the return value as it typically returns -EINVAL and
2846 	 * only succeeds if the device was an alien.
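	 * ("alienates" meaning: the path is still registered as a member of
	 * some other, unmounted filesystem that was scanned earlier; that
	 * stale record is dropped here).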
2847 */ 2848 btrfs_forget_devices(device_path); 2849 2850 /* Update ctime/mtime for blkid or udev */ 2851 update_dev_time(device_path); 2852 2853 return ret; 2854 2855 error_sysfs: 2856 btrfs_sysfs_remove_device(device); 2857 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2858 mutex_lock(&fs_info->chunk_mutex); 2859 list_del_rcu(&device->dev_list); 2860 list_del(&device->dev_alloc_list); 2861 fs_info->fs_devices->num_devices--; 2862 fs_info->fs_devices->open_devices--; 2863 fs_info->fs_devices->rw_devices--; 2864 fs_info->fs_devices->total_devices--; 2865 fs_info->fs_devices->total_rw_bytes -= device->total_bytes; 2866 atomic64_sub(device->total_bytes, &fs_info->free_chunk_space); 2867 btrfs_set_super_total_bytes(fs_info->super_copy, 2868 orig_super_total_bytes); 2869 btrfs_set_super_num_devices(fs_info->super_copy, 2870 orig_super_num_devices); 2871 mutex_unlock(&fs_info->chunk_mutex); 2872 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2873 error_trans: 2874 if (seeding_dev) 2875 btrfs_set_sb_rdonly(sb); 2876 if (trans) 2877 btrfs_end_transaction(trans); 2878 error_free_zone: 2879 btrfs_destroy_dev_zone_info(device); 2880 error_free_device: 2881 btrfs_free_device(device); 2882 error: 2883 blkdev_put(bdev, FMODE_EXCL); 2884 if (locked) { 2885 mutex_unlock(&uuid_mutex); 2886 up_write(&sb->s_umount); 2887 } 2888 return ret; 2889 } 2890 2891 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans, 2892 struct btrfs_device *device) 2893 { 2894 int ret; 2895 struct btrfs_path *path; 2896 struct btrfs_root *root = device->fs_info->chunk_root; 2897 struct btrfs_dev_item *dev_item; 2898 struct extent_buffer *leaf; 2899 struct btrfs_key key; 2900 2901 path = btrfs_alloc_path(); 2902 if (!path) 2903 return -ENOMEM; 2904 2905 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2906 key.type = BTRFS_DEV_ITEM_KEY; 2907 key.offset = device->devid; 2908 2909 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2910 if (ret < 0) 2911 goto out; 2912 2913 if (ret > 0) { 2914 ret = -ENOENT; 2915 goto out; 2916 } 2917 2918 leaf = path->nodes[0]; 2919 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 2920 2921 btrfs_set_device_id(leaf, dev_item, device->devid); 2922 btrfs_set_device_type(leaf, dev_item, device->type); 2923 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 2924 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 2925 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 2926 btrfs_set_device_total_bytes(leaf, dev_item, 2927 btrfs_device_get_disk_total_bytes(device)); 2928 btrfs_set_device_bytes_used(leaf, dev_item, 2929 btrfs_device_get_bytes_used(device)); 2930 btrfs_mark_buffer_dirty(leaf); 2931 2932 out: 2933 btrfs_free_path(path); 2934 return ret; 2935 } 2936 2937 int btrfs_grow_device(struct btrfs_trans_handle *trans, 2938 struct btrfs_device *device, u64 new_size) 2939 { 2940 struct btrfs_fs_info *fs_info = device->fs_info; 2941 struct btrfs_super_block *super_copy = fs_info->super_copy; 2942 u64 old_total; 2943 u64 diff; 2944 int ret; 2945 2946 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 2947 return -EACCES; 2948 2949 new_size = round_down(new_size, fs_info->sectorsize); 2950 2951 mutex_lock(&fs_info->chunk_mutex); 2952 old_total = btrfs_super_total_bytes(super_copy); 2953 diff = round_down(new_size - device->total_bytes, fs_info->sectorsize); 2954 2955 if (new_size <= device->total_bytes || 2956 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 2957 
mutex_unlock(&fs_info->chunk_mutex); 2958 return -EINVAL; 2959 } 2960 2961 btrfs_set_super_total_bytes(super_copy, 2962 round_down(old_total + diff, fs_info->sectorsize)); 2963 device->fs_devices->total_rw_bytes += diff; 2964 2965 btrfs_device_set_total_bytes(device, new_size); 2966 btrfs_device_set_disk_total_bytes(device, new_size); 2967 btrfs_clear_space_info_full(device->fs_info); 2968 if (list_empty(&device->post_commit_list)) 2969 list_add_tail(&device->post_commit_list, 2970 &trans->transaction->dev_update_list); 2971 mutex_unlock(&fs_info->chunk_mutex); 2972 2973 btrfs_reserve_chunk_metadata(trans, false); 2974 ret = btrfs_update_device(trans, device); 2975 btrfs_trans_release_chunk_metadata(trans); 2976 2977 return ret; 2978 } 2979 2980 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 2981 { 2982 struct btrfs_fs_info *fs_info = trans->fs_info; 2983 struct btrfs_root *root = fs_info->chunk_root; 2984 int ret; 2985 struct btrfs_path *path; 2986 struct btrfs_key key; 2987 2988 path = btrfs_alloc_path(); 2989 if (!path) 2990 return -ENOMEM; 2991 2992 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2993 key.offset = chunk_offset; 2994 key.type = BTRFS_CHUNK_ITEM_KEY; 2995 2996 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 2997 if (ret < 0) 2998 goto out; 2999 else if (ret > 0) { /* Logic error or corruption */ 3000 btrfs_handle_fs_error(fs_info, -ENOENT, 3001 "Failed lookup while freeing chunk."); 3002 ret = -ENOENT; 3003 goto out; 3004 } 3005 3006 ret = btrfs_del_item(trans, root, path); 3007 if (ret < 0) 3008 btrfs_handle_fs_error(fs_info, ret, 3009 "Failed to delete chunk item."); 3010 out: 3011 btrfs_free_path(path); 3012 return ret; 3013 } 3014 3015 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 3016 { 3017 struct btrfs_super_block *super_copy = fs_info->super_copy; 3018 struct btrfs_disk_key *disk_key; 3019 struct btrfs_chunk *chunk; 3020 u8 *ptr; 3021 int ret = 0; 3022 u32 num_stripes; 3023 u32 array_size; 3024 u32 len = 0; 3025 u32 cur; 3026 struct btrfs_key key; 3027 3028 lockdep_assert_held(&fs_info->chunk_mutex); 3029 array_size = btrfs_super_sys_array_size(super_copy); 3030 3031 ptr = super_copy->sys_chunk_array; 3032 cur = 0; 3033 3034 while (cur < array_size) { 3035 disk_key = (struct btrfs_disk_key *)ptr; 3036 btrfs_disk_key_to_cpu(&key, disk_key); 3037 3038 len = sizeof(*disk_key); 3039 3040 if (key.type == BTRFS_CHUNK_ITEM_KEY) { 3041 chunk = (struct btrfs_chunk *)(ptr + len); 3042 num_stripes = btrfs_stack_chunk_num_stripes(chunk); 3043 len += btrfs_chunk_item_size(num_stripes); 3044 } else { 3045 ret = -EIO; 3046 break; 3047 } 3048 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID && 3049 key.offset == chunk_offset) { 3050 memmove(ptr, ptr + len, array_size - (cur + len)); 3051 array_size -= len; 3052 btrfs_set_super_sys_array_size(super_copy, array_size); 3053 } else { 3054 ptr += len; 3055 cur += len; 3056 } 3057 } 3058 return ret; 3059 } 3060 3061 /* 3062 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent. 3063 * @logical: Logical block offset in bytes. 3064 * @length: Length of extent in bytes. 3065 * 3066 * Return: Chunk mapping or ERR_PTR. 
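 *
 * On success the caller holds a reference on the returned map and must drop
 * it with free_extent_map() when done, e.g.:
 *
 *	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *	map = em->map_lookup;
 *	...
 *	free_extent_map(em);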
3067 */ 3068 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info, 3069 u64 logical, u64 length) 3070 { 3071 struct extent_map_tree *em_tree; 3072 struct extent_map *em; 3073 3074 em_tree = &fs_info->mapping_tree; 3075 read_lock(&em_tree->lock); 3076 em = lookup_extent_mapping(em_tree, logical, length); 3077 read_unlock(&em_tree->lock); 3078 3079 if (!em) { 3080 btrfs_crit(fs_info, "unable to find logical %llu length %llu", 3081 logical, length); 3082 return ERR_PTR(-EINVAL); 3083 } 3084 3085 if (em->start > logical || em->start + em->len < logical) { 3086 btrfs_crit(fs_info, 3087 "found a bad mapping, wanted %llu-%llu, found %llu-%llu", 3088 logical, length, em->start, em->start + em->len); 3089 free_extent_map(em); 3090 return ERR_PTR(-EINVAL); 3091 } 3092 3093 /* callers are responsible for dropping em's ref. */ 3094 return em; 3095 } 3096 3097 static int remove_chunk_item(struct btrfs_trans_handle *trans, 3098 struct map_lookup *map, u64 chunk_offset) 3099 { 3100 int i; 3101 3102 /* 3103 * Removing chunk items and updating the device items in the chunks btree 3104 * requires holding the chunk_mutex. 3105 * See the comment at btrfs_chunk_alloc() for the details. 3106 */ 3107 lockdep_assert_held(&trans->fs_info->chunk_mutex); 3108 3109 for (i = 0; i < map->num_stripes; i++) { 3110 int ret; 3111 3112 ret = btrfs_update_device(trans, map->stripes[i].dev); 3113 if (ret) 3114 return ret; 3115 } 3116 3117 return btrfs_free_chunk(trans, chunk_offset); 3118 } 3119 3120 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 3121 { 3122 struct btrfs_fs_info *fs_info = trans->fs_info; 3123 struct extent_map *em; 3124 struct map_lookup *map; 3125 u64 dev_extent_len = 0; 3126 int i, ret = 0; 3127 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 3128 3129 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 3130 if (IS_ERR(em)) { 3131 /* 3132 * This is a logic error, but we don't want to just rely on the 3133 * user having built with ASSERT enabled, so if ASSERT doesn't 3134 * do anything we still error out. 3135 */ 3136 ASSERT(0); 3137 return PTR_ERR(em); 3138 } 3139 map = em->map_lookup; 3140 3141 /* 3142 * First delete the device extent items from the devices btree. 3143 * We take the device_list_mutex to avoid racing with the finishing phase 3144 * of a device replace operation. See the comment below before acquiring 3145 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex 3146 * because that can result in a deadlock when deleting the device extent 3147 * items from the devices btree - COWing an extent buffer from the btree 3148 * may result in allocating a new metadata chunk, which would attempt to 3149 * lock again fs_info->chunk_mutex. 
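	 * The chunk_mutex is taken only further below, once all the device
	 * extent items have been deleted.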
3150 	 */
3151 	mutex_lock(&fs_devices->device_list_mutex);
3152 	for (i = 0; i < map->num_stripes; i++) {
3153 		struct btrfs_device *device = map->stripes[i].dev;
3154 		ret = btrfs_free_dev_extent(trans, device,
3155 					    map->stripes[i].physical,
3156 					    &dev_extent_len);
3157 		if (ret) {
3158 			mutex_unlock(&fs_devices->device_list_mutex);
3159 			btrfs_abort_transaction(trans, ret);
3160 			goto out;
3161 		}
3162 
3163 		if (device->bytes_used > 0) {
3164 			mutex_lock(&fs_info->chunk_mutex);
3165 			btrfs_device_set_bytes_used(device,
3166 					device->bytes_used - dev_extent_len);
3167 			atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
3168 			btrfs_clear_space_info_full(fs_info);
3169 			mutex_unlock(&fs_info->chunk_mutex);
3170 		}
3171 	}
3172 	mutex_unlock(&fs_devices->device_list_mutex);
3173 
3174 	/*
3175 	 * We acquire fs_info->chunk_mutex for 2 reasons:
3176 	 *
3177 	 * 1) Just like with the first phase of the chunk allocation, we must
3178 	 * reserve system space, do all chunk btree updates and deletions, and
3179 	 * update the system chunk array in the superblock while holding this
3180 	 * mutex. This is for similar reasons as explained on the comment at
3181 	 * the top of btrfs_chunk_alloc();
3182 	 *
3183 	 * 2) Prevent races with the final phase of a device replace operation
3184 	 * that replaces the device object associated with the map's stripes,
3185 	 * because the device object's id can change at any time during that
3186 	 * final phase of the device replace operation
3187 	 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
3188 	 * replaced device and then see it with an ID of
3189 	 * BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating
3190 	 * the device item, which does not exist in the chunk btree.
3191 	 * The finishing phase of device replace acquires both the
3192 	 * device_list_mutex and the chunk_mutex, in that order, so we are
3193 	 * safe by just acquiring the chunk_mutex.
3194 	 */
3195 	trans->removing_chunk = true;
3196 	mutex_lock(&fs_info->chunk_mutex);
3197 
3198 	check_system_chunk(trans, map->type);
3199 
3200 	ret = remove_chunk_item(trans, map, chunk_offset);
3201 	/*
3202 	 * Normally we should not get -ENOSPC since we reserved space before
3203 	 * through the call to check_system_chunk().
3204 	 *
3205 	 * Despite our system space_info having enough free space, we may not
3206 	 * be able to allocate extents from its block groups, because all have
3207 	 * an incompatible profile, which will force us to allocate a new system
3208 	 * block group with the right profile, or right after we called
3209 	 * check_system_chunk() above, a scrub turned the only system block group
3210 	 * with enough free space into RO mode.
3211 	 * This is explained with more detail at do_chunk_alloc().
3212 	 *
3213 	 * So if we get -ENOSPC, allocate a new system chunk and retry once.
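	 * (the retry path below creates a new system block group with
	 * btrfs_create_chunk(), inserts its chunk item and then calls
	 * remove_chunk_item() a second time).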
3214 */ 3215 if (ret == -ENOSPC) { 3216 const u64 sys_flags = btrfs_system_alloc_profile(fs_info); 3217 struct btrfs_block_group *sys_bg; 3218 3219 sys_bg = btrfs_create_chunk(trans, sys_flags); 3220 if (IS_ERR(sys_bg)) { 3221 ret = PTR_ERR(sys_bg); 3222 btrfs_abort_transaction(trans, ret); 3223 goto out; 3224 } 3225 3226 ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg); 3227 if (ret) { 3228 btrfs_abort_transaction(trans, ret); 3229 goto out; 3230 } 3231 3232 ret = remove_chunk_item(trans, map, chunk_offset); 3233 if (ret) { 3234 btrfs_abort_transaction(trans, ret); 3235 goto out; 3236 } 3237 } else if (ret) { 3238 btrfs_abort_transaction(trans, ret); 3239 goto out; 3240 } 3241 3242 trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len); 3243 3244 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 3245 ret = btrfs_del_sys_chunk(fs_info, chunk_offset); 3246 if (ret) { 3247 btrfs_abort_transaction(trans, ret); 3248 goto out; 3249 } 3250 } 3251 3252 mutex_unlock(&fs_info->chunk_mutex); 3253 trans->removing_chunk = false; 3254 3255 /* 3256 * We are done with chunk btree updates and deletions, so release the 3257 * system space we previously reserved (with check_system_chunk()). 3258 */ 3259 btrfs_trans_release_chunk_metadata(trans); 3260 3261 ret = btrfs_remove_block_group(trans, chunk_offset, em); 3262 if (ret) { 3263 btrfs_abort_transaction(trans, ret); 3264 goto out; 3265 } 3266 3267 out: 3268 if (trans->removing_chunk) { 3269 mutex_unlock(&fs_info->chunk_mutex); 3270 trans->removing_chunk = false; 3271 } 3272 /* once for us */ 3273 free_extent_map(em); 3274 return ret; 3275 } 3276 3277 int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 3278 { 3279 struct btrfs_root *root = fs_info->chunk_root; 3280 struct btrfs_trans_handle *trans; 3281 struct btrfs_block_group *block_group; 3282 u64 length; 3283 int ret; 3284 3285 /* 3286 * Prevent races with automatic removal of unused block groups. 3287 * After we relocate and before we remove the chunk with offset 3288 * chunk_offset, automatic removal of the block group can kick in, 3289 * resulting in a failure when calling btrfs_remove_chunk() below. 3290 * 3291 * Make sure to acquire this mutex before doing a tree search (dev 3292 * or chunk trees) to find chunks. Otherwise the cleaner kthread might 3293 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after 3294 * we release the path used to search the chunk/dev tree and before 3295 * the current task acquires this mutex and calls us. 3296 */ 3297 lockdep_assert_held(&fs_info->reclaim_bgs_lock); 3298 3299 /* step one, relocate all the extents inside this chunk */ 3300 btrfs_scrub_pause(fs_info); 3301 ret = btrfs_relocate_block_group(fs_info, chunk_offset); 3302 btrfs_scrub_continue(fs_info); 3303 if (ret) 3304 return ret; 3305 3306 block_group = btrfs_lookup_block_group(fs_info, chunk_offset); 3307 if (!block_group) 3308 return -ENOENT; 3309 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); 3310 length = block_group->length; 3311 btrfs_put_block_group(block_group); 3312 3313 /* 3314 * On a zoned file system, discard the whole block group, this will 3315 * trigger a REQ_OP_ZONE_RESET operation on the device zone. If 3316 * resetting the zone fails, don't treat it as a fatal problem from the 3317 * filesystem's point of view. 
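	 * The discard is best-effort: relocation has already succeeded at
	 * this point, so a failed zone reset is only reported below.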
3318 */ 3319 if (btrfs_is_zoned(fs_info)) { 3320 ret = btrfs_discard_extent(fs_info, chunk_offset, length, NULL); 3321 if (ret) 3322 btrfs_info(fs_info, 3323 "failed to reset zone %llu after relocation", 3324 chunk_offset); 3325 } 3326 3327 trans = btrfs_start_trans_remove_block_group(root->fs_info, 3328 chunk_offset); 3329 if (IS_ERR(trans)) { 3330 ret = PTR_ERR(trans); 3331 btrfs_handle_fs_error(root->fs_info, ret, NULL); 3332 return ret; 3333 } 3334 3335 /* 3336 * step two, delete the device extents and the 3337 * chunk tree entries 3338 */ 3339 ret = btrfs_remove_chunk(trans, chunk_offset); 3340 btrfs_end_transaction(trans); 3341 return ret; 3342 } 3343 3344 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info) 3345 { 3346 struct btrfs_root *chunk_root = fs_info->chunk_root; 3347 struct btrfs_path *path; 3348 struct extent_buffer *leaf; 3349 struct btrfs_chunk *chunk; 3350 struct btrfs_key key; 3351 struct btrfs_key found_key; 3352 u64 chunk_type; 3353 bool retried = false; 3354 int failed = 0; 3355 int ret; 3356 3357 path = btrfs_alloc_path(); 3358 if (!path) 3359 return -ENOMEM; 3360 3361 again: 3362 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3363 key.offset = (u64)-1; 3364 key.type = BTRFS_CHUNK_ITEM_KEY; 3365 3366 while (1) { 3367 mutex_lock(&fs_info->reclaim_bgs_lock); 3368 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3369 if (ret < 0) { 3370 mutex_unlock(&fs_info->reclaim_bgs_lock); 3371 goto error; 3372 } 3373 BUG_ON(ret == 0); /* Corruption */ 3374 3375 ret = btrfs_previous_item(chunk_root, path, key.objectid, 3376 key.type); 3377 if (ret) 3378 mutex_unlock(&fs_info->reclaim_bgs_lock); 3379 if (ret < 0) 3380 goto error; 3381 if (ret > 0) 3382 break; 3383 3384 leaf = path->nodes[0]; 3385 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3386 3387 chunk = btrfs_item_ptr(leaf, path->slots[0], 3388 struct btrfs_chunk); 3389 chunk_type = btrfs_chunk_type(leaf, chunk); 3390 btrfs_release_path(path); 3391 3392 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { 3393 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3394 if (ret == -ENOSPC) 3395 failed++; 3396 else 3397 BUG_ON(ret); 3398 } 3399 mutex_unlock(&fs_info->reclaim_bgs_lock); 3400 3401 if (found_key.offset == 0) 3402 break; 3403 key.offset = found_key.offset - 1; 3404 } 3405 ret = 0; 3406 if (failed && !retried) { 3407 failed = 0; 3408 retried = true; 3409 goto again; 3410 } else if (WARN_ON(failed && retried)) { 3411 ret = -ENOSPC; 3412 } 3413 error: 3414 btrfs_free_path(path); 3415 return ret; 3416 } 3417 3418 /* 3419 * return 1 : allocate a data chunk successfully, 3420 * return <0: errors during allocating a data chunk, 3421 * return 0 : no need to allocate a data chunk. 
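 *
 * A new data chunk is only forced when no data bytes are currently in use,
 * so that relocating the last data chunk cannot lose the data RAID profile
 * (see e.g. the comment at the call site in __btrfs_balance()).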
3422 */ 3423 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info, 3424 u64 chunk_offset) 3425 { 3426 struct btrfs_block_group *cache; 3427 u64 bytes_used; 3428 u64 chunk_type; 3429 3430 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3431 ASSERT(cache); 3432 chunk_type = cache->flags; 3433 btrfs_put_block_group(cache); 3434 3435 if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA)) 3436 return 0; 3437 3438 spin_lock(&fs_info->data_sinfo->lock); 3439 bytes_used = fs_info->data_sinfo->bytes_used; 3440 spin_unlock(&fs_info->data_sinfo->lock); 3441 3442 if (!bytes_used) { 3443 struct btrfs_trans_handle *trans; 3444 int ret; 3445 3446 trans = btrfs_join_transaction(fs_info->tree_root); 3447 if (IS_ERR(trans)) 3448 return PTR_ERR(trans); 3449 3450 ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA); 3451 btrfs_end_transaction(trans); 3452 if (ret < 0) 3453 return ret; 3454 return 1; 3455 } 3456 3457 return 0; 3458 } 3459 3460 static int insert_balance_item(struct btrfs_fs_info *fs_info, 3461 struct btrfs_balance_control *bctl) 3462 { 3463 struct btrfs_root *root = fs_info->tree_root; 3464 struct btrfs_trans_handle *trans; 3465 struct btrfs_balance_item *item; 3466 struct btrfs_disk_balance_args disk_bargs; 3467 struct btrfs_path *path; 3468 struct extent_buffer *leaf; 3469 struct btrfs_key key; 3470 int ret, err; 3471 3472 path = btrfs_alloc_path(); 3473 if (!path) 3474 return -ENOMEM; 3475 3476 trans = btrfs_start_transaction(root, 0); 3477 if (IS_ERR(trans)) { 3478 btrfs_free_path(path); 3479 return PTR_ERR(trans); 3480 } 3481 3482 key.objectid = BTRFS_BALANCE_OBJECTID; 3483 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3484 key.offset = 0; 3485 3486 ret = btrfs_insert_empty_item(trans, root, path, &key, 3487 sizeof(*item)); 3488 if (ret) 3489 goto out; 3490 3491 leaf = path->nodes[0]; 3492 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 3493 3494 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item)); 3495 3496 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data); 3497 btrfs_set_balance_data(leaf, item, &disk_bargs); 3498 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta); 3499 btrfs_set_balance_meta(leaf, item, &disk_bargs); 3500 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys); 3501 btrfs_set_balance_sys(leaf, item, &disk_bargs); 3502 3503 btrfs_set_balance_flags(leaf, item, bctl->flags); 3504 3505 btrfs_mark_buffer_dirty(leaf); 3506 out: 3507 btrfs_free_path(path); 3508 err = btrfs_commit_transaction(trans); 3509 if (err && !ret) 3510 ret = err; 3511 return ret; 3512 } 3513 3514 static int del_balance_item(struct btrfs_fs_info *fs_info) 3515 { 3516 struct btrfs_root *root = fs_info->tree_root; 3517 struct btrfs_trans_handle *trans; 3518 struct btrfs_path *path; 3519 struct btrfs_key key; 3520 int ret, err; 3521 3522 path = btrfs_alloc_path(); 3523 if (!path) 3524 return -ENOMEM; 3525 3526 trans = btrfs_start_transaction_fallback_global_rsv(root, 0); 3527 if (IS_ERR(trans)) { 3528 btrfs_free_path(path); 3529 return PTR_ERR(trans); 3530 } 3531 3532 key.objectid = BTRFS_BALANCE_OBJECTID; 3533 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3534 key.offset = 0; 3535 3536 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 3537 if (ret < 0) 3538 goto out; 3539 if (ret > 0) { 3540 ret = -ENOENT; 3541 goto out; 3542 } 3543 3544 ret = btrfs_del_item(trans, root, path); 3545 out: 3546 btrfs_free_path(path); 3547 err = btrfs_commit_transaction(trans); 3548 if (err && !ret) 3549 ret = err; 3550 return ret; 3551 } 3552 3553 /* 3554 * This is a 
heuristic used to reduce the number of chunks balanced on 3555 * resume after balance was interrupted. 3556 */ 3557 static void update_balance_args(struct btrfs_balance_control *bctl) 3558 { 3559 /* 3560 * Turn on soft mode for chunk types that were being converted. 3561 */ 3562 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) 3563 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT; 3564 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) 3565 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT; 3566 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) 3567 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT; 3568 3569 /* 3570 * Turn on usage filter if is not already used. The idea is 3571 * that chunks that we have already balanced should be 3572 * reasonably full. Don't do it for chunks that are being 3573 * converted - that will keep us from relocating unconverted 3574 * (albeit full) chunks. 3575 */ 3576 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) && 3577 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3578 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3579 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE; 3580 bctl->data.usage = 90; 3581 } 3582 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) && 3583 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3584 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3585 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE; 3586 bctl->sys.usage = 90; 3587 } 3588 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) && 3589 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3590 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3591 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE; 3592 bctl->meta.usage = 90; 3593 } 3594 } 3595 3596 /* 3597 * Clear the balance status in fs_info and delete the balance item from disk. 3598 */ 3599 static void reset_balance_state(struct btrfs_fs_info *fs_info) 3600 { 3601 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3602 int ret; 3603 3604 BUG_ON(!fs_info->balance_ctl); 3605 3606 spin_lock(&fs_info->balance_lock); 3607 fs_info->balance_ctl = NULL; 3608 spin_unlock(&fs_info->balance_lock); 3609 3610 kfree(bctl); 3611 ret = del_balance_item(fs_info); 3612 if (ret) 3613 btrfs_handle_fs_error(fs_info, ret, NULL); 3614 } 3615 3616 /* 3617 * Balance filters. Return 1 if chunk should be filtered out 3618 * (should not be balanced). 
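 * A filter is only consulted when its corresponding BTRFS_BALANCE_ARGS_*
 * flag is set in the btrfs_balance_args; see should_balance_chunk() for how
 * the filters are combined.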
3619 */ 3620 static int chunk_profiles_filter(u64 chunk_type, 3621 struct btrfs_balance_args *bargs) 3622 { 3623 chunk_type = chunk_to_extended(chunk_type) & 3624 BTRFS_EXTENDED_PROFILE_MASK; 3625 3626 if (bargs->profiles & chunk_type) 3627 return 0; 3628 3629 return 1; 3630 } 3631 3632 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, 3633 struct btrfs_balance_args *bargs) 3634 { 3635 struct btrfs_block_group *cache; 3636 u64 chunk_used; 3637 u64 user_thresh_min; 3638 u64 user_thresh_max; 3639 int ret = 1; 3640 3641 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3642 chunk_used = cache->used; 3643 3644 if (bargs->usage_min == 0) 3645 user_thresh_min = 0; 3646 else 3647 user_thresh_min = div_factor_fine(cache->length, 3648 bargs->usage_min); 3649 3650 if (bargs->usage_max == 0) 3651 user_thresh_max = 1; 3652 else if (bargs->usage_max > 100) 3653 user_thresh_max = cache->length; 3654 else 3655 user_thresh_max = div_factor_fine(cache->length, 3656 bargs->usage_max); 3657 3658 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max) 3659 ret = 0; 3660 3661 btrfs_put_block_group(cache); 3662 return ret; 3663 } 3664 3665 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, 3666 u64 chunk_offset, struct btrfs_balance_args *bargs) 3667 { 3668 struct btrfs_block_group *cache; 3669 u64 chunk_used, user_thresh; 3670 int ret = 1; 3671 3672 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3673 chunk_used = cache->used; 3674 3675 if (bargs->usage_min == 0) 3676 user_thresh = 1; 3677 else if (bargs->usage > 100) 3678 user_thresh = cache->length; 3679 else 3680 user_thresh = div_factor_fine(cache->length, bargs->usage); 3681 3682 if (chunk_used < user_thresh) 3683 ret = 0; 3684 3685 btrfs_put_block_group(cache); 3686 return ret; 3687 } 3688 3689 static int chunk_devid_filter(struct extent_buffer *leaf, 3690 struct btrfs_chunk *chunk, 3691 struct btrfs_balance_args *bargs) 3692 { 3693 struct btrfs_stripe *stripe; 3694 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3695 int i; 3696 3697 for (i = 0; i < num_stripes; i++) { 3698 stripe = btrfs_stripe_nr(chunk, i); 3699 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid) 3700 return 0; 3701 } 3702 3703 return 1; 3704 } 3705 3706 static u64 calc_data_stripes(u64 type, int num_stripes) 3707 { 3708 const int index = btrfs_bg_flags_to_raid_index(type); 3709 const int ncopies = btrfs_raid_array[index].ncopies; 3710 const int nparity = btrfs_raid_array[index].nparity; 3711 3712 return (num_stripes - nparity) / ncopies; 3713 } 3714 3715 /* [pstart, pend) */ 3716 static int chunk_drange_filter(struct extent_buffer *leaf, 3717 struct btrfs_chunk *chunk, 3718 struct btrfs_balance_args *bargs) 3719 { 3720 struct btrfs_stripe *stripe; 3721 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3722 u64 stripe_offset; 3723 u64 stripe_length; 3724 u64 type; 3725 int factor; 3726 int i; 3727 3728 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID)) 3729 return 0; 3730 3731 type = btrfs_chunk_type(leaf, chunk); 3732 factor = calc_data_stripes(type, num_stripes); 3733 3734 for (i = 0; i < num_stripes; i++) { 3735 stripe = btrfs_stripe_nr(chunk, i); 3736 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid) 3737 continue; 3738 3739 stripe_offset = btrfs_stripe_offset(leaf, stripe); 3740 stripe_length = btrfs_chunk_length(leaf, chunk); 3741 stripe_length = div_u64(stripe_length, factor); 3742 3743 if (stripe_offset < bargs->pend && 3744 stripe_offset + stripe_length > bargs->pstart) 3745 return 0; 
3746 } 3747 3748 return 1; 3749 } 3750 3751 /* [vstart, vend) */ 3752 static int chunk_vrange_filter(struct extent_buffer *leaf, 3753 struct btrfs_chunk *chunk, 3754 u64 chunk_offset, 3755 struct btrfs_balance_args *bargs) 3756 { 3757 if (chunk_offset < bargs->vend && 3758 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart) 3759 /* at least part of the chunk is inside this vrange */ 3760 return 0; 3761 3762 return 1; 3763 } 3764 3765 static int chunk_stripes_range_filter(struct extent_buffer *leaf, 3766 struct btrfs_chunk *chunk, 3767 struct btrfs_balance_args *bargs) 3768 { 3769 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3770 3771 if (bargs->stripes_min <= num_stripes 3772 && num_stripes <= bargs->stripes_max) 3773 return 0; 3774 3775 return 1; 3776 } 3777 3778 static int chunk_soft_convert_filter(u64 chunk_type, 3779 struct btrfs_balance_args *bargs) 3780 { 3781 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 3782 return 0; 3783 3784 chunk_type = chunk_to_extended(chunk_type) & 3785 BTRFS_EXTENDED_PROFILE_MASK; 3786 3787 if (bargs->target == chunk_type) 3788 return 1; 3789 3790 return 0; 3791 } 3792 3793 static int should_balance_chunk(struct extent_buffer *leaf, 3794 struct btrfs_chunk *chunk, u64 chunk_offset) 3795 { 3796 struct btrfs_fs_info *fs_info = leaf->fs_info; 3797 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3798 struct btrfs_balance_args *bargs = NULL; 3799 u64 chunk_type = btrfs_chunk_type(leaf, chunk); 3800 3801 /* type filter */ 3802 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) & 3803 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) { 3804 return 0; 3805 } 3806 3807 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3808 bargs = &bctl->data; 3809 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3810 bargs = &bctl->sys; 3811 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3812 bargs = &bctl->meta; 3813 3814 /* profiles filter */ 3815 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) && 3816 chunk_profiles_filter(chunk_type, bargs)) { 3817 return 0; 3818 } 3819 3820 /* usage filter */ 3821 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) && 3822 chunk_usage_filter(fs_info, chunk_offset, bargs)) { 3823 return 0; 3824 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3825 chunk_usage_range_filter(fs_info, chunk_offset, bargs)) { 3826 return 0; 3827 } 3828 3829 /* devid filter */ 3830 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) && 3831 chunk_devid_filter(leaf, chunk, bargs)) { 3832 return 0; 3833 } 3834 3835 /* drange filter, makes sense only with devid filter */ 3836 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) && 3837 chunk_drange_filter(leaf, chunk, bargs)) { 3838 return 0; 3839 } 3840 3841 /* vrange filter */ 3842 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) && 3843 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) { 3844 return 0; 3845 } 3846 3847 /* stripes filter */ 3848 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) && 3849 chunk_stripes_range_filter(leaf, chunk, bargs)) { 3850 return 0; 3851 } 3852 3853 /* soft profile changing mode */ 3854 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) && 3855 chunk_soft_convert_filter(chunk_type, bargs)) { 3856 return 0; 3857 } 3858 3859 /* 3860 * limited by count, must be the last filter 3861 */ 3862 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) { 3863 if (bargs->limit == 0) 3864 return 0; 3865 else 3866 bargs->limit--; 3867 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) { 3868 /* 3869 * Same logic as the 'limit' filter; the minimum cannot be 3870 * determined here 
because we do not have the global information 3871 * about the count of all chunks that satisfy the filters. 3872 */ 3873 if (bargs->limit_max == 0) 3874 return 0; 3875 else 3876 bargs->limit_max--; 3877 } 3878 3879 return 1; 3880 } 3881 3882 static int __btrfs_balance(struct btrfs_fs_info *fs_info) 3883 { 3884 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3885 struct btrfs_root *chunk_root = fs_info->chunk_root; 3886 u64 chunk_type; 3887 struct btrfs_chunk *chunk; 3888 struct btrfs_path *path = NULL; 3889 struct btrfs_key key; 3890 struct btrfs_key found_key; 3891 struct extent_buffer *leaf; 3892 int slot; 3893 int ret; 3894 int enospc_errors = 0; 3895 bool counting = true; 3896 /* The single value limit and the min/max limits use the same bytes in btrfs_balance_args (a union), so save the single values here. */ 3897 u64 limit_data = bctl->data.limit; 3898 u64 limit_meta = bctl->meta.limit; 3899 u64 limit_sys = bctl->sys.limit; 3900 u32 count_data = 0; 3901 u32 count_meta = 0; 3902 u32 count_sys = 0; 3903 int chunk_reserved = 0; 3904 3905 path = btrfs_alloc_path(); 3906 if (!path) { 3907 ret = -ENOMEM; 3908 goto error; 3909 } 3910 3911 /* zero out stat counters */ 3912 spin_lock(&fs_info->balance_lock); 3913 memset(&bctl->stat, 0, sizeof(bctl->stat)); 3914 spin_unlock(&fs_info->balance_lock); 3915 again: 3916 if (!counting) { 3917 /* 3918 * The single value limit and min/max limits use the same bytes 3919 * in btrfs_balance_args, restore them for the real balance pass. 3920 */ 3921 bctl->data.limit = limit_data; 3922 bctl->meta.limit = limit_meta; 3923 bctl->sys.limit = limit_sys; 3924 } 3925 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3926 key.offset = (u64)-1; 3927 key.type = BTRFS_CHUNK_ITEM_KEY; 3928 3929 while (1) { 3930 if ((!counting && atomic_read(&fs_info->balance_pause_req)) || 3931 atomic_read(&fs_info->balance_cancel_req)) { 3932 ret = -ECANCELED; 3933 goto error; 3934 } 3935 3936 mutex_lock(&fs_info->reclaim_bgs_lock); 3937 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3938 if (ret < 0) { 3939 mutex_unlock(&fs_info->reclaim_bgs_lock); 3940 goto error; 3941 } 3942 3943 /* 3944 * this shouldn't happen, it means the last relocate 3945 * failed 3946 */ 3947 if (ret == 0) 3948 BUG(); /* FIXME break ? 
*/ 3949 3950 ret = btrfs_previous_item(chunk_root, path, 0, 3951 BTRFS_CHUNK_ITEM_KEY); 3952 if (ret) { 3953 mutex_unlock(&fs_info->reclaim_bgs_lock); 3954 ret = 0; 3955 break; 3956 } 3957 3958 leaf = path->nodes[0]; 3959 slot = path->slots[0]; 3960 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3961 3962 if (found_key.objectid != key.objectid) { 3963 mutex_unlock(&fs_info->reclaim_bgs_lock); 3964 break; 3965 } 3966 3967 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 3968 chunk_type = btrfs_chunk_type(leaf, chunk); 3969 3970 if (!counting) { 3971 spin_lock(&fs_info->balance_lock); 3972 bctl->stat.considered++; 3973 spin_unlock(&fs_info->balance_lock); 3974 } 3975 3976 ret = should_balance_chunk(leaf, chunk, found_key.offset); 3977 3978 btrfs_release_path(path); 3979 if (!ret) { 3980 mutex_unlock(&fs_info->reclaim_bgs_lock); 3981 goto loop; 3982 } 3983 3984 if (counting) { 3985 mutex_unlock(&fs_info->reclaim_bgs_lock); 3986 spin_lock(&fs_info->balance_lock); 3987 bctl->stat.expected++; 3988 spin_unlock(&fs_info->balance_lock); 3989 3990 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3991 count_data++; 3992 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3993 count_sys++; 3994 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3995 count_meta++; 3996 3997 goto loop; 3998 } 3999 4000 /* 4001 * Apply limit_min filter, no need to check if the LIMITS 4002 * filter is used, limit_min is 0 by default 4003 */ 4004 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) && 4005 count_data < bctl->data.limit_min) 4006 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) && 4007 count_meta < bctl->meta.limit_min) 4008 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) && 4009 count_sys < bctl->sys.limit_min)) { 4010 mutex_unlock(&fs_info->reclaim_bgs_lock); 4011 goto loop; 4012 } 4013 4014 if (!chunk_reserved) { 4015 /* 4016 * We may be relocating the only data chunk we have, 4017 * which could potentially end up with losing data's 4018 * raid profile, so lets allocate an empty one in 4019 * advance. 4020 */ 4021 ret = btrfs_may_alloc_data_chunk(fs_info, 4022 found_key.offset); 4023 if (ret < 0) { 4024 mutex_unlock(&fs_info->reclaim_bgs_lock); 4025 goto error; 4026 } else if (ret == 1) { 4027 chunk_reserved = 1; 4028 } 4029 } 4030 4031 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 4032 mutex_unlock(&fs_info->reclaim_bgs_lock); 4033 if (ret == -ENOSPC) { 4034 enospc_errors++; 4035 } else if (ret == -ETXTBSY) { 4036 btrfs_info(fs_info, 4037 "skipping relocation of block group %llu due to active swapfile", 4038 found_key.offset); 4039 ret = 0; 4040 } else if (ret) { 4041 goto error; 4042 } else { 4043 spin_lock(&fs_info->balance_lock); 4044 bctl->stat.completed++; 4045 spin_unlock(&fs_info->balance_lock); 4046 } 4047 loop: 4048 if (found_key.offset == 0) 4049 break; 4050 key.offset = found_key.offset - 1; 4051 } 4052 4053 if (counting) { 4054 btrfs_release_path(path); 4055 counting = false; 4056 goto again; 4057 } 4058 error: 4059 btrfs_free_path(path); 4060 if (enospc_errors) { 4061 btrfs_info(fs_info, "%d enospc errors during balance", 4062 enospc_errors); 4063 if (!ret) 4064 ret = -ENOSPC; 4065 } 4066 4067 return ret; 4068 } 4069 4070 /** 4071 * alloc_profile_is_valid - see if a given profile is valid and reduced 4072 * @flags: profile to validate 4073 * @extended: if true @flags is treated as an extended profile 4074 */ 4075 static int alloc_profile_is_valid(u64 flags, int extended) 4076 { 4077 u64 mask = (extended ? 
BTRFS_EXTENDED_PROFILE_MASK : 4078 BTRFS_BLOCK_GROUP_PROFILE_MASK); 4079 4080 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK; 4081 4082 /* 1) check that all other bits are zeroed */ 4083 if (flags & ~mask) 4084 return 0; 4085 4086 /* 2) see if profile is reduced */ 4087 if (flags == 0) 4088 return !extended; /* "0" is valid for usual profiles */ 4089 4090 return has_single_bit_set(flags); 4091 } 4092 4093 static inline int balance_need_close(struct btrfs_fs_info *fs_info) 4094 { 4095 /* cancel requested || normal exit path */ 4096 return atomic_read(&fs_info->balance_cancel_req) || 4097 (atomic_read(&fs_info->balance_pause_req) == 0 && 4098 atomic_read(&fs_info->balance_cancel_req) == 0); 4099 } 4100 4101 /* 4102 * Validate target profile against allowed profiles and return true if it's OK. 4103 * Otherwise print the error message and return false. 4104 */ 4105 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info, 4106 const struct btrfs_balance_args *bargs, 4107 u64 allowed, const char *type) 4108 { 4109 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 4110 return true; 4111 4112 if (fs_info->sectorsize < PAGE_SIZE && 4113 bargs->target & BTRFS_BLOCK_GROUP_RAID56_MASK) { 4114 btrfs_err(fs_info, 4115 "RAID56 is not yet supported for sectorsize %u with page size %lu", 4116 fs_info->sectorsize, PAGE_SIZE); 4117 return false; 4118 } 4119 /* Profile is valid and does not have bits outside of the allowed set */ 4120 if (alloc_profile_is_valid(bargs->target, 1) && 4121 (bargs->target & ~allowed) == 0) 4122 return true; 4123 4124 btrfs_err(fs_info, "balance: invalid convert %s profile %s", 4125 type, btrfs_bg_type_to_raid_name(bargs->target)); 4126 return false; 4127 } 4128 4129 /* 4130 * Fill @buf with textual description of balance filter flags @bargs, up to 4131 * @size_buf including the terminating null. The output may be trimmed if it 4132 * does not fit into the provided buffer. 
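 *
 * For example (illustrative values), a convert filter to raid1 combined
 * with a usage filter of 50 is rendered as:
 *
 *   "convert=raid1,usage=50"
 *
 * with the trailing comma left by the append macros stripped at the end.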
4133 */ 4134 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf, 4135 u32 size_buf) 4136 { 4137 int ret; 4138 u32 size_bp = size_buf; 4139 char *bp = buf; 4140 u64 flags = bargs->flags; 4141 char tmp_buf[128] = {'\0'}; 4142 4143 if (!flags) 4144 return; 4145 4146 #define CHECK_APPEND_NOARG(a) \ 4147 do { \ 4148 ret = snprintf(bp, size_bp, (a)); \ 4149 if (ret < 0 || ret >= size_bp) \ 4150 goto out_overflow; \ 4151 size_bp -= ret; \ 4152 bp += ret; \ 4153 } while (0) 4154 4155 #define CHECK_APPEND_1ARG(a, v1) \ 4156 do { \ 4157 ret = snprintf(bp, size_bp, (a), (v1)); \ 4158 if (ret < 0 || ret >= size_bp) \ 4159 goto out_overflow; \ 4160 size_bp -= ret; \ 4161 bp += ret; \ 4162 } while (0) 4163 4164 #define CHECK_APPEND_2ARG(a, v1, v2) \ 4165 do { \ 4166 ret = snprintf(bp, size_bp, (a), (v1), (v2)); \ 4167 if (ret < 0 || ret >= size_bp) \ 4168 goto out_overflow; \ 4169 size_bp -= ret; \ 4170 bp += ret; \ 4171 } while (0) 4172 4173 if (flags & BTRFS_BALANCE_ARGS_CONVERT) 4174 CHECK_APPEND_1ARG("convert=%s,", 4175 btrfs_bg_type_to_raid_name(bargs->target)); 4176 4177 if (flags & BTRFS_BALANCE_ARGS_SOFT) 4178 CHECK_APPEND_NOARG("soft,"); 4179 4180 if (flags & BTRFS_BALANCE_ARGS_PROFILES) { 4181 btrfs_describe_block_groups(bargs->profiles, tmp_buf, 4182 sizeof(tmp_buf)); 4183 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf); 4184 } 4185 4186 if (flags & BTRFS_BALANCE_ARGS_USAGE) 4187 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage); 4188 4189 if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) 4190 CHECK_APPEND_2ARG("usage=%u..%u,", 4191 bargs->usage_min, bargs->usage_max); 4192 4193 if (flags & BTRFS_BALANCE_ARGS_DEVID) 4194 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid); 4195 4196 if (flags & BTRFS_BALANCE_ARGS_DRANGE) 4197 CHECK_APPEND_2ARG("drange=%llu..%llu,", 4198 bargs->pstart, bargs->pend); 4199 4200 if (flags & BTRFS_BALANCE_ARGS_VRANGE) 4201 CHECK_APPEND_2ARG("vrange=%llu..%llu,", 4202 bargs->vstart, bargs->vend); 4203 4204 if (flags & BTRFS_BALANCE_ARGS_LIMIT) 4205 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit); 4206 4207 if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE) 4208 CHECK_APPEND_2ARG("limit=%u..%u,", 4209 bargs->limit_min, bargs->limit_max); 4210 4211 if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) 4212 CHECK_APPEND_2ARG("stripes=%u..%u,", 4213 bargs->stripes_min, bargs->stripes_max); 4214 4215 #undef CHECK_APPEND_2ARG 4216 #undef CHECK_APPEND_1ARG 4217 #undef CHECK_APPEND_NOARG 4218 4219 out_overflow: 4220 4221 if (size_bp < size_buf) 4222 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */ 4223 else 4224 buf[0] = '\0'; 4225 } 4226 4227 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info) 4228 { 4229 u32 size_buf = 1024; 4230 char tmp_buf[192] = {'\0'}; 4231 char *buf; 4232 char *bp; 4233 u32 size_bp = size_buf; 4234 int ret; 4235 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 4236 4237 buf = kzalloc(size_buf, GFP_KERNEL); 4238 if (!buf) 4239 return; 4240 4241 bp = buf; 4242 4243 #define CHECK_APPEND_1ARG(a, v1) \ 4244 do { \ 4245 ret = snprintf(bp, size_bp, (a), (v1)); \ 4246 if (ret < 0 || ret >= size_bp) \ 4247 goto out_overflow; \ 4248 size_bp -= ret; \ 4249 bp += ret; \ 4250 } while (0) 4251 4252 if (bctl->flags & BTRFS_BALANCE_FORCE) 4253 CHECK_APPEND_1ARG("%s", "-f "); 4254 4255 if (bctl->flags & BTRFS_BALANCE_DATA) { 4256 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf)); 4257 CHECK_APPEND_1ARG("-d%s ", tmp_buf); 4258 } 4259 4260 if (bctl->flags & BTRFS_BALANCE_METADATA) { 4261 describe_balance_args(&bctl->meta, 
tmp_buf, sizeof(tmp_buf)); 4262 CHECK_APPEND_1ARG("-m%s ", tmp_buf); 4263 } 4264 4265 if (bctl->flags & BTRFS_BALANCE_SYSTEM) { 4266 describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf)); 4267 CHECK_APPEND_1ARG("-s%s ", tmp_buf); 4268 } 4269 4270 #undef CHECK_APPEND_1ARG 4271 4272 out_overflow: 4273 4274 if (size_bp < size_buf) 4275 buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */ 4276 btrfs_info(fs_info, "balance: %s %s", 4277 (bctl->flags & BTRFS_BALANCE_RESUME) ? 4278 "resume" : "start", buf); 4279 4280 kfree(buf); 4281 } 4282 4283 /* 4284 * Should be called with balance mutex held 4285 */ 4286 int btrfs_balance(struct btrfs_fs_info *fs_info, 4287 struct btrfs_balance_control *bctl, 4288 struct btrfs_ioctl_balance_args *bargs) 4289 { 4290 u64 meta_target, data_target; 4291 u64 allowed; 4292 int mixed = 0; 4293 int ret; 4294 u64 num_devices; 4295 unsigned seq; 4296 bool reducing_redundancy; 4297 int i; 4298 4299 if (btrfs_fs_closing(fs_info) || 4300 atomic_read(&fs_info->balance_pause_req) || 4301 btrfs_should_cancel_balance(fs_info)) { 4302 ret = -EINVAL; 4303 goto out; 4304 } 4305 4306 allowed = btrfs_super_incompat_flags(fs_info->super_copy); 4307 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) 4308 mixed = 1; 4309 4310 /* 4311 * In case of mixed groups both data and meta should be picked, 4312 * and identical options should be given for both of them. 4313 */ 4314 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA; 4315 if (mixed && (bctl->flags & allowed)) { 4316 if (!(bctl->flags & BTRFS_BALANCE_DATA) || 4317 !(bctl->flags & BTRFS_BALANCE_METADATA) || 4318 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) { 4319 btrfs_err(fs_info, 4320 "balance: mixed groups data and metadata options must be the same"); 4321 ret = -EINVAL; 4322 goto out; 4323 } 4324 } 4325 4326 /* 4327 * rw_devices will not change at the moment, device add/delete/replace 4328 * are exclusive 4329 */ 4330 num_devices = fs_info->fs_devices->rw_devices; 4331 4332 /* 4333 * SINGLE profile on-disk has no profile bit, but in-memory we have a 4334 * special bit for it, to make it easier to distinguish. Thus we need 4335 * to set it manually, or balance would refuse the profile. 
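 *
 * The loop below then widens @allowed with every profile whose devs_min
 * is satisfied by num_devices. For example (illustrative), with two
 * writable devices the mask gains raid0, raid1 and dup, while raid1c3
 * (devs_min == 3) and raid1c4 (devs_min == 4) stay disallowed.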
4336 */ 4337 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE; 4338 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) 4339 if (num_devices >= btrfs_raid_array[i].devs_min) 4340 allowed |= btrfs_raid_array[i].bg_flag; 4341 4342 if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") || 4343 !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") || 4344 !validate_convert_profile(fs_info, &bctl->sys, allowed, "system")) { 4345 ret = -EINVAL; 4346 goto out; 4347 } 4348 4349 /* 4350 * Allow to reduce metadata or system integrity only if force set for 4351 * profiles with redundancy (copies, parity) 4352 */ 4353 allowed = 0; 4354 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) { 4355 if (btrfs_raid_array[i].ncopies >= 2 || 4356 btrfs_raid_array[i].tolerated_failures >= 1) 4357 allowed |= btrfs_raid_array[i].bg_flag; 4358 } 4359 do { 4360 seq = read_seqbegin(&fs_info->profiles_lock); 4361 4362 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4363 (fs_info->avail_system_alloc_bits & allowed) && 4364 !(bctl->sys.target & allowed)) || 4365 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4366 (fs_info->avail_metadata_alloc_bits & allowed) && 4367 !(bctl->meta.target & allowed))) 4368 reducing_redundancy = true; 4369 else 4370 reducing_redundancy = false; 4371 4372 /* if we're not converting, the target field is uninitialized */ 4373 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4374 bctl->meta.target : fs_info->avail_metadata_alloc_bits; 4375 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4376 bctl->data.target : fs_info->avail_data_alloc_bits; 4377 } while (read_seqretry(&fs_info->profiles_lock, seq)); 4378 4379 if (reducing_redundancy) { 4380 if (bctl->flags & BTRFS_BALANCE_FORCE) { 4381 btrfs_info(fs_info, 4382 "balance: force reducing metadata redundancy"); 4383 } else { 4384 btrfs_err(fs_info, 4385 "balance: reduces metadata redundancy, use --force if you want this"); 4386 ret = -EINVAL; 4387 goto out; 4388 } 4389 } 4390 4391 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) < 4392 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) { 4393 btrfs_warn(fs_info, 4394 "balance: metadata profile %s has lower redundancy than data profile %s", 4395 btrfs_bg_type_to_raid_name(meta_target), 4396 btrfs_bg_type_to_raid_name(data_target)); 4397 } 4398 4399 ret = insert_balance_item(fs_info, bctl); 4400 if (ret && ret != -EEXIST) 4401 goto out; 4402 4403 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) { 4404 BUG_ON(ret == -EEXIST); 4405 BUG_ON(fs_info->balance_ctl); 4406 spin_lock(&fs_info->balance_lock); 4407 fs_info->balance_ctl = bctl; 4408 spin_unlock(&fs_info->balance_lock); 4409 } else { 4410 BUG_ON(ret != -EEXIST); 4411 spin_lock(&fs_info->balance_lock); 4412 update_balance_args(bctl); 4413 spin_unlock(&fs_info->balance_lock); 4414 } 4415 4416 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4417 set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4418 describe_balance_start_or_resume(fs_info); 4419 mutex_unlock(&fs_info->balance_mutex); 4420 4421 ret = __btrfs_balance(fs_info); 4422 4423 mutex_lock(&fs_info->balance_mutex); 4424 if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) { 4425 btrfs_info(fs_info, "balance: paused"); 4426 btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED); 4427 } 4428 /* 4429 * Balance can be canceled by: 4430 * 4431 * - Regular cancel request 4432 * Then ret == -ECANCELED and balance_cancel_req > 0 4433 * 4434 * - Fatal signal to "btrfs" process 4435 * 
Either the signal caught by wait_reserve_ticket() and callers 4436 * got -EINTR, or caught by btrfs_should_cancel_balance() and 4437 * got -ECANCELED. 4438 * Either way, in this case balance_cancel_req = 0, and 4439 * ret == -EINTR or ret == -ECANCELED. 4440 * 4441 * So here we only check the return value to catch canceled balance. 4442 */ 4443 else if (ret == -ECANCELED || ret == -EINTR) 4444 btrfs_info(fs_info, "balance: canceled"); 4445 else 4446 btrfs_info(fs_info, "balance: ended with status: %d", ret); 4447 4448 clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4449 4450 if (bargs) { 4451 memset(bargs, 0, sizeof(*bargs)); 4452 btrfs_update_ioctl_balance_args(fs_info, bargs); 4453 } 4454 4455 if ((ret && ret != -ECANCELED && ret != -ENOSPC) || 4456 balance_need_close(fs_info)) { 4457 reset_balance_state(fs_info); 4458 btrfs_exclop_finish(fs_info); 4459 } 4460 4461 wake_up(&fs_info->balance_wait_q); 4462 4463 return ret; 4464 out: 4465 if (bctl->flags & BTRFS_BALANCE_RESUME) 4466 reset_balance_state(fs_info); 4467 else 4468 kfree(bctl); 4469 btrfs_exclop_finish(fs_info); 4470 4471 return ret; 4472 } 4473 4474 static int balance_kthread(void *data) 4475 { 4476 struct btrfs_fs_info *fs_info = data; 4477 int ret = 0; 4478 4479 mutex_lock(&fs_info->balance_mutex); 4480 if (fs_info->balance_ctl) 4481 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL); 4482 mutex_unlock(&fs_info->balance_mutex); 4483 4484 return ret; 4485 } 4486 4487 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info) 4488 { 4489 struct task_struct *tsk; 4490 4491 mutex_lock(&fs_info->balance_mutex); 4492 if (!fs_info->balance_ctl) { 4493 mutex_unlock(&fs_info->balance_mutex); 4494 return 0; 4495 } 4496 mutex_unlock(&fs_info->balance_mutex); 4497 4498 if (btrfs_test_opt(fs_info, SKIP_BALANCE)) { 4499 btrfs_info(fs_info, "balance: resume skipped"); 4500 return 0; 4501 } 4502 4503 spin_lock(&fs_info->super_lock); 4504 ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED); 4505 fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE; 4506 spin_unlock(&fs_info->super_lock); 4507 /* 4508 * A ro->rw remount sequence should continue with the paused balance 4509 * regardless of who pauses it, system or the user as of now, so set 4510 * the resume flag. 
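 * (btrfs_recover_balance() sets the same flag when it loads a paused
 * balance item from disk during mount, so both paths resume the same way.)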
4511 */ 4512 spin_lock(&fs_info->balance_lock); 4513 fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME; 4514 spin_unlock(&fs_info->balance_lock); 4515 4516 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); 4517 return PTR_ERR_OR_ZERO(tsk); 4518 } 4519 4520 int btrfs_recover_balance(struct btrfs_fs_info *fs_info) 4521 { 4522 struct btrfs_balance_control *bctl; 4523 struct btrfs_balance_item *item; 4524 struct btrfs_disk_balance_args disk_bargs; 4525 struct btrfs_path *path; 4526 struct extent_buffer *leaf; 4527 struct btrfs_key key; 4528 int ret; 4529 4530 path = btrfs_alloc_path(); 4531 if (!path) 4532 return -ENOMEM; 4533 4534 key.objectid = BTRFS_BALANCE_OBJECTID; 4535 key.type = BTRFS_TEMPORARY_ITEM_KEY; 4536 key.offset = 0; 4537 4538 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4539 if (ret < 0) 4540 goto out; 4541 if (ret > 0) { /* ret = -ENOENT; */ 4542 ret = 0; 4543 goto out; 4544 } 4545 4546 bctl = kzalloc(sizeof(*bctl), GFP_NOFS); 4547 if (!bctl) { 4548 ret = -ENOMEM; 4549 goto out; 4550 } 4551 4552 leaf = path->nodes[0]; 4553 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 4554 4555 bctl->flags = btrfs_balance_flags(leaf, item); 4556 bctl->flags |= BTRFS_BALANCE_RESUME; 4557 4558 btrfs_balance_data(leaf, item, &disk_bargs); 4559 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs); 4560 btrfs_balance_meta(leaf, item, &disk_bargs); 4561 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs); 4562 btrfs_balance_sys(leaf, item, &disk_bargs); 4563 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs); 4564 4565 /* 4566 * This should never happen, as the paused balance state is recovered 4567 * during mount without any chance of other exclusive ops to collide. 4568 * 4569 * This gives the exclusive op status to balance and keeps in paused 4570 * state until user intervention (cancel or umount). If the ownership 4571 * cannot be assigned, show a message but do not fail. The balance 4572 * is in a paused state and must have fs_info::balance_ctl properly 4573 * set up. 
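 * (btrfs_pause_balance() and btrfs_cancel_balance() both bail out with
 * -ENOTCONN when fs_info::balance_ctl is missing, which is why it must be
 * populated even if the exclusive op status could not be set.)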
4574 */ 4575 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED)) 4576 btrfs_warn(fs_info, 4577 "balance: cannot set exclusive op status, resume manually"); 4578 4579 btrfs_release_path(path); 4580 4581 mutex_lock(&fs_info->balance_mutex); 4582 BUG_ON(fs_info->balance_ctl); 4583 spin_lock(&fs_info->balance_lock); 4584 fs_info->balance_ctl = bctl; 4585 spin_unlock(&fs_info->balance_lock); 4586 mutex_unlock(&fs_info->balance_mutex); 4587 out: 4588 btrfs_free_path(path); 4589 return ret; 4590 } 4591 4592 int btrfs_pause_balance(struct btrfs_fs_info *fs_info) 4593 { 4594 int ret = 0; 4595 4596 mutex_lock(&fs_info->balance_mutex); 4597 if (!fs_info->balance_ctl) { 4598 mutex_unlock(&fs_info->balance_mutex); 4599 return -ENOTCONN; 4600 } 4601 4602 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4603 atomic_inc(&fs_info->balance_pause_req); 4604 mutex_unlock(&fs_info->balance_mutex); 4605 4606 wait_event(fs_info->balance_wait_q, 4607 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4608 4609 mutex_lock(&fs_info->balance_mutex); 4610 /* we are good with balance_ctl ripped off from under us */ 4611 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4612 atomic_dec(&fs_info->balance_pause_req); 4613 } else { 4614 ret = -ENOTCONN; 4615 } 4616 4617 mutex_unlock(&fs_info->balance_mutex); 4618 return ret; 4619 } 4620 4621 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) 4622 { 4623 mutex_lock(&fs_info->balance_mutex); 4624 if (!fs_info->balance_ctl) { 4625 mutex_unlock(&fs_info->balance_mutex); 4626 return -ENOTCONN; 4627 } 4628 4629 /* 4630 * A paused balance with the item stored on disk can be resumed at 4631 * mount time if the mount is read-write. Otherwise it's still paused 4632 * and we must not allow cancelling as it deletes the item. 4633 */ 4634 if (sb_rdonly(fs_info->sb)) { 4635 mutex_unlock(&fs_info->balance_mutex); 4636 return -EROFS; 4637 } 4638 4639 atomic_inc(&fs_info->balance_cancel_req); 4640 /* 4641 * if we are running just wait and return, balance item is 4642 * deleted in btrfs_balance in this case 4643 */ 4644 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4645 mutex_unlock(&fs_info->balance_mutex); 4646 wait_event(fs_info->balance_wait_q, 4647 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4648 mutex_lock(&fs_info->balance_mutex); 4649 } else { 4650 mutex_unlock(&fs_info->balance_mutex); 4651 /* 4652 * Lock released to allow other waiters to continue, we'll 4653 * reexamine the status again. 
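 * By the time the mutex is retaken, another task may already have reset
 * fs_info::balance_ctl, hence the re-check below before resetting the
 * state ourselves.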
4654 */ 4655 mutex_lock(&fs_info->balance_mutex); 4656 4657 if (fs_info->balance_ctl) { 4658 reset_balance_state(fs_info); 4659 btrfs_exclop_finish(fs_info); 4660 btrfs_info(fs_info, "balance: canceled"); 4661 } 4662 } 4663 4664 BUG_ON(fs_info->balance_ctl || 4665 test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4666 atomic_dec(&fs_info->balance_cancel_req); 4667 mutex_unlock(&fs_info->balance_mutex); 4668 return 0; 4669 } 4670 4671 int btrfs_uuid_scan_kthread(void *data) 4672 { 4673 struct btrfs_fs_info *fs_info = data; 4674 struct btrfs_root *root = fs_info->tree_root; 4675 struct btrfs_key key; 4676 struct btrfs_path *path = NULL; 4677 int ret = 0; 4678 struct extent_buffer *eb; 4679 int slot; 4680 struct btrfs_root_item root_item; 4681 u32 item_size; 4682 struct btrfs_trans_handle *trans = NULL; 4683 bool closing = false; 4684 4685 path = btrfs_alloc_path(); 4686 if (!path) { 4687 ret = -ENOMEM; 4688 goto out; 4689 } 4690 4691 key.objectid = 0; 4692 key.type = BTRFS_ROOT_ITEM_KEY; 4693 key.offset = 0; 4694 4695 while (1) { 4696 if (btrfs_fs_closing(fs_info)) { 4697 closing = true; 4698 break; 4699 } 4700 ret = btrfs_search_forward(root, &key, path, 4701 BTRFS_OLDEST_GENERATION); 4702 if (ret) { 4703 if (ret > 0) 4704 ret = 0; 4705 break; 4706 } 4707 4708 if (key.type != BTRFS_ROOT_ITEM_KEY || 4709 (key.objectid < BTRFS_FIRST_FREE_OBJECTID && 4710 key.objectid != BTRFS_FS_TREE_OBJECTID) || 4711 key.objectid > BTRFS_LAST_FREE_OBJECTID) 4712 goto skip; 4713 4714 eb = path->nodes[0]; 4715 slot = path->slots[0]; 4716 item_size = btrfs_item_size(eb, slot); 4717 if (item_size < sizeof(root_item)) 4718 goto skip; 4719 4720 read_extent_buffer(eb, &root_item, 4721 btrfs_item_ptr_offset(eb, slot), 4722 (int)sizeof(root_item)); 4723 if (btrfs_root_refs(&root_item) == 0) 4724 goto skip; 4725 4726 if (!btrfs_is_empty_uuid(root_item.uuid) || 4727 !btrfs_is_empty_uuid(root_item.received_uuid)) { 4728 if (trans) 4729 goto update_tree; 4730 4731 btrfs_release_path(path); 4732 /* 4733 * 1 - subvol uuid item 4734 * 1 - received_subvol uuid item 4735 */ 4736 trans = btrfs_start_transaction(fs_info->uuid_root, 2); 4737 if (IS_ERR(trans)) { 4738 ret = PTR_ERR(trans); 4739 break; 4740 } 4741 continue; 4742 } else { 4743 goto skip; 4744 } 4745 update_tree: 4746 btrfs_release_path(path); 4747 if (!btrfs_is_empty_uuid(root_item.uuid)) { 4748 ret = btrfs_uuid_tree_add(trans, root_item.uuid, 4749 BTRFS_UUID_KEY_SUBVOL, 4750 key.objectid); 4751 if (ret < 0) { 4752 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4753 ret); 4754 break; 4755 } 4756 } 4757 4758 if (!btrfs_is_empty_uuid(root_item.received_uuid)) { 4759 ret = btrfs_uuid_tree_add(trans, 4760 root_item.received_uuid, 4761 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4762 key.objectid); 4763 if (ret < 0) { 4764 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4765 ret); 4766 break; 4767 } 4768 } 4769 4770 skip: 4771 btrfs_release_path(path); 4772 if (trans) { 4773 ret = btrfs_end_transaction(trans); 4774 trans = NULL; 4775 if (ret) 4776 break; 4777 } 4778 4779 if (key.offset < (u64)-1) { 4780 key.offset++; 4781 } else if (key.type < BTRFS_ROOT_ITEM_KEY) { 4782 key.offset = 0; 4783 key.type = BTRFS_ROOT_ITEM_KEY; 4784 } else if (key.objectid < (u64)-1) { 4785 key.offset = 0; 4786 key.type = BTRFS_ROOT_ITEM_KEY; 4787 key.objectid++; 4788 } else { 4789 break; 4790 } 4791 cond_resched(); 4792 } 4793 4794 out: 4795 btrfs_free_path(path); 4796 if (trans && !IS_ERR(trans)) 4797 btrfs_end_transaction(trans); 4798 if (ret) 4799 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread 
failed %d", ret); 4800 else if (!closing) 4801 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); 4802 up(&fs_info->uuid_tree_rescan_sem); 4803 return 0; 4804 } 4805 4806 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info) 4807 { 4808 struct btrfs_trans_handle *trans; 4809 struct btrfs_root *tree_root = fs_info->tree_root; 4810 struct btrfs_root *uuid_root; 4811 struct task_struct *task; 4812 int ret; 4813 4814 /* 4815 * 1 - root node 4816 * 1 - root item 4817 */ 4818 trans = btrfs_start_transaction(tree_root, 2); 4819 if (IS_ERR(trans)) 4820 return PTR_ERR(trans); 4821 4822 uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID); 4823 if (IS_ERR(uuid_root)) { 4824 ret = PTR_ERR(uuid_root); 4825 btrfs_abort_transaction(trans, ret); 4826 btrfs_end_transaction(trans); 4827 return ret; 4828 } 4829 4830 fs_info->uuid_root = uuid_root; 4831 4832 ret = btrfs_commit_transaction(trans); 4833 if (ret) 4834 return ret; 4835 4836 down(&fs_info->uuid_tree_rescan_sem); 4837 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid"); 4838 if (IS_ERR(task)) { 4839 /* fs_info->update_uuid_tree_gen remains 0 in all error cases */ 4840 btrfs_warn(fs_info, "failed to start uuid_scan task"); 4841 up(&fs_info->uuid_tree_rescan_sem); 4842 return PTR_ERR(task); 4843 } 4844 4845 return 0; 4846 } 4847 4848 /* 4849 * Shrinking a device means finding all of the device extents past 4850 * the new size, and then following the back refs to the chunks. 4851 * The chunk relocation code actually frees the device extent. 4852 */ 4853 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) 4854 { 4855 struct btrfs_fs_info *fs_info = device->fs_info; 4856 struct btrfs_root *root = fs_info->dev_root; 4857 struct btrfs_trans_handle *trans; 4858 struct btrfs_dev_extent *dev_extent = NULL; 4859 struct btrfs_path *path; 4860 u64 length; 4861 u64 chunk_offset; 4862 int ret; 4863 int slot; 4864 int failed = 0; 4865 bool retried = false; 4866 struct extent_buffer *l; 4867 struct btrfs_key key; 4868 struct btrfs_super_block *super_copy = fs_info->super_copy; 4869 u64 old_total = btrfs_super_total_bytes(super_copy); 4870 u64 old_size = btrfs_device_get_total_bytes(device); 4871 u64 diff; 4872 u64 start; 4873 4874 new_size = round_down(new_size, fs_info->sectorsize); 4875 start = new_size; 4876 diff = round_down(old_size - new_size, fs_info->sectorsize); 4877 4878 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 4879 return -EINVAL; 4880 4881 path = btrfs_alloc_path(); 4882 if (!path) 4883 return -ENOMEM; 4884 4885 path->reada = READA_BACK; 4886 4887 trans = btrfs_start_transaction(root, 0); 4888 if (IS_ERR(trans)) { 4889 btrfs_free_path(path); 4890 return PTR_ERR(trans); 4891 } 4892 4893 mutex_lock(&fs_info->chunk_mutex); 4894 4895 btrfs_device_set_total_bytes(device, new_size); 4896 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 4897 device->fs_devices->total_rw_bytes -= diff; 4898 atomic64_sub(diff, &fs_info->free_chunk_space); 4899 } 4900 4901 /* 4902 * Once the device's size has been set to the new size, ensure all 4903 * in-memory chunks are synced to disk so that the loop below sees them 4904 * and relocates them accordingly. 
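 * Committing is only needed when a pending (not yet committed) extent
 * overlaps the shrunk range, which is what contains_pending_extent()
 * checks below; otherwise ending the transaction is enough.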
4905 */ 4906 if (contains_pending_extent(device, &start, diff)) { 4907 mutex_unlock(&fs_info->chunk_mutex); 4908 ret = btrfs_commit_transaction(trans); 4909 if (ret) 4910 goto done; 4911 } else { 4912 mutex_unlock(&fs_info->chunk_mutex); 4913 btrfs_end_transaction(trans); 4914 } 4915 4916 again: 4917 key.objectid = device->devid; 4918 key.offset = (u64)-1; 4919 key.type = BTRFS_DEV_EXTENT_KEY; 4920 4921 do { 4922 mutex_lock(&fs_info->reclaim_bgs_lock); 4923 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4924 if (ret < 0) { 4925 mutex_unlock(&fs_info->reclaim_bgs_lock); 4926 goto done; 4927 } 4928 4929 ret = btrfs_previous_item(root, path, 0, key.type); 4930 if (ret) { 4931 mutex_unlock(&fs_info->reclaim_bgs_lock); 4932 if (ret < 0) 4933 goto done; 4934 ret = 0; 4935 btrfs_release_path(path); 4936 break; 4937 } 4938 4939 l = path->nodes[0]; 4940 slot = path->slots[0]; 4941 btrfs_item_key_to_cpu(l, &key, path->slots[0]); 4942 4943 if (key.objectid != device->devid) { 4944 mutex_unlock(&fs_info->reclaim_bgs_lock); 4945 btrfs_release_path(path); 4946 break; 4947 } 4948 4949 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 4950 length = btrfs_dev_extent_length(l, dev_extent); 4951 4952 if (key.offset + length <= new_size) { 4953 mutex_unlock(&fs_info->reclaim_bgs_lock); 4954 btrfs_release_path(path); 4955 break; 4956 } 4957 4958 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); 4959 btrfs_release_path(path); 4960 4961 /* 4962 * We may be relocating the only data chunk we have, 4963 * which could potentially end up with losing data's 4964 * raid profile, so lets allocate an empty one in 4965 * advance. 4966 */ 4967 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset); 4968 if (ret < 0) { 4969 mutex_unlock(&fs_info->reclaim_bgs_lock); 4970 goto done; 4971 } 4972 4973 ret = btrfs_relocate_chunk(fs_info, chunk_offset); 4974 mutex_unlock(&fs_info->reclaim_bgs_lock); 4975 if (ret == -ENOSPC) { 4976 failed++; 4977 } else if (ret) { 4978 if (ret == -ETXTBSY) { 4979 btrfs_warn(fs_info, 4980 "could not shrink block group %llu due to active swapfile", 4981 chunk_offset); 4982 } 4983 goto done; 4984 } 4985 } while (key.offset-- > 0); 4986 4987 if (failed && !retried) { 4988 failed = 0; 4989 retried = true; 4990 goto again; 4991 } else if (failed && retried) { 4992 ret = -ENOSPC; 4993 goto done; 4994 } 4995 4996 /* Shrinking succeeded, else we would be at "done". */ 4997 trans = btrfs_start_transaction(root, 0); 4998 if (IS_ERR(trans)) { 4999 ret = PTR_ERR(trans); 5000 goto done; 5001 } 5002 5003 mutex_lock(&fs_info->chunk_mutex); 5004 /* Clear all state bits beyond the shrunk device size */ 5005 clear_extent_bits(&device->alloc_state, new_size, (u64)-1, 5006 CHUNK_STATE_MASK); 5007 5008 btrfs_device_set_disk_total_bytes(device, new_size); 5009 if (list_empty(&device->post_commit_list)) 5010 list_add_tail(&device->post_commit_list, 5011 &trans->transaction->dev_update_list); 5012 5013 WARN_ON(diff > old_total); 5014 btrfs_set_super_total_bytes(super_copy, 5015 round_down(old_total - diff, fs_info->sectorsize)); 5016 mutex_unlock(&fs_info->chunk_mutex); 5017 5018 btrfs_reserve_chunk_metadata(trans, false); 5019 /* Now btrfs_update_device() will change the on-disk size. 
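 *
 * The device item lives in the chunk tree, which is why the update is
 * bracketed by the chunk metadata reservation above and its release below.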
*/ 5020 ret = btrfs_update_device(trans, device); 5021 btrfs_trans_release_chunk_metadata(trans); 5022 if (ret < 0) { 5023 btrfs_abort_transaction(trans, ret); 5024 btrfs_end_transaction(trans); 5025 } else { 5026 ret = btrfs_commit_transaction(trans); 5027 } 5028 done: 5029 btrfs_free_path(path); 5030 if (ret) { 5031 mutex_lock(&fs_info->chunk_mutex); 5032 btrfs_device_set_total_bytes(device, old_size); 5033 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 5034 device->fs_devices->total_rw_bytes += diff; 5035 atomic64_add(diff, &fs_info->free_chunk_space); 5036 mutex_unlock(&fs_info->chunk_mutex); 5037 } 5038 return ret; 5039 } 5040 5041 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info, 5042 struct btrfs_key *key, 5043 struct btrfs_chunk *chunk, int item_size) 5044 { 5045 struct btrfs_super_block *super_copy = fs_info->super_copy; 5046 struct btrfs_disk_key disk_key; 5047 u32 array_size; 5048 u8 *ptr; 5049 5050 lockdep_assert_held(&fs_info->chunk_mutex); 5051 5052 array_size = btrfs_super_sys_array_size(super_copy); 5053 if (array_size + item_size + sizeof(disk_key) 5054 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) 5055 return -EFBIG; 5056 5057 ptr = super_copy->sys_chunk_array + array_size; 5058 btrfs_cpu_key_to_disk(&disk_key, key); 5059 memcpy(ptr, &disk_key, sizeof(disk_key)); 5060 ptr += sizeof(disk_key); 5061 memcpy(ptr, chunk, item_size); 5062 item_size += sizeof(disk_key); 5063 btrfs_set_super_sys_array_size(super_copy, array_size + item_size); 5064 5065 return 0; 5066 } 5067 5068 /* 5069 * sort the devices in descending order by max_avail, total_avail 5070 */ 5071 static int btrfs_cmp_device_info(const void *a, const void *b) 5072 { 5073 const struct btrfs_device_info *di_a = a; 5074 const struct btrfs_device_info *di_b = b; 5075 5076 if (di_a->max_avail > di_b->max_avail) 5077 return -1; 5078 if (di_a->max_avail < di_b->max_avail) 5079 return 1; 5080 if (di_a->total_avail > di_b->total_avail) 5081 return -1; 5082 if (di_a->total_avail < di_b->total_avail) 5083 return 1; 5084 return 0; 5085 } 5086 5087 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type) 5088 { 5089 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK)) 5090 return; 5091 5092 btrfs_set_fs_incompat(info, RAID56); 5093 } 5094 5095 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type) 5096 { 5097 if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4))) 5098 return; 5099 5100 btrfs_set_fs_incompat(info, RAID1C34); 5101 } 5102 5103 /* 5104 * Structure used internally for btrfs_create_chunk() function. 5105 * Wraps needed parameters. 
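 *
 * For example (illustrative), a raid10 data chunk under the regular
 * allocation policy starts from sub_stripes = 2, dev_stripes = 1,
 * ncopies = 2 and nparity = 0; gather_device_info() and
 * decide_stripe_size() then fill in ndevs, stripe_size and chunk_size.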
5106 */ 5107 struct alloc_chunk_ctl { 5108 u64 start; 5109 u64 type; 5110 /* Total number of stripes to allocate */ 5111 int num_stripes; 5112 /* sub_stripes info for map */ 5113 int sub_stripes; 5114 /* Stripes per device */ 5115 int dev_stripes; 5116 /* Maximum number of devices to use */ 5117 int devs_max; 5118 /* Minimum number of devices to use */ 5119 int devs_min; 5120 /* ndevs has to be a multiple of this */ 5121 int devs_increment; 5122 /* Number of copies */ 5123 int ncopies; 5124 /* Number of stripes worth of bytes to store parity information */ 5125 int nparity; 5126 u64 max_stripe_size; 5127 u64 max_chunk_size; 5128 u64 dev_extent_min; 5129 u64 stripe_size; 5130 u64 chunk_size; 5131 int ndevs; 5132 }; 5133 5134 static void init_alloc_chunk_ctl_policy_regular( 5135 struct btrfs_fs_devices *fs_devices, 5136 struct alloc_chunk_ctl *ctl) 5137 { 5138 u64 type = ctl->type; 5139 5140 if (type & BTRFS_BLOCK_GROUP_DATA) { 5141 ctl->max_stripe_size = SZ_1G; 5142 ctl->max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE; 5143 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 5144 /* For larger filesystems, use larger metadata chunks */ 5145 if (fs_devices->total_rw_bytes > 50ULL * SZ_1G) 5146 ctl->max_stripe_size = SZ_1G; 5147 else 5148 ctl->max_stripe_size = SZ_256M; 5149 ctl->max_chunk_size = ctl->max_stripe_size; 5150 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 5151 ctl->max_stripe_size = SZ_32M; 5152 ctl->max_chunk_size = 2 * ctl->max_stripe_size; 5153 ctl->devs_max = min_t(int, ctl->devs_max, 5154 BTRFS_MAX_DEVS_SYS_CHUNK); 5155 } else { 5156 BUG(); 5157 } 5158 5159 /* We don't want a chunk larger than 10% of writable space */ 5160 ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1), 5161 ctl->max_chunk_size); 5162 ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes; 5163 } 5164 5165 static void init_alloc_chunk_ctl_policy_zoned( 5166 struct btrfs_fs_devices *fs_devices, 5167 struct alloc_chunk_ctl *ctl) 5168 { 5169 u64 zone_size = fs_devices->fs_info->zone_size; 5170 u64 limit; 5171 int min_num_stripes = ctl->devs_min * ctl->dev_stripes; 5172 int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies; 5173 u64 min_chunk_size = min_data_stripes * zone_size; 5174 u64 type = ctl->type; 5175 5176 ctl->max_stripe_size = zone_size; 5177 if (type & BTRFS_BLOCK_GROUP_DATA) { 5178 ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE, 5179 zone_size); 5180 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 5181 ctl->max_chunk_size = ctl->max_stripe_size; 5182 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 5183 ctl->max_chunk_size = 2 * ctl->max_stripe_size; 5184 ctl->devs_max = min_t(int, ctl->devs_max, 5185 BTRFS_MAX_DEVS_SYS_CHUNK); 5186 } else { 5187 BUG(); 5188 } 5189 5190 /* We don't want a chunk larger than 10% of writable space */ 5191 limit = max(round_down(div_factor(fs_devices->total_rw_bytes, 1), 5192 zone_size), 5193 min_chunk_size); 5194 ctl->max_chunk_size = min(limit, ctl->max_chunk_size); 5195 ctl->dev_extent_min = zone_size * ctl->dev_stripes; 5196 } 5197 5198 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices, 5199 struct alloc_chunk_ctl *ctl) 5200 { 5201 int index = btrfs_bg_flags_to_raid_index(ctl->type); 5202 5203 ctl->sub_stripes = btrfs_raid_array[index].sub_stripes; 5204 ctl->dev_stripes = btrfs_raid_array[index].dev_stripes; 5205 ctl->devs_max = btrfs_raid_array[index].devs_max; 5206 if (!ctl->devs_max) 5207 ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info); 5208 ctl->devs_min = btrfs_raid_array[index].devs_min; 5209 
ctl->devs_increment = btrfs_raid_array[index].devs_increment; 5210 ctl->ncopies = btrfs_raid_array[index].ncopies; 5211 ctl->nparity = btrfs_raid_array[index].nparity; 5212 ctl->ndevs = 0; 5213 5214 switch (fs_devices->chunk_alloc_policy) { 5215 case BTRFS_CHUNK_ALLOC_REGULAR: 5216 init_alloc_chunk_ctl_policy_regular(fs_devices, ctl); 5217 break; 5218 case BTRFS_CHUNK_ALLOC_ZONED: 5219 init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl); 5220 break; 5221 default: 5222 BUG(); 5223 } 5224 } 5225 5226 static int gather_device_info(struct btrfs_fs_devices *fs_devices, 5227 struct alloc_chunk_ctl *ctl, 5228 struct btrfs_device_info *devices_info) 5229 { 5230 struct btrfs_fs_info *info = fs_devices->fs_info; 5231 struct btrfs_device *device; 5232 u64 total_avail; 5233 u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes; 5234 int ret; 5235 int ndevs = 0; 5236 u64 max_avail; 5237 u64 dev_offset; 5238 5239 /* 5240 * in the first pass through the devices list, we gather information 5241 * about the available holes on each device. 5242 */ 5243 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { 5244 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 5245 WARN(1, KERN_ERR 5246 "BTRFS: read-only device in alloc_list\n"); 5247 continue; 5248 } 5249 5250 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 5251 &device->dev_state) || 5252 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 5253 continue; 5254 5255 if (device->total_bytes > device->bytes_used) 5256 total_avail = device->total_bytes - device->bytes_used; 5257 else 5258 total_avail = 0; 5259 5260 /* If there is no space on this device, skip it. */ 5261 if (total_avail < ctl->dev_extent_min) 5262 continue; 5263 5264 ret = find_free_dev_extent(device, dev_extent_want, &dev_offset, 5265 &max_avail); 5266 if (ret && ret != -ENOSPC) 5267 return ret; 5268 5269 if (ret == 0) 5270 max_avail = dev_extent_want; 5271 5272 if (max_avail < ctl->dev_extent_min) { 5273 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5274 btrfs_debug(info, 5275 "%s: devid %llu has no free space, have=%llu want=%llu", 5276 __func__, device->devid, max_avail, 5277 ctl->dev_extent_min); 5278 continue; 5279 } 5280 5281 if (ndevs == fs_devices->rw_devices) { 5282 WARN(1, "%s: found more than %llu devices\n", 5283 __func__, fs_devices->rw_devices); 5284 break; 5285 } 5286 devices_info[ndevs].dev_offset = dev_offset; 5287 devices_info[ndevs].max_avail = max_avail; 5288 devices_info[ndevs].total_avail = total_avail; 5289 devices_info[ndevs].dev = device; 5290 ++ndevs; 5291 } 5292 ctl->ndevs = ndevs; 5293 5294 /* 5295 * now sort the devices by hole size / available space 5296 */ 5297 sort(devices_info, ndevs, sizeof(struct btrfs_device_info), 5298 btrfs_cmp_device_info, NULL); 5299 5300 return 0; 5301 } 5302 5303 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl, 5304 struct btrfs_device_info *devices_info) 5305 { 5306 /* Number of stripes that count for block group size */ 5307 int data_stripes; 5308 5309 /* 5310 * The primary goal is to maximize the number of stripes, so use as 5311 * many devices as possible, even if the stripes are not maximum sized. 5312 * 5313 * The DUP profile stores more than one stripe per device, the 5314 * max_avail is the total size so we have to adjust. 
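 *
 * For example (illustrative numbers), dup has dev_stripes == 2, so a
 * 10GiB hole on the most constrained device yields an initial
 * stripe_size of 5GiB, before the max_chunk_size clamp and the
 * BTRFS_STRIPE_LEN rounding below.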
5315 */ 5316 ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail, 5317 ctl->dev_stripes); 5318 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5319 5320 /* This will have to be fixed for RAID1 and RAID10 over more drives */ 5321 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5322 5323 /* 5324 * Use the number of data stripes to figure out how big this chunk is 5325 * really going to be in terms of logical address space, and compare 5326 * that answer with the max chunk size. If it's higher, we try to 5327 * reduce stripe_size. 5328 */ 5329 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) { 5330 /* 5331 * Reduce stripe_size, round it up to a 16MB boundary again and 5332 * then use it, unless it ends up being even bigger than the 5333 * previous value we had already. 5334 */ 5335 ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size, 5336 data_stripes), SZ_16M), 5337 ctl->stripe_size); 5338 } 5339 5340 /* Align to BTRFS_STRIPE_LEN */ 5341 ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN); 5342 ctl->chunk_size = ctl->stripe_size * data_stripes; 5343 5344 return 0; 5345 } 5346 5347 static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl, 5348 struct btrfs_device_info *devices_info) 5349 { 5350 u64 zone_size = devices_info[0].dev->zone_info->zone_size; 5351 /* Number of stripes that count for block group size */ 5352 int data_stripes; 5353 5354 /* 5355 * It should hold because: 5356 * dev_extent_min == dev_extent_want == zone_size * dev_stripes 5357 */ 5358 ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min); 5359 5360 ctl->stripe_size = zone_size; 5361 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5362 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5363 5364 /* stripe_size is fixed in a zoned filesystem. Reduce ndevs instead. */ 5365 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) { 5366 ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies, 5367 ctl->stripe_size) + ctl->nparity, 5368 ctl->dev_stripes); 5369 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5370 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5371 ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size); 5372 } 5373 5374 ctl->chunk_size = ctl->stripe_size * data_stripes; 5375 5376 return 0; 5377 } 5378 5379 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices, 5380 struct alloc_chunk_ctl *ctl, 5381 struct btrfs_device_info *devices_info) 5382 { 5383 struct btrfs_fs_info *info = fs_devices->fs_info; 5384 5385 /* 5386 * Round down to number of usable stripes, devs_increment can be any 5387 * number so we can't use round_down() that requires power of 2, while 5388 * rounddown is safe. 
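 *
 * For example (illustrative), raid10 uses devs_increment == 2, so five
 * usable devices are rounded down to four before the devs_min check.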
5389 */ 5390 ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment); 5391 5392 if (ctl->ndevs < ctl->devs_min) { 5393 if (btrfs_test_opt(info, ENOSPC_DEBUG)) { 5394 btrfs_debug(info, 5395 "%s: not enough devices with free space: have=%d minimum required=%d", 5396 __func__, ctl->ndevs, ctl->devs_min); 5397 } 5398 return -ENOSPC; 5399 } 5400 5401 ctl->ndevs = min(ctl->ndevs, ctl->devs_max); 5402 5403 switch (fs_devices->chunk_alloc_policy) { 5404 case BTRFS_CHUNK_ALLOC_REGULAR: 5405 return decide_stripe_size_regular(ctl, devices_info); 5406 case BTRFS_CHUNK_ALLOC_ZONED: 5407 return decide_stripe_size_zoned(ctl, devices_info); 5408 default: 5409 BUG(); 5410 } 5411 } 5412 5413 static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans, 5414 struct alloc_chunk_ctl *ctl, 5415 struct btrfs_device_info *devices_info) 5416 { 5417 struct btrfs_fs_info *info = trans->fs_info; 5418 struct map_lookup *map = NULL; 5419 struct extent_map_tree *em_tree; 5420 struct btrfs_block_group *block_group; 5421 struct extent_map *em; 5422 u64 start = ctl->start; 5423 u64 type = ctl->type; 5424 int ret; 5425 int i; 5426 int j; 5427 5428 map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS); 5429 if (!map) 5430 return ERR_PTR(-ENOMEM); 5431 map->num_stripes = ctl->num_stripes; 5432 5433 for (i = 0; i < ctl->ndevs; ++i) { 5434 for (j = 0; j < ctl->dev_stripes; ++j) { 5435 int s = i * ctl->dev_stripes + j; 5436 map->stripes[s].dev = devices_info[i].dev; 5437 map->stripes[s].physical = devices_info[i].dev_offset + 5438 j * ctl->stripe_size; 5439 } 5440 } 5441 map->stripe_len = BTRFS_STRIPE_LEN; 5442 map->io_align = BTRFS_STRIPE_LEN; 5443 map->io_width = BTRFS_STRIPE_LEN; 5444 map->type = type; 5445 map->sub_stripes = ctl->sub_stripes; 5446 5447 trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size); 5448 5449 em = alloc_extent_map(); 5450 if (!em) { 5451 kfree(map); 5452 return ERR_PTR(-ENOMEM); 5453 } 5454 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 5455 em->map_lookup = map; 5456 em->start = start; 5457 em->len = ctl->chunk_size; 5458 em->block_start = 0; 5459 em->block_len = em->len; 5460 em->orig_block_len = ctl->stripe_size; 5461 5462 em_tree = &info->mapping_tree; 5463 write_lock(&em_tree->lock); 5464 ret = add_extent_mapping(em_tree, em, 0); 5465 if (ret) { 5466 write_unlock(&em_tree->lock); 5467 free_extent_map(em); 5468 return ERR_PTR(ret); 5469 } 5470 write_unlock(&em_tree->lock); 5471 5472 block_group = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size); 5473 if (IS_ERR(block_group)) 5474 goto error_del_extent; 5475 5476 for (i = 0; i < map->num_stripes; i++) { 5477 struct btrfs_device *dev = map->stripes[i].dev; 5478 5479 btrfs_device_set_bytes_used(dev, 5480 dev->bytes_used + ctl->stripe_size); 5481 if (list_empty(&dev->post_commit_list)) 5482 list_add_tail(&dev->post_commit_list, 5483 &trans->transaction->dev_update_list); 5484 } 5485 5486 atomic64_sub(ctl->stripe_size * map->num_stripes, 5487 &info->free_chunk_space); 5488 5489 free_extent_map(em); 5490 check_raid56_incompat_flag(info, type); 5491 check_raid1c34_incompat_flag(info, type); 5492 5493 return block_group; 5494 5495 error_del_extent: 5496 write_lock(&em_tree->lock); 5497 remove_extent_mapping(em_tree, em); 5498 write_unlock(&em_tree->lock); 5499 5500 /* One for our allocation */ 5501 free_extent_map(em); 5502 /* One for the tree reference */ 5503 free_extent_map(em); 5504 5505 return block_group; 5506 } 5507 5508 struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans, 5509 u64 
type) 5510 { 5511 struct btrfs_fs_info *info = trans->fs_info; 5512 struct btrfs_fs_devices *fs_devices = info->fs_devices; 5513 struct btrfs_device_info *devices_info = NULL; 5514 struct alloc_chunk_ctl ctl; 5515 struct btrfs_block_group *block_group; 5516 int ret; 5517 5518 lockdep_assert_held(&info->chunk_mutex); 5519 5520 if (!alloc_profile_is_valid(type, 0)) { 5521 ASSERT(0); 5522 return ERR_PTR(-EINVAL); 5523 } 5524 5525 if (list_empty(&fs_devices->alloc_list)) { 5526 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5527 btrfs_debug(info, "%s: no writable device", __func__); 5528 return ERR_PTR(-ENOSPC); 5529 } 5530 5531 if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { 5532 btrfs_err(info, "invalid chunk type 0x%llx requested", type); 5533 ASSERT(0); 5534 return ERR_PTR(-EINVAL); 5535 } 5536 5537 ctl.start = find_next_chunk(info); 5538 ctl.type = type; 5539 init_alloc_chunk_ctl(fs_devices, &ctl); 5540 5541 devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info), 5542 GFP_NOFS); 5543 if (!devices_info) 5544 return ERR_PTR(-ENOMEM); 5545 5546 ret = gather_device_info(fs_devices, &ctl, devices_info); 5547 if (ret < 0) { 5548 block_group = ERR_PTR(ret); 5549 goto out; 5550 } 5551 5552 ret = decide_stripe_size(fs_devices, &ctl, devices_info); 5553 if (ret < 0) { 5554 block_group = ERR_PTR(ret); 5555 goto out; 5556 } 5557 5558 block_group = create_chunk(trans, &ctl, devices_info); 5559 5560 out: 5561 kfree(devices_info); 5562 return block_group; 5563 } 5564 5565 /* 5566 * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to 5567 * phase 1 of chunk allocation. It belongs to phase 2 only when allocating system 5568 * chunks. 5569 * 5570 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 5571 * phases. 5572 */ 5573 int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans, 5574 struct btrfs_block_group *bg) 5575 { 5576 struct btrfs_fs_info *fs_info = trans->fs_info; 5577 struct btrfs_root *chunk_root = fs_info->chunk_root; 5578 struct btrfs_key key; 5579 struct btrfs_chunk *chunk; 5580 struct btrfs_stripe *stripe; 5581 struct extent_map *em; 5582 struct map_lookup *map; 5583 size_t item_size; 5584 int i; 5585 int ret; 5586 5587 /* 5588 * We take the chunk_mutex for 2 reasons: 5589 * 5590 * 1) Updates and insertions in the chunk btree must be done while holding 5591 * the chunk_mutex, as well as updating the system chunk array in the 5592 * superblock. See the comment on top of btrfs_chunk_alloc() for the 5593 * details; 5594 * 5595 * 2) To prevent races with the final phase of a device replace operation 5596 * that replaces the device object associated with the map's stripes, 5597 * because the device object's id can change at any time during that 5598 * final phase of the device replace operation 5599 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 5600 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID, 5601 * which would cause a failure when updating the device item, which does 5602 * not exist, or persisting a stripe of the chunk item with such ID. 5603 * Here we can't use the device_list_mutex because our caller already 5604 * has locked the chunk_mutex, and the final phase of device replace 5605 * acquires both mutexes - first the device_list_mutex and then the 5606 * chunk_mutex. Using any of those two mutexes protects us from a 5607 * concurrent device replace. 
5608 */ 5609 lockdep_assert_held(&fs_info->chunk_mutex); 5610 5611 em = btrfs_get_chunk_map(fs_info, bg->start, bg->length); 5612 if (IS_ERR(em)) { 5613 ret = PTR_ERR(em); 5614 btrfs_abort_transaction(trans, ret); 5615 return ret; 5616 } 5617 5618 map = em->map_lookup; 5619 item_size = btrfs_chunk_item_size(map->num_stripes); 5620 5621 chunk = kzalloc(item_size, GFP_NOFS); 5622 if (!chunk) { 5623 ret = -ENOMEM; 5624 btrfs_abort_transaction(trans, ret); 5625 goto out; 5626 } 5627 5628 for (i = 0; i < map->num_stripes; i++) { 5629 struct btrfs_device *device = map->stripes[i].dev; 5630 5631 ret = btrfs_update_device(trans, device); 5632 if (ret) 5633 goto out; 5634 } 5635 5636 stripe = &chunk->stripe; 5637 for (i = 0; i < map->num_stripes; i++) { 5638 struct btrfs_device *device = map->stripes[i].dev; 5639 const u64 dev_offset = map->stripes[i].physical; 5640 5641 btrfs_set_stack_stripe_devid(stripe, device->devid); 5642 btrfs_set_stack_stripe_offset(stripe, dev_offset); 5643 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE); 5644 stripe++; 5645 } 5646 5647 btrfs_set_stack_chunk_length(chunk, bg->length); 5648 btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID); 5649 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len); 5650 btrfs_set_stack_chunk_type(chunk, map->type); 5651 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes); 5652 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len); 5653 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len); 5654 btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize); 5655 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes); 5656 5657 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 5658 key.type = BTRFS_CHUNK_ITEM_KEY; 5659 key.offset = bg->start; 5660 5661 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); 5662 if (ret) 5663 goto out; 5664 5665 bg->chunk_item_inserted = 1; 5666 5667 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 5668 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size); 5669 if (ret) 5670 goto out; 5671 } 5672 5673 out: 5674 kfree(chunk); 5675 free_extent_map(em); 5676 return ret; 5677 } 5678 5679 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans) 5680 { 5681 struct btrfs_fs_info *fs_info = trans->fs_info; 5682 u64 alloc_profile; 5683 struct btrfs_block_group *meta_bg; 5684 struct btrfs_block_group *sys_bg; 5685 5686 /* 5687 * When adding a new device for sprouting, the seed device is read-only 5688 * so we must first allocate a metadata and a system chunk. But before 5689 * adding the block group items to the extent, device and chunk btrees, 5690 * we must first: 5691 * 5692 * 1) Create both chunks without doing any changes to the btrees, as 5693 * otherwise we would get -ENOSPC since the block groups from the 5694 * seed device are read-only; 5695 * 5696 * 2) Add the device item for the new sprout device - finishing the setup 5697 * of a new block group requires updating the device item in the chunk 5698 * btree, so it must exist when we attempt to do it. The previous step 5699 * ensures this does not fail with -ENOSPC. 5700 * 5701 * After that we can add the block group items to their btrees: 5702 * update existing device item in the chunk btree, add a new block group 5703 * item to the extent btree, add a new chunk item to the chunk btree and 5704 * finally add the new device extent items to the devices btree. 
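 *
 * (Only the chunks themselves are created by the two btrfs_create_chunk()
 * calls below; their block group items are persisted later, as described
 * above.)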
5705 */ 5706 5707 alloc_profile = btrfs_metadata_alloc_profile(fs_info); 5708 meta_bg = btrfs_create_chunk(trans, alloc_profile); 5709 if (IS_ERR(meta_bg)) 5710 return PTR_ERR(meta_bg); 5711 5712 alloc_profile = btrfs_system_alloc_profile(fs_info); 5713 sys_bg = btrfs_create_chunk(trans, alloc_profile); 5714 if (IS_ERR(sys_bg)) 5715 return PTR_ERR(sys_bg); 5716 5717 return 0; 5718 } 5719 5720 static inline int btrfs_chunk_max_errors(struct map_lookup *map) 5721 { 5722 const int index = btrfs_bg_flags_to_raid_index(map->type); 5723 5724 return btrfs_raid_array[index].tolerated_failures; 5725 } 5726 5727 bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset) 5728 { 5729 struct extent_map *em; 5730 struct map_lookup *map; 5731 int miss_ndevs = 0; 5732 int i; 5733 bool ret = true; 5734 5735 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 5736 if (IS_ERR(em)) 5737 return false; 5738 5739 map = em->map_lookup; 5740 for (i = 0; i < map->num_stripes; i++) { 5741 if (test_bit(BTRFS_DEV_STATE_MISSING, 5742 &map->stripes[i].dev->dev_state)) { 5743 miss_ndevs++; 5744 continue; 5745 } 5746 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, 5747 &map->stripes[i].dev->dev_state)) { 5748 ret = false; 5749 goto end; 5750 } 5751 } 5752 5753 /* 5754 * If the number of missing devices is larger than max errors, we can 5755 * not write the data into that chunk successfully. 5756 */ 5757 if (miss_ndevs > btrfs_chunk_max_errors(map)) 5758 ret = false; 5759 end: 5760 free_extent_map(em); 5761 return ret; 5762 } 5763 5764 void btrfs_mapping_tree_free(struct extent_map_tree *tree) 5765 { 5766 struct extent_map *em; 5767 5768 while (1) { 5769 write_lock(&tree->lock); 5770 em = lookup_extent_mapping(tree, 0, (u64)-1); 5771 if (em) 5772 remove_extent_mapping(tree, em); 5773 write_unlock(&tree->lock); 5774 if (!em) 5775 break; 5776 /* once for us */ 5777 free_extent_map(em); 5778 /* once for the tree */ 5779 free_extent_map(em); 5780 } 5781 } 5782 5783 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5784 { 5785 struct extent_map *em; 5786 struct map_lookup *map; 5787 int ret; 5788 5789 em = btrfs_get_chunk_map(fs_info, logical, len); 5790 if (IS_ERR(em)) 5791 /* 5792 * We could return errors for these cases, but that could get 5793 * ugly and we'd probably do the same thing which is just not do 5794 * anything else and exit, so return 1 so the callers don't try 5795 * to use other copies. 5796 */ 5797 return 1; 5798 5799 map = em->map_lookup; 5800 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK)) 5801 ret = map->num_stripes; 5802 else if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5803 ret = map->sub_stripes; 5804 else if (map->type & BTRFS_BLOCK_GROUP_RAID5) 5805 ret = 2; 5806 else if (map->type & BTRFS_BLOCK_GROUP_RAID6) 5807 /* 5808 * There could be two corrupted data stripes, we need 5809 * to loop retry in order to rebuild the correct data. 5810 * 5811 * Fail a stripe at a time on every retry except the 5812 * stripe under reconstruction. 
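 * Hence num_stripes is returned rather than a fixed small number, giving
 * the retry loop one mirror number per stripe to step through.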
5813 */
5814 ret = map->num_stripes;
5815 else
5816 ret = 1;
5817 free_extent_map(em);
5818
5819 down_read(&fs_info->dev_replace.rwsem);
5820 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
5821 fs_info->dev_replace.tgtdev)
5822 ret++;
5823 up_read(&fs_info->dev_replace.rwsem);
5824
5825 return ret;
5826 }
5827
5828 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
5829 u64 logical)
5830 {
5831 struct extent_map *em;
5832 struct map_lookup *map;
5833 unsigned long len = fs_info->sectorsize;
5834
5835 em = btrfs_get_chunk_map(fs_info, logical, len);
5836
5837 if (!WARN_ON(IS_ERR(em))) {
5838 map = em->map_lookup;
5839 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5840 len = map->stripe_len * nr_data_stripes(map);
5841 free_extent_map(em);
5842 }
5843 return len;
5844 }
5845
5846 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5847 {
5848 struct extent_map *em;
5849 struct map_lookup *map;
5850 int ret = 0;
5851
5852 em = btrfs_get_chunk_map(fs_info, logical, len);
5853
5854 if (!WARN_ON(IS_ERR(em))) {
5855 map = em->map_lookup;
5856 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5857 ret = 1;
5858 free_extent_map(em);
5859 }
5860 return ret;
5861 }
5862
5863 static int find_live_mirror(struct btrfs_fs_info *fs_info,
5864 struct map_lookup *map, int first,
5865 int dev_replace_is_ongoing)
5866 {
5867 int i;
5868 int num_stripes;
5869 int preferred_mirror;
5870 int tolerance;
5871 struct btrfs_device *srcdev;
5872
5873 ASSERT((map->type &
5874 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));
5875
5876 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5877 num_stripes = map->sub_stripes;
5878 else
5879 num_stripes = map->num_stripes;
5880
5881 switch (fs_info->fs_devices->read_policy) {
5882 default:
5883 /* Shouldn't happen, just warn and use pid instead of failing */
5884 btrfs_warn_rl(fs_info,
5885 "unknown read_policy type %u, reset to pid",
5886 fs_info->fs_devices->read_policy);
5887 fs_info->fs_devices->read_policy = BTRFS_READ_POLICY_PID;
5888 fallthrough;
5889 case BTRFS_READ_POLICY_PID:
5890 preferred_mirror = first + (current->pid % num_stripes);
5891 break;
5892 }
5893
5894 if (dev_replace_is_ongoing &&
5895 fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5896 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5897 srcdev = fs_info->dev_replace.srcdev;
5898 else
5899 srcdev = NULL;
5900
5901 /*
5902 * try to avoid the drive that is the source drive for a
5903 * dev-replace procedure, only choose it if no other non-missing
5904 * mirror is available
5905 */
5906 for (tolerance = 0; tolerance < 2; tolerance++) {
5907 if (map->stripes[preferred_mirror].dev->bdev &&
5908 (tolerance || map->stripes[preferred_mirror].dev != srcdev))
5909 return preferred_mirror;
5910 for (i = first; i < first + num_stripes; i++) {
5911 if (map->stripes[i].dev->bdev &&
5912 (tolerance || map->stripes[i].dev != srcdev))
5913 return i;
5914 }
5915 }
5916
5917 /* we couldn't find one that doesn't fail. Just return something
5918 * and the io error handling code will clean up eventually
5919 */
5920 return preferred_mirror;
5921 }
5922
5923 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
5924 static void sort_parity_stripes(struct btrfs_io_context *bioc, int num_stripes)
5925 {
5926 int i;
5927 int again = 1;
5928
5929 while (again) {
5930 again = 0;
5931 for (i = 0; i < num_stripes - 1; i++) {
5932 /* Swap if parity is on a smaller index */
5933 if (bioc->raid_map[i] > bioc->raid_map[i + 1]) {
5934 swap(bioc->stripes[i], bioc->stripes[i + 1]);
5935 swap(bioc->raid_map[i], bioc->raid_map[i + 1]);
5936 again = 1;
5937 }
5938 }
5939 }
5940 }
5941
5942 static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info,
5943 int total_stripes,
5944 int real_stripes)
5945 {
5946 struct btrfs_io_context *bioc = kzalloc(
5947 /* The size of btrfs_io_context */
5948 sizeof(struct btrfs_io_context) +
5949 /* Plus the variable array for the stripes */
5950 sizeof(struct btrfs_io_stripe) * (total_stripes) +
5951 /* Plus the variable array for the tgt dev */
5952 sizeof(int) * (real_stripes) +
5953 /*
5954 * Plus the raid_map, which includes both the tgt dev
5955 * and the stripes.
5956 */
5957 sizeof(u64) * (total_stripes),
5958 GFP_NOFS|__GFP_NOFAIL);
5959
5960 atomic_set(&bioc->error, 0);
5961 refcount_set(&bioc->refs, 1);
5962
5963 bioc->fs_info = fs_info;
5964 bioc->tgtdev_map = (int *)(bioc->stripes + total_stripes);
5965 bioc->raid_map = (u64 *)(bioc->tgtdev_map + real_stripes);
5966
5967 return bioc;
5968 }
5969
5970 void btrfs_get_bioc(struct btrfs_io_context *bioc)
5971 {
5972 WARN_ON(!refcount_read(&bioc->refs));
5973 refcount_inc(&bioc->refs);
5974 }
5975
5976 void btrfs_put_bioc(struct btrfs_io_context *bioc)
5977 {
5978 if (!bioc)
5979 return;
5980 if (refcount_dec_and_test(&bioc->refs))
5981 kfree(bioc);
5982 }
5983
5984
5985 /*
5986 * Note that discard won't be sent to the target device of a device
5987 * replace.
5988 */
5989 static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
5990 u64 logical, u64 *length_ret,
5991 struct btrfs_io_context **bioc_ret)
5992 {
5993 struct extent_map *em;
5994 struct map_lookup *map;
5995 struct btrfs_io_context *bioc;
5996 u64 length = *length_ret;
5997 u64 offset;
5998 u64 stripe_nr;
5999 u64 stripe_nr_end;
6000 u64 stripe_end_offset;
6001 u64 stripe_cnt;
6002 u64 stripe_len;
6003 u64 stripe_offset;
6004 u64 num_stripes;
6005 u32 stripe_index;
6006 u32 factor = 0;
6007 u32 sub_stripes = 0;
6008 u64 stripes_per_dev = 0;
6009 u32 remaining_stripes = 0;
6010 u32 last_stripe = 0;
6011 int ret = 0;
6012 int i;
6013
6014 /* Discard always returns a bioc.
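* As a hedged illustration of the splitting done below: for RAID0 over
* two devices with a 64K stripe_len, a discard of [0, 256K) covers
* stripe_nr 0..3, so each device receives one 128K range
* (stripes_per_dev = 2, remaining_stripes = 0).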
*/ 6015 ASSERT(bioc_ret); 6016 6017 em = btrfs_get_chunk_map(fs_info, logical, length); 6018 if (IS_ERR(em)) 6019 return PTR_ERR(em); 6020 6021 map = em->map_lookup; 6022 /* we don't discard raid56 yet */ 6023 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6024 ret = -EOPNOTSUPP; 6025 goto out; 6026 } 6027 6028 offset = logical - em->start; 6029 length = min_t(u64, em->start + em->len - logical, length); 6030 *length_ret = length; 6031 6032 stripe_len = map->stripe_len; 6033 /* 6034 * stripe_nr counts the total number of stripes we have to stride 6035 * to get to this block 6036 */ 6037 stripe_nr = div64_u64(offset, stripe_len); 6038 6039 /* stripe_offset is the offset of this block in its stripe */ 6040 stripe_offset = offset - stripe_nr * stripe_len; 6041 6042 stripe_nr_end = round_up(offset + length, map->stripe_len); 6043 stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len); 6044 stripe_cnt = stripe_nr_end - stripe_nr; 6045 stripe_end_offset = stripe_nr_end * map->stripe_len - 6046 (offset + length); 6047 /* 6048 * after this, stripe_nr is the number of stripes on this 6049 * device we have to walk to find the data, and stripe_index is 6050 * the number of our device in the stripe array 6051 */ 6052 num_stripes = 1; 6053 stripe_index = 0; 6054 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 6055 BTRFS_BLOCK_GROUP_RAID10)) { 6056 if (map->type & BTRFS_BLOCK_GROUP_RAID0) 6057 sub_stripes = 1; 6058 else 6059 sub_stripes = map->sub_stripes; 6060 6061 factor = map->num_stripes / sub_stripes; 6062 num_stripes = min_t(u64, map->num_stripes, 6063 sub_stripes * stripe_cnt); 6064 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 6065 stripe_index *= sub_stripes; 6066 stripes_per_dev = div_u64_rem(stripe_cnt, factor, 6067 &remaining_stripes); 6068 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe); 6069 last_stripe *= sub_stripes; 6070 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK | 6071 BTRFS_BLOCK_GROUP_DUP)) { 6072 num_stripes = map->num_stripes; 6073 } else { 6074 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6075 &stripe_index); 6076 } 6077 6078 bioc = alloc_btrfs_io_context(fs_info, num_stripes, 0); 6079 if (!bioc) { 6080 ret = -ENOMEM; 6081 goto out; 6082 } 6083 6084 for (i = 0; i < num_stripes; i++) { 6085 bioc->stripes[i].physical = 6086 map->stripes[stripe_index].physical + 6087 stripe_offset + stripe_nr * map->stripe_len; 6088 bioc->stripes[i].dev = map->stripes[stripe_index].dev; 6089 6090 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 6091 BTRFS_BLOCK_GROUP_RAID10)) { 6092 bioc->stripes[i].length = stripes_per_dev * 6093 map->stripe_len; 6094 6095 if (i / sub_stripes < remaining_stripes) 6096 bioc->stripes[i].length += map->stripe_len; 6097 6098 /* 6099 * Special for the first stripe and 6100 * the last stripe: 6101 * 6102 * |-------|...|-------| 6103 * |----------| 6104 * off end_off 6105 */ 6106 if (i < sub_stripes) 6107 bioc->stripes[i].length -= stripe_offset; 6108 6109 if (stripe_index >= last_stripe && 6110 stripe_index <= (last_stripe + 6111 sub_stripes - 1)) 6112 bioc->stripes[i].length -= stripe_end_offset; 6113 6114 if (i == sub_stripes - 1) 6115 stripe_offset = 0; 6116 } else { 6117 bioc->stripes[i].length = length; 6118 } 6119 6120 stripe_index++; 6121 if (stripe_index == map->num_stripes) { 6122 stripe_index = 0; 6123 stripe_nr++; 6124 } 6125 } 6126 6127 *bioc_ret = bioc; 6128 bioc->map_type = map->type; 6129 bioc->num_stripes = num_stripes; 6130 out: 6131 free_extent_map(em); 6132 return ret; 6133 } 6134 6135 /* 6136 * In dev-replace case, for 
repair case (that's the only case where the mirror 6137 * is selected explicitly when calling btrfs_map_block), blocks left of the 6138 * left cursor can also be read from the target drive. 6139 * 6140 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the 6141 * array of stripes. 6142 * For READ, it also needs to be supported using the same mirror number. 6143 * 6144 * If the requested block is not left of the left cursor, EIO is returned. This 6145 * can happen because btrfs_num_copies() returns one more in the dev-replace 6146 * case. 6147 */ 6148 static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info, 6149 u64 logical, u64 length, 6150 u64 srcdev_devid, int *mirror_num, 6151 u64 *physical) 6152 { 6153 struct btrfs_io_context *bioc = NULL; 6154 int num_stripes; 6155 int index_srcdev = 0; 6156 int found = 0; 6157 u64 physical_of_found = 0; 6158 int i; 6159 int ret = 0; 6160 6161 ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, 6162 logical, &length, &bioc, 0, 0); 6163 if (ret) { 6164 ASSERT(bioc == NULL); 6165 return ret; 6166 } 6167 6168 num_stripes = bioc->num_stripes; 6169 if (*mirror_num > num_stripes) { 6170 /* 6171 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror, 6172 * that means that the requested area is not left of the left 6173 * cursor 6174 */ 6175 btrfs_put_bioc(bioc); 6176 return -EIO; 6177 } 6178 6179 /* 6180 * process the rest of the function using the mirror_num of the source 6181 * drive. Therefore look it up first. At the end, patch the device 6182 * pointer to the one of the target drive. 6183 */ 6184 for (i = 0; i < num_stripes; i++) { 6185 if (bioc->stripes[i].dev->devid != srcdev_devid) 6186 continue; 6187 6188 /* 6189 * In case of DUP, in order to keep it simple, only add the 6190 * mirror with the lowest physical address 6191 */ 6192 if (found && 6193 physical_of_found <= bioc->stripes[i].physical) 6194 continue; 6195 6196 index_srcdev = i; 6197 found = 1; 6198 physical_of_found = bioc->stripes[i].physical; 6199 } 6200 6201 btrfs_put_bioc(bioc); 6202 6203 ASSERT(found); 6204 if (!found) 6205 return -EIO; 6206 6207 *mirror_num = index_srcdev + 1; 6208 *physical = physical_of_found; 6209 return ret; 6210 } 6211 6212 static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical) 6213 { 6214 struct btrfs_block_group *cache; 6215 bool ret; 6216 6217 /* Non zoned filesystem does not use "to_copy" flag */ 6218 if (!btrfs_is_zoned(fs_info)) 6219 return false; 6220 6221 cache = btrfs_lookup_block_group(fs_info, logical); 6222 6223 spin_lock(&cache->lock); 6224 ret = cache->to_copy; 6225 spin_unlock(&cache->lock); 6226 6227 btrfs_put_block_group(cache); 6228 return ret; 6229 } 6230 6231 static void handle_ops_on_dev_replace(enum btrfs_map_op op, 6232 struct btrfs_io_context **bioc_ret, 6233 struct btrfs_dev_replace *dev_replace, 6234 u64 logical, 6235 int *num_stripes_ret, int *max_errors_ret) 6236 { 6237 struct btrfs_io_context *bioc = *bioc_ret; 6238 u64 srcdev_devid = dev_replace->srcdev->devid; 6239 int tgtdev_indexes = 0; 6240 int num_stripes = *num_stripes_ret; 6241 int max_errors = *max_errors_ret; 6242 int i; 6243 6244 if (op == BTRFS_MAP_WRITE) { 6245 int index_where_to_add; 6246 6247 /* 6248 * A block group which have "to_copy" set will eventually 6249 * copied by dev-replace process. We can avoid cloning IO here. 
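*
* (is_block_group_to_copy() above returns false on non-zoned
* filesystems, so this early return only ever triggers on zoned
* setups, where such block groups are copied wholesale by the
* replace process.)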
6250 */ 6251 if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical)) 6252 return; 6253 6254 /* 6255 * duplicate the write operations while the dev replace 6256 * procedure is running. Since the copying of the old disk to 6257 * the new disk takes place at run time while the filesystem is 6258 * mounted writable, the regular write operations to the old 6259 * disk have to be duplicated to go to the new disk as well. 6260 * 6261 * Note that device->missing is handled by the caller, and that 6262 * the write to the old disk is already set up in the stripes 6263 * array. 6264 */ 6265 index_where_to_add = num_stripes; 6266 for (i = 0; i < num_stripes; i++) { 6267 if (bioc->stripes[i].dev->devid == srcdev_devid) { 6268 /* write to new disk, too */ 6269 struct btrfs_io_stripe *new = 6270 bioc->stripes + index_where_to_add; 6271 struct btrfs_io_stripe *old = 6272 bioc->stripes + i; 6273 6274 new->physical = old->physical; 6275 new->length = old->length; 6276 new->dev = dev_replace->tgtdev; 6277 bioc->tgtdev_map[i] = index_where_to_add; 6278 index_where_to_add++; 6279 max_errors++; 6280 tgtdev_indexes++; 6281 } 6282 } 6283 num_stripes = index_where_to_add; 6284 } else if (op == BTRFS_MAP_GET_READ_MIRRORS) { 6285 int index_srcdev = 0; 6286 int found = 0; 6287 u64 physical_of_found = 0; 6288 6289 /* 6290 * During the dev-replace procedure, the target drive can also 6291 * be used to read data in case it is needed to repair a corrupt 6292 * block elsewhere. This is possible if the requested area is 6293 * left of the left cursor. In this area, the target drive is a 6294 * full copy of the source drive. 6295 */ 6296 for (i = 0; i < num_stripes; i++) { 6297 if (bioc->stripes[i].dev->devid == srcdev_devid) { 6298 /* 6299 * In case of DUP, in order to keep it simple, 6300 * only add the mirror with the lowest physical 6301 * address 6302 */ 6303 if (found && 6304 physical_of_found <= bioc->stripes[i].physical) 6305 continue; 6306 index_srcdev = i; 6307 found = 1; 6308 physical_of_found = bioc->stripes[i].physical; 6309 } 6310 } 6311 if (found) { 6312 struct btrfs_io_stripe *tgtdev_stripe = 6313 bioc->stripes + num_stripes; 6314 6315 tgtdev_stripe->physical = physical_of_found; 6316 tgtdev_stripe->length = 6317 bioc->stripes[index_srcdev].length; 6318 tgtdev_stripe->dev = dev_replace->tgtdev; 6319 bioc->tgtdev_map[index_srcdev] = num_stripes; 6320 6321 tgtdev_indexes++; 6322 num_stripes++; 6323 } 6324 } 6325 6326 *num_stripes_ret = num_stripes; 6327 *max_errors_ret = max_errors; 6328 bioc->num_tgtdevs = tgtdev_indexes; 6329 *bioc_ret = bioc; 6330 } 6331 6332 static bool need_full_stripe(enum btrfs_map_op op) 6333 { 6334 return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS); 6335 } 6336 6337 /* 6338 * Calculate the geometry of a particular (address, len) tuple. This 6339 * information is used to calculate how big a particular bio can get before it 6340 * straddles a stripe. 6341 * 6342 * @fs_info: the filesystem 6343 * @em: mapping containing the logical extent 6344 * @op: type of operation - write or read 6345 * @logical: address that we want to figure out the geometry of 6346 * @io_geom: pointer used to return values 6347 * 6348 * Returns < 0 in case a chunk for the given logical address cannot be found, 6349 * usually shouldn't happen unless @logical is corrupted, 0 otherwise. 
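*
* A hedged usage sketch (modeled on callers such as
* btrfs_bio_fits_in_stripe(); error handling abbreviated):
*
*	struct btrfs_io_geometry geom;
*	int ret;
*
*	ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(bio), logical, &geom);
*	if (ret < 0)
*		return ret;
*	// a bio starting at @logical may grow to at most geom.len bytes
*	// before it would straddle a stripe boundary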
6350 */
6351 int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *em,
6352 enum btrfs_map_op op, u64 logical,
6353 struct btrfs_io_geometry *io_geom)
6354 {
6355 struct map_lookup *map;
6356 u64 len;
6357 u64 offset;
6358 u64 stripe_offset;
6359 u64 stripe_nr;
6360 u64 stripe_len;
6361 u64 raid56_full_stripe_start = (u64)-1;
6362 int data_stripes;
6363
6364 ASSERT(op != BTRFS_MAP_DISCARD);
6365
6366 map = em->map_lookup;
6367 /* Offset of this logical address in the chunk */
6368 offset = logical - em->start;
6369 /* Len of a stripe in a chunk */
6370 stripe_len = map->stripe_len;
6371 /* Stripe where this block falls in */
6372 stripe_nr = div64_u64(offset, stripe_len);
6373 /* Offset of stripe in the chunk */
6374 stripe_offset = stripe_nr * stripe_len;
6375 if (offset < stripe_offset) {
6376 btrfs_crit(fs_info,
6377 "stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
6378 stripe_offset, offset, em->start, logical, stripe_len);
6379 return -EINVAL;
6380 }
6381
6382 /* stripe_offset is the offset of this block in its stripe */
6383 stripe_offset = offset - stripe_offset;
6384 data_stripes = nr_data_stripes(map);
6385
6386 /* Only stripe based profiles need to check against stripe length. */
6387 if (map->type & BTRFS_BLOCK_GROUP_STRIPE_MASK) {
6388 u64 max_len = stripe_len - stripe_offset;
6389
6390 /*
6391 * In case of raid56, we need to know the stripe aligned start
6392 */
6393 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6394 unsigned long full_stripe_len = stripe_len * data_stripes;
6395 raid56_full_stripe_start = offset;
6396
6397 /*
6398 * Allow a write of a full stripe, but make sure we
6399 * don't allow straddling of stripes
6400 */
6401 raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
6402 full_stripe_len);
6403 raid56_full_stripe_start *= full_stripe_len;
6404
6405 /*
6406 * For writes to RAID[56], allow a full stripeset across
6407 * all disks. For other RAID types and for RAID[56]
6408 * reads, just allow a single stripe (on a single disk).
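*
* Hedged example: with RAID5 over 4 devices (3 data stripes) and a 64K
* stripe_len, a write at offset 80K into the chunk has stripe_nr 1,
* stripe_offset 16K and raid56_full_stripe_start 0, so max_len becomes
* 3 * 64K - 80K = 112K: the write may extend to the end of the 192K
* full stripe but no further.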
6409 */ 6410 if (op == BTRFS_MAP_WRITE) { 6411 max_len = stripe_len * data_stripes - 6412 (offset - raid56_full_stripe_start); 6413 } 6414 } 6415 len = min_t(u64, em->len - offset, max_len); 6416 } else { 6417 len = em->len - offset; 6418 } 6419 6420 io_geom->len = len; 6421 io_geom->offset = offset; 6422 io_geom->stripe_len = stripe_len; 6423 io_geom->stripe_nr = stripe_nr; 6424 io_geom->stripe_offset = stripe_offset; 6425 io_geom->raid56_stripe_offset = raid56_full_stripe_start; 6426 6427 return 0; 6428 } 6429 6430 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, 6431 enum btrfs_map_op op, 6432 u64 logical, u64 *length, 6433 struct btrfs_io_context **bioc_ret, 6434 int mirror_num, int need_raid_map) 6435 { 6436 struct extent_map *em; 6437 struct map_lookup *map; 6438 u64 stripe_offset; 6439 u64 stripe_nr; 6440 u64 stripe_len; 6441 u32 stripe_index; 6442 int data_stripes; 6443 int i; 6444 int ret = 0; 6445 int num_stripes; 6446 int max_errors = 0; 6447 int tgtdev_indexes = 0; 6448 struct btrfs_io_context *bioc = NULL; 6449 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; 6450 int dev_replace_is_ongoing = 0; 6451 int num_alloc_stripes; 6452 int patch_the_first_stripe_for_dev_replace = 0; 6453 u64 physical_to_patch_in_first_stripe = 0; 6454 u64 raid56_full_stripe_start = (u64)-1; 6455 struct btrfs_io_geometry geom; 6456 6457 ASSERT(bioc_ret); 6458 ASSERT(op != BTRFS_MAP_DISCARD); 6459 6460 em = btrfs_get_chunk_map(fs_info, logical, *length); 6461 ASSERT(!IS_ERR(em)); 6462 6463 ret = btrfs_get_io_geometry(fs_info, em, op, logical, &geom); 6464 if (ret < 0) 6465 return ret; 6466 6467 map = em->map_lookup; 6468 6469 *length = geom.len; 6470 stripe_len = geom.stripe_len; 6471 stripe_nr = geom.stripe_nr; 6472 stripe_offset = geom.stripe_offset; 6473 raid56_full_stripe_start = geom.raid56_stripe_offset; 6474 data_stripes = nr_data_stripes(map); 6475 6476 down_read(&dev_replace->rwsem); 6477 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace); 6478 /* 6479 * Hold the semaphore for read during the whole operation, write is 6480 * requested at commit time but must wait. 
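*
* (When no replace is running the lock is dropped again right below;
* otherwise it is held until the "out" label so that the target device
* recorded in the bioc cannot go away while we are still filling it in.)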
6481 */ 6482 if (!dev_replace_is_ongoing) 6483 up_read(&dev_replace->rwsem); 6484 6485 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 && 6486 !need_full_stripe(op) && dev_replace->tgtdev != NULL) { 6487 ret = get_extra_mirror_from_replace(fs_info, logical, *length, 6488 dev_replace->srcdev->devid, 6489 &mirror_num, 6490 &physical_to_patch_in_first_stripe); 6491 if (ret) 6492 goto out; 6493 else 6494 patch_the_first_stripe_for_dev_replace = 1; 6495 } else if (mirror_num > map->num_stripes) { 6496 mirror_num = 0; 6497 } 6498 6499 num_stripes = 1; 6500 stripe_index = 0; 6501 if (map->type & BTRFS_BLOCK_GROUP_RAID0) { 6502 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6503 &stripe_index); 6504 if (!need_full_stripe(op)) 6505 mirror_num = 1; 6506 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) { 6507 if (need_full_stripe(op)) 6508 num_stripes = map->num_stripes; 6509 else if (mirror_num) 6510 stripe_index = mirror_num - 1; 6511 else { 6512 stripe_index = find_live_mirror(fs_info, map, 0, 6513 dev_replace_is_ongoing); 6514 mirror_num = stripe_index + 1; 6515 } 6516 6517 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { 6518 if (need_full_stripe(op)) { 6519 num_stripes = map->num_stripes; 6520 } else if (mirror_num) { 6521 stripe_index = mirror_num - 1; 6522 } else { 6523 mirror_num = 1; 6524 } 6525 6526 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { 6527 u32 factor = map->num_stripes / map->sub_stripes; 6528 6529 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 6530 stripe_index *= map->sub_stripes; 6531 6532 if (need_full_stripe(op)) 6533 num_stripes = map->sub_stripes; 6534 else if (mirror_num) 6535 stripe_index += mirror_num - 1; 6536 else { 6537 int old_stripe_index = stripe_index; 6538 stripe_index = find_live_mirror(fs_info, map, 6539 stripe_index, 6540 dev_replace_is_ongoing); 6541 mirror_num = stripe_index - old_stripe_index + 1; 6542 } 6543 6544 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6545 if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) { 6546 /* push stripe_nr back to the start of the full stripe */ 6547 stripe_nr = div64_u64(raid56_full_stripe_start, 6548 stripe_len * data_stripes); 6549 6550 /* RAID[56] write or recovery. Return all stripes */ 6551 num_stripes = map->num_stripes; 6552 max_errors = nr_parity_stripes(map); 6553 6554 *length = map->stripe_len; 6555 stripe_index = 0; 6556 stripe_offset = 0; 6557 } else { 6558 /* 6559 * Mirror #0 or #1 means the original data block. 6560 * Mirror #2 is RAID5 parity block. 6561 * Mirror #3 is RAID6 Q block. 
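*
* Hedged example: on a 4-device RAID6 chunk (data_stripes = 2),
* mirror_num 3 yields stripe_index = 2 + 3 - 2 = 3 before rotation,
* i.e. the Q stripe of the addressed full stripe.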
6562 */
6563 stripe_nr = div_u64_rem(stripe_nr,
6564 data_stripes, &stripe_index);
6565 if (mirror_num > 1)
6566 stripe_index = data_stripes + mirror_num - 2;
6567
6568 /* We distribute the parity blocks across stripes */
6569 div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
6570 &stripe_index);
6571 if (!need_full_stripe(op) && mirror_num <= 1)
6572 mirror_num = 1;
6573 }
6574 } else {
6575 /*
6576 * after this, stripe_nr is the number of stripes on this
6577 * device we have to walk to find the data, and stripe_index is
6578 * the number of our device in the stripe array
6579 */
6580 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6581 &stripe_index);
6582 mirror_num = stripe_index + 1;
6583 }
6584 if (stripe_index >= map->num_stripes) {
6585 btrfs_crit(fs_info,
6586 "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
6587 stripe_index, map->num_stripes);
6588 ret = -EINVAL;
6589 goto out;
6590 }
6591
6592 num_alloc_stripes = num_stripes;
6593 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
6594 if (op == BTRFS_MAP_WRITE)
6595 num_alloc_stripes <<= 1;
6596 if (op == BTRFS_MAP_GET_READ_MIRRORS)
6597 num_alloc_stripes++;
6598 tgtdev_indexes = num_stripes;
6599 }
6600
6601 bioc = alloc_btrfs_io_context(fs_info, num_alloc_stripes, tgtdev_indexes);
6602 if (!bioc) {
6603 ret = -ENOMEM;
6604 goto out;
6605 }
6606
6607 for (i = 0; i < num_stripes; i++) {
6608 bioc->stripes[i].physical = map->stripes[stripe_index].physical +
6609 stripe_offset + stripe_nr * map->stripe_len;
6610 bioc->stripes[i].dev = map->stripes[stripe_index].dev;
6611 stripe_index++;
6612 }
6613
6614 /* Build raid_map */
6615 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
6616 (need_full_stripe(op) || mirror_num > 1)) {
6617 u64 tmp;
6618 unsigned rot;
6619
6620 /* Work out the disk rotation on this stripe-set */
6621 div_u64_rem(stripe_nr, num_stripes, &rot);
6622
6623 /* Fill in the logical address of each stripe */
6624 tmp = stripe_nr * data_stripes;
6625 for (i = 0; i < data_stripes; i++)
6626 bioc->raid_map[(i + rot) % num_stripes] =
6627 em->start + (tmp + i) * map->stripe_len;
6628
6629 bioc->raid_map[(i + rot) % num_stripes] = RAID5_P_STRIPE;
6630 if (map->type & BTRFS_BLOCK_GROUP_RAID6)
6631 bioc->raid_map[(i + rot + 1) % num_stripes] =
6632 RAID6_Q_STRIPE;
6633
6634 sort_parity_stripes(bioc, num_stripes);
6635 }
6636
6637 if (need_full_stripe(op))
6638 max_errors = btrfs_chunk_max_errors(map);
6639
6640 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
6641 need_full_stripe(op)) {
6642 handle_ops_on_dev_replace(op, &bioc, dev_replace, logical,
6643 &num_stripes, &max_errors);
6644 }
6645
6646 *bioc_ret = bioc;
6647 bioc->map_type = map->type;
6648 bioc->num_stripes = num_stripes;
6649 bioc->max_errors = max_errors;
6650 bioc->mirror_num = mirror_num;
6651
6652 /*
6653 * This is the case that REQ_READ && dev_replace_is_ongoing &&
6654 * mirror_num == num_stripes + 1 && dev_replace target drive is
6655 * available as a mirror
6656 */
6657 if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
6658 WARN_ON(num_stripes > 1);
6659 bioc->stripes[0].dev = dev_replace->tgtdev;
6660 bioc->stripes[0].physical = physical_to_patch_in_first_stripe;
6661 bioc->mirror_num = map->num_stripes + 1;
6662 }
6663 out:
6664 if (dev_replace_is_ongoing) {
6665 lockdep_assert_held(&dev_replace->rwsem);
6666 /* Unlock and let waiting writers proceed */
6667 up_read(&dev_replace->rwsem);
6668 }
6669 free_extent_map(em);
6670 return ret;
6671 }
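/*
 * Illustration only (not part of the original flow): the raid_map rotation
 * built in __btrfs_map_block() above can be reproduced in isolation. For a
 * RAID6 array the parity pair rotates by one device per full stripe:
 *
 *	static void example_fill_raid_map(u64 *raid_map, u64 chunk_start,
 *					  u64 stripe_nr, u64 stripe_len,
 *					  int num_stripes)
 *	{
 *		int data_stripes = num_stripes - 2;	// RAID6: P + Q
 *		u32 rot;
 *		int i;
 *
 *		div_u64_rem(stripe_nr, num_stripes, &rot);
 *		for (i = 0; i < data_stripes; i++)
 *			raid_map[(i + rot) % num_stripes] = chunk_start +
 *				(stripe_nr * data_stripes + i) * stripe_len;
 *		raid_map[(i + rot) % num_stripes] = RAID5_P_STRIPE;
 *		raid_map[(i + rot + 1) % num_stripes] = RAID6_Q_STRIPE;
 *	}
 *
 * e.g. with num_stripes = 4, stripe_nr = 0 puts P on device 2 and Q on
 * device 3, while stripe_nr = 1 moves them to devices 3 and 0.
 */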
6672 6673 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 6674 u64 logical, u64 *length, 6675 struct btrfs_io_context **bioc_ret, int mirror_num) 6676 { 6677 if (op == BTRFS_MAP_DISCARD) 6678 return __btrfs_map_block_for_discard(fs_info, logical, 6679 length, bioc_ret); 6680 6681 return __btrfs_map_block(fs_info, op, logical, length, bioc_ret, 6682 mirror_num, 0); 6683 } 6684 6685 /* For Scrub/replace */ 6686 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 6687 u64 logical, u64 *length, 6688 struct btrfs_io_context **bioc_ret) 6689 { 6690 return __btrfs_map_block(fs_info, op, logical, length, bioc_ret, 0, 1); 6691 } 6692 6693 static inline void btrfs_end_bioc(struct btrfs_io_context *bioc, struct bio *bio) 6694 { 6695 bio->bi_private = bioc->private; 6696 bio->bi_end_io = bioc->end_io; 6697 bio_endio(bio); 6698 6699 btrfs_put_bioc(bioc); 6700 } 6701 6702 static void btrfs_end_bio(struct bio *bio) 6703 { 6704 struct btrfs_io_context *bioc = bio->bi_private; 6705 int is_orig_bio = 0; 6706 6707 if (bio->bi_status) { 6708 atomic_inc(&bioc->error); 6709 if (bio->bi_status == BLK_STS_IOERR || 6710 bio->bi_status == BLK_STS_TARGET) { 6711 struct btrfs_device *dev = btrfs_bio(bio)->device; 6712 6713 ASSERT(dev->bdev); 6714 if (btrfs_op(bio) == BTRFS_MAP_WRITE) 6715 btrfs_dev_stat_inc_and_print(dev, 6716 BTRFS_DEV_STAT_WRITE_ERRS); 6717 else if (!(bio->bi_opf & REQ_RAHEAD)) 6718 btrfs_dev_stat_inc_and_print(dev, 6719 BTRFS_DEV_STAT_READ_ERRS); 6720 if (bio->bi_opf & REQ_PREFLUSH) 6721 btrfs_dev_stat_inc_and_print(dev, 6722 BTRFS_DEV_STAT_FLUSH_ERRS); 6723 } 6724 } 6725 6726 if (bio == bioc->orig_bio) 6727 is_orig_bio = 1; 6728 6729 btrfs_bio_counter_dec(bioc->fs_info); 6730 6731 if (atomic_dec_and_test(&bioc->stripes_pending)) { 6732 if (!is_orig_bio) { 6733 bio_put(bio); 6734 bio = bioc->orig_bio; 6735 } 6736 6737 btrfs_bio(bio)->mirror_num = bioc->mirror_num; 6738 /* only send an error to the higher layers if it is 6739 * beyond the tolerance of the btrfs bio 6740 */ 6741 if (atomic_read(&bioc->error) > bioc->max_errors) { 6742 bio->bi_status = BLK_STS_IOERR; 6743 } else { 6744 /* 6745 * this bio is actually up to date, we didn't 6746 * go over the max number of errors 6747 */ 6748 bio->bi_status = BLK_STS_OK; 6749 } 6750 6751 btrfs_end_bioc(bioc, bio); 6752 } else if (!is_orig_bio) { 6753 bio_put(bio); 6754 } 6755 } 6756 6757 static void submit_stripe_bio(struct btrfs_io_context *bioc, struct bio *bio, 6758 u64 physical, struct btrfs_device *dev) 6759 { 6760 struct btrfs_fs_info *fs_info = bioc->fs_info; 6761 6762 bio->bi_private = bioc; 6763 btrfs_bio(bio)->device = dev; 6764 bio->bi_end_io = btrfs_end_bio; 6765 bio->bi_iter.bi_sector = physical >> 9; 6766 /* 6767 * For zone append writing, bi_sector must point the beginning of the 6768 * zone 6769 */ 6770 if (bio_op(bio) == REQ_OP_ZONE_APPEND) { 6771 if (btrfs_dev_is_sequential(dev, physical)) { 6772 u64 zone_start = round_down(physical, fs_info->zone_size); 6773 6774 bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT; 6775 } else { 6776 bio->bi_opf &= ~REQ_OP_ZONE_APPEND; 6777 bio->bi_opf |= REQ_OP_WRITE; 6778 } 6779 } 6780 btrfs_debug_in_rcu(fs_info, 6781 "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u", 6782 bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector, 6783 (unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name), 6784 dev->devid, bio->bi_iter.bi_size); 6785 bio_set_dev(bio, dev->bdev); 6786 6787 btrfs_bio_counter_inc_noblocked(fs_info); 6788 6789 
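/*
 * Hand the bio to btrfsic_submit_bio(), which is plain submit_bio() unless
 * CONFIG_BTRFS_FS_CHECK_INTEGRITY adds its checks; completion is then
 * reported through the btrfs_end_bio() callback set up above.
 */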
btrfsic_submit_bio(bio); 6790 } 6791 6792 static void bioc_error(struct btrfs_io_context *bioc, struct bio *bio, u64 logical) 6793 { 6794 atomic_inc(&bioc->error); 6795 if (atomic_dec_and_test(&bioc->stripes_pending)) { 6796 /* Should be the original bio. */ 6797 WARN_ON(bio != bioc->orig_bio); 6798 6799 btrfs_bio(bio)->mirror_num = bioc->mirror_num; 6800 bio->bi_iter.bi_sector = logical >> 9; 6801 if (atomic_read(&bioc->error) > bioc->max_errors) 6802 bio->bi_status = BLK_STS_IOERR; 6803 else 6804 bio->bi_status = BLK_STS_OK; 6805 btrfs_end_bioc(bioc, bio); 6806 } 6807 } 6808 6809 blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, 6810 int mirror_num) 6811 { 6812 struct btrfs_device *dev; 6813 struct bio *first_bio = bio; 6814 u64 logical = bio->bi_iter.bi_sector << 9; 6815 u64 length = 0; 6816 u64 map_length; 6817 int ret; 6818 int dev_nr; 6819 int total_devs; 6820 struct btrfs_io_context *bioc = NULL; 6821 6822 length = bio->bi_iter.bi_size; 6823 map_length = length; 6824 6825 btrfs_bio_counter_inc_blocked(fs_info); 6826 ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical, 6827 &map_length, &bioc, mirror_num, 1); 6828 if (ret) { 6829 btrfs_bio_counter_dec(fs_info); 6830 return errno_to_blk_status(ret); 6831 } 6832 6833 total_devs = bioc->num_stripes; 6834 bioc->orig_bio = first_bio; 6835 bioc->private = first_bio->bi_private; 6836 bioc->end_io = first_bio->bi_end_io; 6837 atomic_set(&bioc->stripes_pending, bioc->num_stripes); 6838 6839 if ((bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) && 6840 ((btrfs_op(bio) == BTRFS_MAP_WRITE) || (mirror_num > 1))) { 6841 /* In this case, map_length has been set to the length of 6842 a single stripe; not the whole write */ 6843 if (btrfs_op(bio) == BTRFS_MAP_WRITE) { 6844 ret = raid56_parity_write(bio, bioc, map_length); 6845 } else { 6846 ret = raid56_parity_recover(bio, bioc, map_length, 6847 mirror_num, 1); 6848 } 6849 6850 btrfs_bio_counter_dec(fs_info); 6851 return errno_to_blk_status(ret); 6852 } 6853 6854 if (map_length < length) { 6855 btrfs_crit(fs_info, 6856 "mapping failed logical %llu bio len %llu len %llu", 6857 logical, length, map_length); 6858 BUG(); 6859 } 6860 6861 for (dev_nr = 0; dev_nr < total_devs; dev_nr++) { 6862 dev = bioc->stripes[dev_nr].dev; 6863 if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING, 6864 &dev->dev_state) || 6865 (btrfs_op(first_bio) == BTRFS_MAP_WRITE && 6866 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) { 6867 bioc_error(bioc, first_bio, logical); 6868 continue; 6869 } 6870 6871 if (dev_nr < total_devs - 1) 6872 bio = btrfs_bio_clone(first_bio); 6873 else 6874 bio = first_bio; 6875 6876 submit_stripe_bio(bioc, bio, bioc->stripes[dev_nr].physical, dev); 6877 } 6878 btrfs_bio_counter_dec(fs_info); 6879 return BLK_STS_OK; 6880 } 6881 6882 static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args, 6883 const struct btrfs_fs_devices *fs_devices) 6884 { 6885 if (args->fsid == NULL) 6886 return true; 6887 if (memcmp(fs_devices->metadata_uuid, args->fsid, BTRFS_FSID_SIZE) == 0) 6888 return true; 6889 return false; 6890 } 6891 6892 static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args, 6893 const struct btrfs_device *device) 6894 { 6895 ASSERT((args->devid != (u64)-1) || args->missing); 6896 6897 if ((args->devid != (u64)-1) && device->devid != args->devid) 6898 return false; 6899 if (args->uuid && memcmp(device->uuid, args->uuid, BTRFS_UUID_SIZE) != 0) 6900 return false; 6901 if (!args->missing) 6902 return true; 6903 if 
(test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) && 6904 !device->bdev) 6905 return true; 6906 return false; 6907 } 6908 6909 /* 6910 * Find a device specified by @devid or @uuid in the list of @fs_devices, or 6911 * return NULL. 6912 * 6913 * If devid and uuid are both specified, the match must be exact, otherwise 6914 * only devid is used. 6915 */ 6916 struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices, 6917 const struct btrfs_dev_lookup_args *args) 6918 { 6919 struct btrfs_device *device; 6920 struct btrfs_fs_devices *seed_devs; 6921 6922 if (dev_args_match_fs_devices(args, fs_devices)) { 6923 list_for_each_entry(device, &fs_devices->devices, dev_list) { 6924 if (dev_args_match_device(args, device)) 6925 return device; 6926 } 6927 } 6928 6929 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 6930 if (!dev_args_match_fs_devices(args, seed_devs)) 6931 continue; 6932 list_for_each_entry(device, &seed_devs->devices, dev_list) { 6933 if (dev_args_match_device(args, device)) 6934 return device; 6935 } 6936 } 6937 6938 return NULL; 6939 } 6940 6941 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices, 6942 u64 devid, u8 *dev_uuid) 6943 { 6944 struct btrfs_device *device; 6945 unsigned int nofs_flag; 6946 6947 /* 6948 * We call this under the chunk_mutex, so we want to use NOFS for this 6949 * allocation, however we don't want to change btrfs_alloc_device() to 6950 * always do NOFS because we use it in a lot of other GFP_KERNEL safe 6951 * places. 6952 */ 6953 nofs_flag = memalloc_nofs_save(); 6954 device = btrfs_alloc_device(NULL, &devid, dev_uuid); 6955 memalloc_nofs_restore(nofs_flag); 6956 if (IS_ERR(device)) 6957 return device; 6958 6959 list_add(&device->dev_list, &fs_devices->devices); 6960 device->fs_devices = fs_devices; 6961 fs_devices->num_devices++; 6962 6963 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 6964 fs_devices->missing_devices++; 6965 6966 return device; 6967 } 6968 6969 /** 6970 * btrfs_alloc_device - allocate struct btrfs_device 6971 * @fs_info: used only for generating a new devid, can be NULL if 6972 * devid is provided (i.e. @devid != NULL). 6973 * @devid: a pointer to devid for this device. If NULL a new devid 6974 * is generated. 6975 * @uuid: a pointer to UUID for this device. If NULL a new UUID 6976 * is generated. 6977 * 6978 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR() 6979 * on error. Returned struct is not linked onto any lists and must be 6980 * destroyed with btrfs_free_device. 
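*
* A hedged usage sketch (mirrors add_missing_dev() above; error handling
* abbreviated):
*
*	struct btrfs_device *device;
*
*	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
*	if (IS_ERR(device))
*		return PTR_ERR(device);
*	// either link device onto an fs_devices list, or drop it again:
*	btrfs_free_device(device);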
6981 */
6982 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6983 const u64 *devid,
6984 const u8 *uuid)
6985 {
6986 struct btrfs_device *dev;
6987 u64 tmp;
6988
6989 if (WARN_ON(!devid && !fs_info))
6990 return ERR_PTR(-EINVAL);
6991
6992 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
6993 if (!dev)
6994 return ERR_PTR(-ENOMEM);
6995
6996 /*
6997 * Preallocate a bio that's always going to be used for flushing device
6998 * barriers and matches the device lifespan
6999 */
7000 dev->flush_bio = bio_kmalloc(GFP_KERNEL, 0);
7001 if (!dev->flush_bio) {
7002 kfree(dev);
7003 return ERR_PTR(-ENOMEM);
7004 }
7005
7006 INIT_LIST_HEAD(&dev->dev_list);
7007 INIT_LIST_HEAD(&dev->dev_alloc_list);
7008 INIT_LIST_HEAD(&dev->post_commit_list);
7009
7010 atomic_set(&dev->dev_stats_ccnt, 0);
7011 btrfs_device_data_ordered_init(dev);
7012 extent_io_tree_init(fs_info, &dev->alloc_state,
7013 IO_TREE_DEVICE_ALLOC_STATE, NULL);
7014
7015 if (devid)
7016 tmp = *devid;
7017 else {
7018 int ret;
7019
7020 ret = find_next_devid(fs_info, &tmp);
7021 if (ret) {
7022 btrfs_free_device(dev);
7023 return ERR_PTR(ret);
7024 }
7025 }
7026 dev->devid = tmp;
7027
7028 if (uuid)
7029 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
7030 else
7031 generate_random_uuid(dev->uuid);
7032
7033 return dev;
7034 }
7035
7036 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
7037 u64 devid, u8 *uuid, bool error)
7038 {
7039 if (error)
7040 btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
7041 devid, uuid);
7042 else
7043 btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
7044 devid, uuid);
7045 }
7046
7047 static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
7048 {
7049 const int data_stripes = calc_data_stripes(type, num_stripes);
7050
7051 return div_u64(chunk_len, data_stripes);
7052 }
7053
7054 #if BITS_PER_LONG == 32
7055 /*
7056 * Due to page cache limit, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE
7057 * can't be accessed on 32bit systems.
7058 *
7059 * This function does a mount time check to reject the fs if it already has
7060 * a metadata chunk beyond that limit.
7061 */
7062 static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
7063 u64 logical, u64 length, u64 type)
7064 {
7065 if (!(type & BTRFS_BLOCK_GROUP_METADATA))
7066 return 0;
7067
7068 if (logical + length < MAX_LFS_FILESIZE)
7069 return 0;
7070
7071 btrfs_err_32bit_limit(fs_info);
7072 return -EOVERFLOW;
7073 }
7074
7075 /*
7076 * This is to give early warning for any metadata chunk reaching
7077 * BTRFS_32BIT_EARLY_WARN_THRESHOLD.
7078 * Although we can still access the metadata, it's not going to be possible
7079 * once the limit is reached.
7080 */ 7081 static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info, 7082 u64 logical, u64 length, u64 type) 7083 { 7084 if (!(type & BTRFS_BLOCK_GROUP_METADATA)) 7085 return; 7086 7087 if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD) 7088 return; 7089 7090 btrfs_warn_32bit_limit(fs_info); 7091 } 7092 #endif 7093 7094 static struct btrfs_device *handle_missing_device(struct btrfs_fs_info *fs_info, 7095 u64 devid, u8 *uuid) 7096 { 7097 struct btrfs_device *dev; 7098 7099 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7100 btrfs_report_missing_device(fs_info, devid, uuid, true); 7101 return ERR_PTR(-ENOENT); 7102 } 7103 7104 dev = add_missing_dev(fs_info->fs_devices, devid, uuid); 7105 if (IS_ERR(dev)) { 7106 btrfs_err(fs_info, "failed to init missing device %llu: %ld", 7107 devid, PTR_ERR(dev)); 7108 return dev; 7109 } 7110 btrfs_report_missing_device(fs_info, devid, uuid, false); 7111 7112 return dev; 7113 } 7114 7115 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf, 7116 struct btrfs_chunk *chunk) 7117 { 7118 BTRFS_DEV_LOOKUP_ARGS(args); 7119 struct btrfs_fs_info *fs_info = leaf->fs_info; 7120 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 7121 struct map_lookup *map; 7122 struct extent_map *em; 7123 u64 logical; 7124 u64 length; 7125 u64 devid; 7126 u64 type; 7127 u8 uuid[BTRFS_UUID_SIZE]; 7128 int num_stripes; 7129 int ret; 7130 int i; 7131 7132 logical = key->offset; 7133 length = btrfs_chunk_length(leaf, chunk); 7134 type = btrfs_chunk_type(leaf, chunk); 7135 num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 7136 7137 #if BITS_PER_LONG == 32 7138 ret = check_32bit_meta_chunk(fs_info, logical, length, type); 7139 if (ret < 0) 7140 return ret; 7141 warn_32bit_meta_chunk(fs_info, logical, length, type); 7142 #endif 7143 7144 /* 7145 * Only need to verify chunk item if we're reading from sys chunk array, 7146 * as chunk item in tree block is already verified by tree-checker. 7147 */ 7148 if (leaf->start == BTRFS_SUPER_INFO_OFFSET) { 7149 ret = btrfs_check_chunk_valid(leaf, chunk, logical); 7150 if (ret) 7151 return ret; 7152 } 7153 7154 read_lock(&map_tree->lock); 7155 em = lookup_extent_mapping(map_tree, logical, 1); 7156 read_unlock(&map_tree->lock); 7157 7158 /* already mapped? 
*/ 7159 if (em && em->start <= logical && em->start + em->len > logical) { 7160 free_extent_map(em); 7161 return 0; 7162 } else if (em) { 7163 free_extent_map(em); 7164 } 7165 7166 em = alloc_extent_map(); 7167 if (!em) 7168 return -ENOMEM; 7169 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); 7170 if (!map) { 7171 free_extent_map(em); 7172 return -ENOMEM; 7173 } 7174 7175 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 7176 em->map_lookup = map; 7177 em->start = logical; 7178 em->len = length; 7179 em->orig_start = 0; 7180 em->block_start = 0; 7181 em->block_len = em->len; 7182 7183 map->num_stripes = num_stripes; 7184 map->io_width = btrfs_chunk_io_width(leaf, chunk); 7185 map->io_align = btrfs_chunk_io_align(leaf, chunk); 7186 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk); 7187 map->type = type; 7188 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); 7189 map->verified_stripes = 0; 7190 em->orig_block_len = calc_stripe_length(type, em->len, 7191 map->num_stripes); 7192 for (i = 0; i < num_stripes; i++) { 7193 map->stripes[i].physical = 7194 btrfs_stripe_offset_nr(leaf, chunk, i); 7195 devid = btrfs_stripe_devid_nr(leaf, chunk, i); 7196 args.devid = devid; 7197 read_extent_buffer(leaf, uuid, (unsigned long) 7198 btrfs_stripe_dev_uuid_nr(chunk, i), 7199 BTRFS_UUID_SIZE); 7200 args.uuid = uuid; 7201 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args); 7202 if (!map->stripes[i].dev) { 7203 map->stripes[i].dev = handle_missing_device(fs_info, 7204 devid, uuid); 7205 if (IS_ERR(map->stripes[i].dev)) { 7206 free_extent_map(em); 7207 return PTR_ERR(map->stripes[i].dev); 7208 } 7209 } 7210 7211 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 7212 &(map->stripes[i].dev->dev_state)); 7213 } 7214 7215 write_lock(&map_tree->lock); 7216 ret = add_extent_mapping(map_tree, em, 0); 7217 write_unlock(&map_tree->lock); 7218 if (ret < 0) { 7219 btrfs_err(fs_info, 7220 "failed to add chunk map, start=%llu len=%llu: %d", 7221 em->start, em->len, ret); 7222 } 7223 free_extent_map(em); 7224 7225 return ret; 7226 } 7227 7228 static void fill_device_from_item(struct extent_buffer *leaf, 7229 struct btrfs_dev_item *dev_item, 7230 struct btrfs_device *device) 7231 { 7232 unsigned long ptr; 7233 7234 device->devid = btrfs_device_id(leaf, dev_item); 7235 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item); 7236 device->total_bytes = device->disk_total_bytes; 7237 device->commit_total_bytes = device->disk_total_bytes; 7238 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item); 7239 device->commit_bytes_used = device->bytes_used; 7240 device->type = btrfs_device_type(leaf, dev_item); 7241 device->io_align = btrfs_device_io_align(leaf, dev_item); 7242 device->io_width = btrfs_device_io_width(leaf, dev_item); 7243 device->sector_size = btrfs_device_sector_size(leaf, dev_item); 7244 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID); 7245 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 7246 7247 ptr = btrfs_device_uuid(dev_item); 7248 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 7249 } 7250 7251 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info, 7252 u8 *fsid) 7253 { 7254 struct btrfs_fs_devices *fs_devices; 7255 int ret; 7256 7257 lockdep_assert_held(&uuid_mutex); 7258 ASSERT(fsid); 7259 7260 /* This will match only for multi-device seed fs */ 7261 list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list) 7262 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE)) 7263 return fs_devices; 7264 7265 
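/*
 * Not on the seed list yet: look the fsid up among all scanned
 * filesystems below. If nothing is found, a DEGRADED mount gets an
 * empty placeholder; otherwise the found fs_devices is cloned and
 * opened so this filesystem holds its own private, opened copy,
 * anchored on the seed_list.
 */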
7266 fs_devices = find_fsid(fsid, NULL); 7267 if (!fs_devices) { 7268 if (!btrfs_test_opt(fs_info, DEGRADED)) 7269 return ERR_PTR(-ENOENT); 7270 7271 fs_devices = alloc_fs_devices(fsid, NULL); 7272 if (IS_ERR(fs_devices)) 7273 return fs_devices; 7274 7275 fs_devices->seeding = true; 7276 fs_devices->opened = 1; 7277 return fs_devices; 7278 } 7279 7280 /* 7281 * Upon first call for a seed fs fsid, just create a private copy of the 7282 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list 7283 */ 7284 fs_devices = clone_fs_devices(fs_devices); 7285 if (IS_ERR(fs_devices)) 7286 return fs_devices; 7287 7288 ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder); 7289 if (ret) { 7290 free_fs_devices(fs_devices); 7291 return ERR_PTR(ret); 7292 } 7293 7294 if (!fs_devices->seeding) { 7295 close_fs_devices(fs_devices); 7296 free_fs_devices(fs_devices); 7297 return ERR_PTR(-EINVAL); 7298 } 7299 7300 list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list); 7301 7302 return fs_devices; 7303 } 7304 7305 static int read_one_dev(struct extent_buffer *leaf, 7306 struct btrfs_dev_item *dev_item) 7307 { 7308 BTRFS_DEV_LOOKUP_ARGS(args); 7309 struct btrfs_fs_info *fs_info = leaf->fs_info; 7310 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7311 struct btrfs_device *device; 7312 u64 devid; 7313 int ret; 7314 u8 fs_uuid[BTRFS_FSID_SIZE]; 7315 u8 dev_uuid[BTRFS_UUID_SIZE]; 7316 7317 devid = args.devid = btrfs_device_id(leaf, dev_item); 7318 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 7319 BTRFS_UUID_SIZE); 7320 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 7321 BTRFS_FSID_SIZE); 7322 args.uuid = dev_uuid; 7323 args.fsid = fs_uuid; 7324 7325 if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) { 7326 fs_devices = open_seed_devices(fs_info, fs_uuid); 7327 if (IS_ERR(fs_devices)) 7328 return PTR_ERR(fs_devices); 7329 } 7330 7331 device = btrfs_find_device(fs_info->fs_devices, &args); 7332 if (!device) { 7333 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7334 btrfs_report_missing_device(fs_info, devid, 7335 dev_uuid, true); 7336 return -ENOENT; 7337 } 7338 7339 device = add_missing_dev(fs_devices, devid, dev_uuid); 7340 if (IS_ERR(device)) { 7341 btrfs_err(fs_info, 7342 "failed to add missing dev %llu: %ld", 7343 devid, PTR_ERR(device)); 7344 return PTR_ERR(device); 7345 } 7346 btrfs_report_missing_device(fs_info, devid, dev_uuid, false); 7347 } else { 7348 if (!device->bdev) { 7349 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7350 btrfs_report_missing_device(fs_info, 7351 devid, dev_uuid, true); 7352 return -ENOENT; 7353 } 7354 btrfs_report_missing_device(fs_info, devid, 7355 dev_uuid, false); 7356 } 7357 7358 if (!device->bdev && 7359 !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 7360 /* 7361 * this happens when a device that was properly setup 7362 * in the device info lists suddenly goes bad. 
7363 * device->bdev is NULL, and so we have to set
7364 * device->missing to one here
7365 */
7366 device->fs_devices->missing_devices++;
7367 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
7368 }
7369
7370 /* Move the device to its own fs_devices */
7371 if (device->fs_devices != fs_devices) {
7372 ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
7373 &device->dev_state));
7374
7375 list_move(&device->dev_list, &fs_devices->devices);
7376 device->fs_devices->num_devices--;
7377 fs_devices->num_devices++;
7378
7379 device->fs_devices->missing_devices--;
7380 fs_devices->missing_devices++;
7381
7382 device->fs_devices = fs_devices;
7383 }
7384 }
7385
7386 if (device->fs_devices != fs_info->fs_devices) {
7387 BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
7388 if (device->generation !=
7389 btrfs_device_generation(leaf, dev_item))
7390 return -EINVAL;
7391 }
7392
7393 fill_device_from_item(leaf, dev_item, device);
7394 if (device->bdev) {
7395 u64 max_total_bytes = bdev_nr_bytes(device->bdev);
7396
7397 if (device->total_bytes > max_total_bytes) {
7398 btrfs_err(fs_info,
7399 "device total_bytes should be at most %llu but found %llu",
7400 max_total_bytes, device->total_bytes);
7401 return -EINVAL;
7402 }
7403 }
7404 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
7405 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
7406 !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
7407 device->fs_devices->total_rw_bytes += device->total_bytes;
7408 atomic64_add(device->total_bytes - device->bytes_used,
7409 &fs_info->free_chunk_space);
7410 }
7411 ret = 0;
7412 return ret;
7413 }
7414
7415 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
7416 {
7417 struct btrfs_root *root = fs_info->tree_root;
7418 struct btrfs_super_block *super_copy = fs_info->super_copy;
7419 struct extent_buffer *sb;
7420 struct btrfs_disk_key *disk_key;
7421 struct btrfs_chunk *chunk;
7422 u8 *array_ptr;
7423 unsigned long sb_array_offset;
7424 int ret = 0;
7425 u32 num_stripes;
7426 u32 array_size;
7427 u32 len = 0;
7428 u32 cur_offset;
7429 u64 type;
7430 struct btrfs_key key;
7431
7432 ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
7433 /*
7434 * This will create extent buffer of nodesize, superblock size is
7435 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
7436 * overallocate but we can keep it as-is, only the first page is used.
7437 */
7438 sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET,
7439 root->root_key.objectid, 0);
7440 if (IS_ERR(sb))
7441 return PTR_ERR(sb);
7442 set_extent_buffer_uptodate(sb);
7443 /*
7444 * The sb extent buffer is artificial and just used to read the system array.
7445 * set_extent_buffer_uptodate() call does not properly mark all its
7446 * pages up-to-date when the page is larger: extent does not cover the
7447 * whole page and consequently check_page_uptodate does not find all
7448 * the page's extents up-to-date (the hole beyond sb),
7449 * write_extent_buffer then triggers a WARN_ON.
7450 *
7451 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
7452 * but sb spans only this function. Add an explicit SetPageUptodate call
7453 * to silence the warning e.g. on PowerPC 64.
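*
* The array parsed below is a packed sequence of (struct btrfs_disk_key,
* struct btrfs_chunk) pairs, so each iteration consumes, as a sketch:
*
*	len = sizeof(struct btrfs_disk_key)
*	      + btrfs_chunk_item_size(btrfs_chunk_num_stripes(sb, chunk));
*
* where btrfs_chunk_item_size() covers the chunk header with its one
* embedded stripe plus num_stripes - 1 further struct btrfs_stripe
* entries.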
7454 */ 7455 if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE) 7456 SetPageUptodate(sb->pages[0]); 7457 7458 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); 7459 array_size = btrfs_super_sys_array_size(super_copy); 7460 7461 array_ptr = super_copy->sys_chunk_array; 7462 sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array); 7463 cur_offset = 0; 7464 7465 while (cur_offset < array_size) { 7466 disk_key = (struct btrfs_disk_key *)array_ptr; 7467 len = sizeof(*disk_key); 7468 if (cur_offset + len > array_size) 7469 goto out_short_read; 7470 7471 btrfs_disk_key_to_cpu(&key, disk_key); 7472 7473 array_ptr += len; 7474 sb_array_offset += len; 7475 cur_offset += len; 7476 7477 if (key.type != BTRFS_CHUNK_ITEM_KEY) { 7478 btrfs_err(fs_info, 7479 "unexpected item type %u in sys_array at offset %u", 7480 (u32)key.type, cur_offset); 7481 ret = -EIO; 7482 break; 7483 } 7484 7485 chunk = (struct btrfs_chunk *)sb_array_offset; 7486 /* 7487 * At least one btrfs_chunk with one stripe must be present, 7488 * exact stripe count check comes afterwards 7489 */ 7490 len = btrfs_chunk_item_size(1); 7491 if (cur_offset + len > array_size) 7492 goto out_short_read; 7493 7494 num_stripes = btrfs_chunk_num_stripes(sb, chunk); 7495 if (!num_stripes) { 7496 btrfs_err(fs_info, 7497 "invalid number of stripes %u in sys_array at offset %u", 7498 num_stripes, cur_offset); 7499 ret = -EIO; 7500 break; 7501 } 7502 7503 type = btrfs_chunk_type(sb, chunk); 7504 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) { 7505 btrfs_err(fs_info, 7506 "invalid chunk type %llu in sys_array at offset %u", 7507 type, cur_offset); 7508 ret = -EIO; 7509 break; 7510 } 7511 7512 len = btrfs_chunk_item_size(num_stripes); 7513 if (cur_offset + len > array_size) 7514 goto out_short_read; 7515 7516 ret = read_one_chunk(&key, sb, chunk); 7517 if (ret) 7518 break; 7519 7520 array_ptr += len; 7521 sb_array_offset += len; 7522 cur_offset += len; 7523 } 7524 clear_extent_buffer_uptodate(sb); 7525 free_extent_buffer_stale(sb); 7526 return ret; 7527 7528 out_short_read: 7529 btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u", 7530 len, cur_offset); 7531 clear_extent_buffer_uptodate(sb); 7532 free_extent_buffer_stale(sb); 7533 return -EIO; 7534 } 7535 7536 /* 7537 * Check if all chunks in the fs are OK for read-write degraded mount 7538 * 7539 * If the @failing_dev is specified, it's accounted as missing. 7540 * 7541 * Return true if all chunks meet the minimal RW mount requirements. 7542 * Return false if any chunk doesn't meet the minimal RW mount requirements. 7543 */ 7544 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info, 7545 struct btrfs_device *failing_dev) 7546 { 7547 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 7548 struct extent_map *em; 7549 u64 next_start = 0; 7550 bool ret = true; 7551 7552 read_lock(&map_tree->lock); 7553 em = lookup_extent_mapping(map_tree, 0, (u64)-1); 7554 read_unlock(&map_tree->lock); 7555 /* No chunk at all? 
Return false anyway */
7556 if (!em) {
7557 ret = false;
7558 goto out;
7559 }
7560 while (em) {
7561 struct map_lookup *map;
7562 int missing = 0;
7563 int max_tolerated;
7564 int i;
7565
7566 map = em->map_lookup;
7567 max_tolerated =
7568 btrfs_get_num_tolerated_disk_barrier_failures(
7569 map->type);
7570 for (i = 0; i < map->num_stripes; i++) {
7571 struct btrfs_device *dev = map->stripes[i].dev;
7572
7573 if (!dev || !dev->bdev ||
7574 test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
7575 dev->last_flush_error)
7576 missing++;
7577 else if (failing_dev && failing_dev == dev)
7578 missing++;
7579 }
7580 if (missing > max_tolerated) {
7581 if (!failing_dev)
7582 btrfs_warn(fs_info,
7583 "chunk %llu missing %d devices, max tolerance is %d for writable mount",
7584 em->start, missing, max_tolerated);
7585 free_extent_map(em);
7586 ret = false;
7587 goto out;
7588 }
7589 next_start = extent_map_end(em);
7590 free_extent_map(em);
7591
7592 read_lock(&map_tree->lock);
7593 em = lookup_extent_mapping(map_tree, next_start,
7594 (u64)(-1) - next_start);
7595 read_unlock(&map_tree->lock);
7596 }
7597 out:
7598 return ret;
7599 }
7600
7601 static void readahead_tree_node_children(struct extent_buffer *node)
7602 {
7603 int i;
7604 const int nr_items = btrfs_header_nritems(node);
7605
7606 for (i = 0; i < nr_items; i++)
7607 btrfs_readahead_node_child(node, i);
7608 }
7609
7610 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
7611 {
7612 struct btrfs_root *root = fs_info->chunk_root;
7613 struct btrfs_path *path;
7614 struct extent_buffer *leaf;
7615 struct btrfs_key key;
7616 struct btrfs_key found_key;
7617 int ret;
7618 int slot;
7619 u64 total_dev = 0;
7620 u64 last_ra_node = 0;
7621
7622 path = btrfs_alloc_path();
7623 if (!path)
7624 return -ENOMEM;
7625
7626 /*
7627 * uuid_mutex is needed only if we are mounting a sprout FS
7628 * otherwise we don't need it.
7629 */
7630 mutex_lock(&uuid_mutex);
7631
7632 /*
7633 * It is possible for mount and umount to race in such a way that
7634 * we execute this code path, but open_fs_devices failed to clear
7635 * total_rw_bytes. We certainly want it cleared before reading the
7636 * device items, so clear it here.
7637 */
7638 fs_info->fs_devices->total_rw_bytes = 0;
7639
7640 /*
7641 * Lockdep complains about possible circular locking dependency between
7642 * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores
7643 * used for freeze protection of a fs (struct super_block.s_writers),
7644 * which we take when starting a transaction, and extent buffers of the
7645 * chunk tree if we call read_one_dev() while holding a lock on an
7646 * extent buffer of the chunk tree. Since we are mounting the filesystem
7647 * and at this point there can't be any concurrent task modifying the
7648 * chunk tree, to keep it simple, just skip locking on the chunk tree.
7649 */
7650 ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
7651 path->skip_locking = 1;
7652
7653 /*
7654 * Read all device items, and then all the chunk items. All
7655 * device items are found before any chunk item (their object id
7656 * is smaller than the lowest possible object id for a chunk
7657 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
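*
* Concretely, device items use objectid BTRFS_DEV_ITEMS_OBJECTID (1)
* with the devid as offset, while chunk items use objectid
* BTRFS_FIRST_CHUNK_TREE_OBJECTID (256) with the chunk start as offset,
* so the forward search started below visits every device item first.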

int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;
	u64 total_dev = 0;
	u64 last_ra_node = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * uuid_mutex is needed only if we are mounting a sprout FS,
	 * otherwise we don't need it.
	 */
	mutex_lock(&uuid_mutex);

	/*
	 * It is possible for mount and umount to race in such a way that
	 * we execute this code path, but open_fs_devices failed to clear
	 * total_rw_bytes. We certainly want it cleared before reading the
	 * device items, so clear it here.
	 */
	fs_info->fs_devices->total_rw_bytes = 0;

	/*
	 * Lockdep complains about a possible circular locking dependency
	 * between a disk's open_mutex (struct gendisk.open_mutex), the rw
	 * semaphores used for freeze protection of a fs
	 * (struct super_block.s_writers), which we take when starting a
	 * transaction, and extent buffers of the chunk tree if we call
	 * read_one_dev() while holding a lock on an extent buffer of the
	 * chunk tree. Since we are mounting the filesystem and at this point
	 * there can't be any concurrent task modifying the chunk tree, to
	 * keep it simple, just skip locking on the chunk tree.
	 */
	ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
	path->skip_locking = 1;

	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		struct extent_buffer *node;

		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		node = path->nodes[1];
		if (node) {
			if (last_ra_node != node->start) {
				readahead_tree_node_children(node);
				last_ra_node = node->start;
			}
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;

			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(leaf, dev_item);
			if (ret)
				goto error;
			total_dev++;
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;

			/*
			 * We are only called at mount time, so no need to take
			 * fs_info->chunk_mutex. Plus, to avoid lockdep warnings,
			 * we always lock first fs_info->chunk_mutex before
			 * acquiring any locks on the chunk tree. This is a
			 * requirement for chunk allocation, see the comment on
			 * top of btrfs_chunk_alloc() for details.
			 */
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(&found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}

	/*
	 * After loading the chunk tree, we've got all device information,
	 * so do another round of validation checks.
	 */
	if (total_dev != fs_info->fs_devices->total_devices) {
		btrfs_err(fs_info,
	   "super_num_devices %llu mismatch with num_devices %llu found here",
			  btrfs_super_num_devices(fs_info->super_copy),
			  total_dev);
		ret = -EINVAL;
		goto error;
	}
	if (btrfs_super_total_bytes(fs_info->super_copy) <
	    fs_info->fs_devices->total_rw_bytes) {
		btrfs_err(fs_info,
	"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
			  btrfs_super_total_bytes(fs_info->super_copy),
			  fs_info->fs_devices->total_rw_bytes);
		ret = -EINVAL;
		goto error;
	}
	ret = 0;
error:
	mutex_unlock(&uuid_mutex);

	btrfs_free_path(path);
	return ret;
}

void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	struct btrfs_device *device;

	fs_devices->fs_info = fs_info;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list)
		device->fs_info = fs_info;

	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		list_for_each_entry(device, &seed_devs->devices, dev_list)
			device->fs_info = fs_info;

		seed_devs->fs_info = fs_info;
	}
	mutex_unlock(&fs_devices->device_list_mutex);
}

static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
				 const struct btrfs_dev_stats_item *ptr,
				 int index)
{
	u64 val;

	read_extent_buffer(eb, &val,
			   offsetof(struct btrfs_dev_stats_item, values) +
			   ((unsigned long)ptr) + (index * sizeof(u64)),
			   sizeof(val));
	return val;
}
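
/*
 * On disk, a dev_stats item is nothing but an array of __le64 counters, one
 * per BTRFS_DEV_STAT_* index. The accessors above and below therefore
 * address counter i at byte offset
 * offsetof(struct btrfs_dev_stats_item, values) + i * sizeof(u64),
 * relative to the item's position in the leaf.
 */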

static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
				      struct btrfs_dev_stats_item *ptr,
				      int index, u64 val)
{
	write_extent_buffer(eb, &val,
			    offsetof(struct btrfs_dev_stats_item, values) +
			    ((unsigned long)ptr) + (index * sizeof(u64)),
			    sizeof(val));
}

static int btrfs_device_init_dev_stats(struct btrfs_device *device,
				       struct btrfs_path *path)
{
	struct btrfs_dev_stats_item *ptr;
	struct extent_buffer *eb;
	struct btrfs_key key;
	int item_size;
	int i, ret, slot;

	if (!device->fs_info->dev_root)
		return 0;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;
	ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0);
	if (ret) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			btrfs_dev_stat_set(device, i, 0);
		device->dev_stats_valid = 1;
		btrfs_release_path(path);
		return ret < 0 ? ret : 0;
	}
	slot = path->slots[0];
	eb = path->nodes[0];
	item_size = btrfs_item_size(eb, slot);

	ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item);

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
		if (item_size >= (1 + i) * sizeof(__le64))
			btrfs_dev_stat_set(device, i,
					   btrfs_dev_stats_value(eb, ptr, i));
		else
			btrfs_dev_stat_set(device, i, 0);
	}

	device->dev_stats_valid = 1;
	btrfs_dev_stat_print_on_load(device);
	btrfs_release_path(path);

	return 0;
}

int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		ret = btrfs_device_init_dev_stats(device, path);
		if (ret)
			goto out;
	}
	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		list_for_each_entry(device, &seed_devs->devices, dev_list) {
			ret = btrfs_device_init_dev_stats(device, path);
			if (ret)
				goto out;
		}
	}
out:
	mutex_unlock(&fs_devices->device_list_mutex);

	btrfs_free_path(path);
	return ret;
}
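
/*
 * The "item_size >= (1 + i) * sizeof(__le64)" test above keeps older
 * filesystems readable: an item written when fewer counters existed is
 * simply shorter, so the counters it does contain are loaded and the rest
 * start at zero. Writing goes the other way - update_dev_stat_item() below
 * deletes an undersized item and reinserts one at the current full size
 * before storing the values.
 */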

static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn_in_rcu(fs_info,
			"error %d while searching for dev_stats item for device %s",
				  ret, rcu_str_deref(device->name));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* need to delete old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn_in_rcu(fs_info,
				"delete too small dev_stats item for device %s failed %d",
					  rcu_str_deref(device->name), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn_in_rcu(fs_info,
				"insert dev_stats item for device %s failed %d",
					  rcu_str_deref(device->name), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Called from commit_transaction. Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		stats_cnt = atomic_read(&device->dev_stats_ccnt);
		if (!device->dev_stats_valid || stats_cnt == 0)
			continue;

		/*
		 * There is a LOAD-LOAD control dependency between the value of
		 * dev_stats_ccnt and updating the on-disk values which requires
		 * reading the in-memory counters. Such control dependencies
		 * require explicit read memory barriers.
		 *
		 * This memory barrier pairs with smp_mb__before_atomic in
		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
		 * barrier implied by atomic_xchg in
		 * btrfs_dev_stats_read_and_reset.
		 */
		smp_rmb();

		ret = update_dev_stat_item(trans, device);
		if (!ret)
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);
	btrfs_dev_stat_print_on_error(dev);
}

static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
	if (!dev->dev_stats_valid)
		return;
	btrfs_err_rl_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			    rcu_str_deref(dev->name),
			    btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			    btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			    btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			    btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			    btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

	btrfs_info_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			  rcu_str_deref(dev->name),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}
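
/*
 * Handler behind the BTRFS_IOC_GET_DEV_STATS ioctl. The caller advertises
 * how many counters it can receive via stats->nr_items; only that many
 * values are copied out, and nr_items is clamped to
 * BTRFS_DEV_STAT_VALUES_MAX on return so userspace learns how many counters
 * the kernel actually tracks. With BTRFS_DEV_STATS_RESET set, the counters
 * are read and zeroed in one step.
 */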
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	BTRFS_DEV_LOOKUP_ARGS(args);
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	args.devid = stats->devid;
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_set(dev, i, 0);
		}
		btrfs_info(fs_info, "device stats zeroed by %s (%d)",
			   current->comm, task_pid_nr(current));
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}

/*
 * Update the size and bytes used for each device where it changed. This is
 * delayed since we would otherwise get errors while writing out the
 * superblocks.
 *
 * Must be invoked during transaction commit.
 */
void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
{
	struct btrfs_device *curr, *next;

	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);

	if (list_empty(&trans->dev_update_list))
		return;

	/*
	 * We don't need the device_list_mutex here. This list is owned by the
	 * transaction and the transaction must complete before the device is
	 * released.
	 */
	mutex_lock(&trans->fs_info->chunk_mutex);
	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
				 post_commit_list) {
		list_del_init(&curr->post_commit_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
		curr->commit_bytes_used = curr->bytes_used;
	}
	mutex_unlock(&trans->fs_info->chunk_mutex);
}

/*
 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
 */
int btrfs_bg_type_to_factor(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	return btrfs_raid_array[index].ncopies;
}
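
/*
 * Minimal usage sketch (hypothetical helper, not called anywhere in this
 * file): the factor converts the logical size of a block group into the raw
 * bytes it consumes across devices, e.g. a 1GiB RAID1 or DUP block group
 * occupies 2GiB of raw space, while SINGLE and RAID0 occupy 1GiB.
 */
static inline u64 example_bg_raw_bytes(u64 logical_bytes, u64 bg_flags)
{
	return logical_bytes * btrfs_bg_type_to_factor(bg_flags);
}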

static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
				 u64 chunk_offset, u64 devid,
				 u64 physical_offset, u64 physical_len)
{
	struct btrfs_dev_lookup_args args = { .devid = devid };
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *dev;
	u64 stripe_len;
	bool found = false;
	int ret = 0;
	int i;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
		goto out;
	}

	map = em->map_lookup;
	stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
	if (physical_len != stripe_len) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
			  physical_offset, devid, em->start, physical_len,
			  stripe_len);
		ret = -EUCLEAN;
		goto out;
	}

	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->devid == devid &&
		    map->stripes[i].physical == physical_offset) {
			found = true;
			if (map->verified_stripes >= map->num_stripes) {
				btrfs_err(fs_info,
					"too many dev extents for chunk %llu found",
					  em->start);
				ret = -EUCLEAN;
				goto out;
			}
			map->verified_stripes++;
			break;
		}
	}
	if (!found) {
		btrfs_err(fs_info,
	"dev extent physical offset %llu devid %llu has no corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
	}

	/* Make sure no dev extent is beyond device boundary */
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	if (!dev) {
		btrfs_err(fs_info, "failed to find devid %llu", devid);
		ret = -EUCLEAN;
		goto out;
	}

	if (physical_offset + physical_len > dev->disk_total_bytes) {
		btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
			  devid, physical_offset, physical_len,
			  dev->disk_total_bytes);
		ret = -EUCLEAN;
		goto out;
	}

	if (dev->zone_info) {
		u64 zone_size = dev->zone_info->zone_size;

		if (!IS_ALIGNED(physical_offset, zone_size) ||
		    !IS_ALIGNED(physical_len, zone_size)) {
			btrfs_err(fs_info,
"zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
				  devid, physical_offset, physical_len);
			ret = -EUCLEAN;
			goto out;
		}
	}

out:
	free_extent_map(em);
	return ret;
}
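
/*
 * The verification is two-sided: verify_one_dev_extent() above proves that
 * every dev extent maps onto exactly one stripe of an existing chunk
 * (bumping map->verified_stripes for the match), and the walk below proves
 * the converse, that every stripe of every chunk was claimed by a dev
 * extent. A chunk with verified_stripes != num_stripes therefore points at
 * a missing or duplicated dev extent.
 */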
static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct rb_node *node;
	int ret = 0;

	read_lock(&em_tree->lock);
	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
		em = rb_entry(node, struct extent_map, rb_node);
		if (em->map_lookup->num_stripes !=
		    em->map_lookup->verified_stripes) {
			btrfs_err(fs_info,
			"chunk %llu has missing dev extent, have %d expect %d",
				  em->start,
				  em->map_lookup->verified_stripes,
				  em->map_lookup->num_stripes);
			ret = -EUCLEAN;
			goto out;
		}
	}
out:
	read_unlock(&em_tree->lock);
	return ret;
}

/*
 * Ensure that all dev extents are mapped to the correct chunk, otherwise
 * later chunk allocation/free would cause unexpected behavior.
 *
 * NOTE: This will iterate through the whole device tree, which should be
 * about the same size as the chunk tree. This slightly increases mount time.
 */
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	u64 prev_devid = 0;
	u64 prev_dev_ext_end = 0;
	int ret = 0;

	/*
	 * We don't have a dev_root because we mounted with ignorebadroots and
	 * failed to load the root, so we want to skip the verification in this
	 * case for sure.
	 *
	 * However if the dev root is fine, but the tree itself is corrupted
	 * we'd still fail to mount. This verification is only to make sure
	 * writes can happen safely, so instead just bypass this check
	 * completely in the case of IGNOREBADROOTS.
	 */
	if (btrfs_test_opt(fs_info, IGNOREBADROOTS))
		return 0;

	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	while (1) {
		struct extent_buffer *leaf = path->nodes[0];
		struct btrfs_dev_extent *dext;
		int slot = path->slots[0];
		u64 chunk_offset;
		u64 physical_offset;
		u64 physical_len;
		u64 devid;

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			break;
		devid = key.objectid;
		physical_offset = key.offset;

		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
		physical_len = btrfs_dev_extent_length(leaf, dext);

		/* Check if this dev extent overlaps with the previous one */
		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
			btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
				  devid, physical_offset, prev_dev_ext_end);
			ret = -EUCLEAN;
			goto out;
		}

		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
					    physical_offset, physical_len);
		if (ret < 0)
			goto out;
		prev_devid = devid;
		prev_dev_ext_end = physical_offset + physical_len;

		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}

	/* Ensure all chunks have corresponding dev extents */
	ret = verify_chunk_dev_extent_mapping(fs_info);
out:
	btrfs_free_path(path);
	return ret;
}
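
/*
 * Note on the overlap check above: dev extent keys are
 * (devid, BTRFS_DEV_EXTENT_KEY, physical_offset), so the forward scan
 * visits each device's extents in ascending physical order. Comparing
 * against just the previous extent's end is therefore sufficient to catch
 * any overlap on a device.
 */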

/*
 * Check whether the given block group or device is pinned by any inode being
 * used as a swapfile.
 */
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
{
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = fs_info->swapfile_pins.rb_node;
	while (node) {
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (ptr < sp->ptr)
			node = node->rb_left;
		else if (ptr > sp->ptr)
			node = node->rb_right;
		else
			break;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
	return node != NULL;
}

static int relocating_repair_kthread(void *data)
{
	struct btrfs_block_group *cache = (struct btrfs_block_group *)data;
	struct btrfs_fs_info *fs_info = cache->fs_info;
	u64 target;
	int ret = 0;

	target = cache->start;
	btrfs_put_block_group(cache);

	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
		btrfs_info(fs_info,
			   "zoned: skip relocating block group %llu to repair: EBUSY",
			   target);
		return -EBUSY;
	}

	mutex_lock(&fs_info->reclaim_bgs_lock);

	/* Ensure block group still exists */
	cache = btrfs_lookup_block_group(fs_info, target);
	if (!cache)
		goto out;

	if (!cache->relocating_repair)
		goto out;

	ret = btrfs_may_alloc_data_chunk(fs_info, target);
	if (ret < 0)
		goto out;

	btrfs_info(fs_info,
		   "zoned: relocating block group %llu to repair IO failure",
		   target);
	ret = btrfs_relocate_chunk(fs_info, target);

out:
	if (cache)
		btrfs_put_block_group(cache);
	mutex_unlock(&fs_info->reclaim_bgs_lock);
	btrfs_exclop_finish(fs_info);

	return ret;
}

bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
{
	struct btrfs_block_group *cache;

	if (!btrfs_is_zoned(fs_info))
		return false;

	/* Do not attempt to repair in degraded state */
	if (btrfs_test_opt(fs_info, DEGRADED))
		return true;

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return true;

	spin_lock(&cache->lock);
	if (cache->relocating_repair) {
		spin_unlock(&cache->lock);
		btrfs_put_block_group(cache);
		return true;
	}
	cache->relocating_repair = 1;
	spin_unlock(&cache->lock);

	kthread_run(relocating_repair_kthread, cache,
		    "btrfs-relocating-repair");

	return true;
}