1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2007 Oracle. All rights reserved. 4 */ 5 6 #include <linux/sched.h> 7 #include <linux/sched/mm.h> 8 #include <linux/bio.h> 9 #include <linux/slab.h> 10 #include <linux/blkdev.h> 11 #include <linux/ratelimit.h> 12 #include <linux/kthread.h> 13 #include <linux/raid/pq.h> 14 #include <linux/semaphore.h> 15 #include <linux/uuid.h> 16 #include <linux/list_sort.h> 17 #include <linux/namei.h> 18 #include "misc.h" 19 #include "ctree.h" 20 #include "extent_map.h" 21 #include "disk-io.h" 22 #include "transaction.h" 23 #include "print-tree.h" 24 #include "volumes.h" 25 #include "raid56.h" 26 #include "async-thread.h" 27 #include "check-integrity.h" 28 #include "rcu-string.h" 29 #include "dev-replace.h" 30 #include "sysfs.h" 31 #include "tree-checker.h" 32 #include "space-info.h" 33 #include "block-group.h" 34 #include "discard.h" 35 #include "zoned.h" 36 37 #define BTRFS_BLOCK_GROUP_STRIPE_MASK (BTRFS_BLOCK_GROUP_RAID0 | \ 38 BTRFS_BLOCK_GROUP_RAID10 | \ 39 BTRFS_BLOCK_GROUP_RAID56_MASK) 40 41 const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = { 42 [BTRFS_RAID_RAID10] = { 43 .sub_stripes = 2, 44 .dev_stripes = 1, 45 .devs_max = 0, /* 0 == as many as possible */ 46 .devs_min = 2, 47 .tolerated_failures = 1, 48 .devs_increment = 2, 49 .ncopies = 2, 50 .nparity = 0, 51 .raid_name = "raid10", 52 .bg_flag = BTRFS_BLOCK_GROUP_RAID10, 53 .mindev_error = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET, 54 }, 55 [BTRFS_RAID_RAID1] = { 56 .sub_stripes = 1, 57 .dev_stripes = 1, 58 .devs_max = 2, 59 .devs_min = 2, 60 .tolerated_failures = 1, 61 .devs_increment = 2, 62 .ncopies = 2, 63 .nparity = 0, 64 .raid_name = "raid1", 65 .bg_flag = BTRFS_BLOCK_GROUP_RAID1, 66 .mindev_error = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET, 67 }, 68 [BTRFS_RAID_RAID1C3] = { 69 .sub_stripes = 1, 70 .dev_stripes = 1, 71 .devs_max = 3, 72 .devs_min = 3, 73 .tolerated_failures = 2, 74 .devs_increment = 3, 75 .ncopies = 3, 76 .nparity = 0, 77 .raid_name = "raid1c3", 78 .bg_flag = BTRFS_BLOCK_GROUP_RAID1C3, 79 .mindev_error = BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET, 80 }, 81 [BTRFS_RAID_RAID1C4] = { 82 .sub_stripes = 1, 83 .dev_stripes = 1, 84 .devs_max = 4, 85 .devs_min = 4, 86 .tolerated_failures = 3, 87 .devs_increment = 4, 88 .ncopies = 4, 89 .nparity = 0, 90 .raid_name = "raid1c4", 91 .bg_flag = BTRFS_BLOCK_GROUP_RAID1C4, 92 .mindev_error = BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET, 93 }, 94 [BTRFS_RAID_DUP] = { 95 .sub_stripes = 1, 96 .dev_stripes = 2, 97 .devs_max = 1, 98 .devs_min = 1, 99 .tolerated_failures = 0, 100 .devs_increment = 1, 101 .ncopies = 2, 102 .nparity = 0, 103 .raid_name = "dup", 104 .bg_flag = BTRFS_BLOCK_GROUP_DUP, 105 .mindev_error = 0, 106 }, 107 [BTRFS_RAID_RAID0] = { 108 .sub_stripes = 1, 109 .dev_stripes = 1, 110 .devs_max = 0, 111 .devs_min = 1, 112 .tolerated_failures = 0, 113 .devs_increment = 1, 114 .ncopies = 1, 115 .nparity = 0, 116 .raid_name = "raid0", 117 .bg_flag = BTRFS_BLOCK_GROUP_RAID0, 118 .mindev_error = 0, 119 }, 120 [BTRFS_RAID_SINGLE] = { 121 .sub_stripes = 1, 122 .dev_stripes = 1, 123 .devs_max = 1, 124 .devs_min = 1, 125 .tolerated_failures = 0, 126 .devs_increment = 1, 127 .ncopies = 1, 128 .nparity = 0, 129 .raid_name = "single", 130 .bg_flag = 0, 131 .mindev_error = 0, 132 }, 133 [BTRFS_RAID_RAID5] = { 134 .sub_stripes = 1, 135 .dev_stripes = 1, 136 .devs_max = 0, 137 .devs_min = 2, 138 .tolerated_failures = 1, 139 .devs_increment = 1, 140 .ncopies = 1, 141 .nparity = 1, 142 .raid_name = "raid5", 143 .bg_flag = BTRFS_BLOCK_GROUP_RAID5, 144 .mindev_error = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET, 145 }, 146 [BTRFS_RAID_RAID6] = { 147 .sub_stripes = 1, 148 .dev_stripes = 1, 149 .devs_max = 0, 150 .devs_min = 3, 151 .tolerated_failures = 2, 152 .devs_increment = 1, 153 .ncopies = 1, 154 .nparity = 2, 155 .raid_name = "raid6", 156 .bg_flag = BTRFS_BLOCK_GROUP_RAID6, 157 .mindev_error = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET, 158 }, 159 }; 160 161 /* 162 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which 163 * can be used as index to access btrfs_raid_array[]. 164 */ 165 enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags) 166 { 167 if (flags & BTRFS_BLOCK_GROUP_RAID10) 168 return BTRFS_RAID_RAID10; 169 else if (flags & BTRFS_BLOCK_GROUP_RAID1) 170 return BTRFS_RAID_RAID1; 171 else if (flags & BTRFS_BLOCK_GROUP_RAID1C3) 172 return BTRFS_RAID_RAID1C3; 173 else if (flags & BTRFS_BLOCK_GROUP_RAID1C4) 174 return BTRFS_RAID_RAID1C4; 175 else if (flags & BTRFS_BLOCK_GROUP_DUP) 176 return BTRFS_RAID_DUP; 177 else if (flags & BTRFS_BLOCK_GROUP_RAID0) 178 return BTRFS_RAID_RAID0; 179 else if (flags & BTRFS_BLOCK_GROUP_RAID5) 180 return BTRFS_RAID_RAID5; 181 else if (flags & BTRFS_BLOCK_GROUP_RAID6) 182 return BTRFS_RAID_RAID6; 183 184 return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */ 185 } 186 187 const char *btrfs_bg_type_to_raid_name(u64 flags) 188 { 189 const int index = btrfs_bg_flags_to_raid_index(flags); 190 191 if (index >= BTRFS_NR_RAID_TYPES) 192 return NULL; 193 194 return btrfs_raid_array[index].raid_name; 195 } 196 197 /* 198 * Fill @buf with textual description of @bg_flags, no more than @size_buf 199 * bytes including terminating null byte. 200 */ 201 void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf) 202 { 203 int i; 204 int ret; 205 char *bp = buf; 206 u64 flags = bg_flags; 207 u32 size_bp = size_buf; 208 209 if (!flags) { 210 strcpy(bp, "NONE"); 211 return; 212 } 213 214 #define DESCRIBE_FLAG(flag, desc) \ 215 do { \ 216 if (flags & (flag)) { \ 217 ret = snprintf(bp, size_bp, "%s|", (desc)); \ 218 if (ret < 0 || ret >= size_bp) \ 219 goto out_overflow; \ 220 size_bp -= ret; \ 221 bp += ret; \ 222 flags &= ~(flag); \ 223 } \ 224 } while (0) 225 226 DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data"); 227 DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system"); 228 DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata"); 229 230 DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single"); 231 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) 232 DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag, 233 btrfs_raid_array[i].raid_name); 234 #undef DESCRIBE_FLAG 235 236 if (flags) { 237 ret = snprintf(bp, size_bp, "0x%llx|", flags); 238 size_bp -= ret; 239 } 240 241 if (size_bp < size_buf) 242 buf[size_buf - size_bp - 1] = '\0'; /* remove last | */ 243 244 /* 245 * The text is trimmed, it's up to the caller to provide sufficiently 246 * large buffer 247 */ 248 out_overflow:; 249 } 250 251 static int init_first_rw_device(struct btrfs_trans_handle *trans); 252 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info); 253 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev); 254 static void btrfs_dev_stat_print_on_load(struct btrfs_device *device); 255 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, 256 enum btrfs_map_op op, 257 u64 logical, u64 *length, 258 struct btrfs_io_context **bioc_ret, 259 int mirror_num, int need_raid_map); 260 261 /* 262 * Device locking 263 * ============== 264 * 265 * There are several mutexes that protect manipulation of devices and low-level 266 * structures like chunks but not block groups, extents or files 267 * 268 * uuid_mutex (global lock) 269 * ------------------------ 270 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from 271 * the SCAN_DEV ioctl registration or from mount either implicitly (the first 272 * device) or requested by the device= mount option 273 * 274 * the mutex can be very coarse and can cover long-running operations 275 * 276 * protects: updates to fs_devices counters like missing devices, rw devices, 277 * seeding, structure cloning, opening/closing devices at mount/umount time 278 * 279 * global::fs_devs - add, remove, updates to the global list 280 * 281 * does not protect: manipulation of the fs_devices::devices list in general 282 * but in mount context it could be used to exclude list modifications by eg. 283 * scan ioctl 284 * 285 * btrfs_device::name - renames (write side), read is RCU 286 * 287 * fs_devices::device_list_mutex (per-fs, with RCU) 288 * ------------------------------------------------ 289 * protects updates to fs_devices::devices, ie. adding and deleting 290 * 291 * simple list traversal with read-only actions can be done with RCU protection 292 * 293 * may be used to exclude some operations from running concurrently without any 294 * modifications to the list (see write_all_supers) 295 * 296 * Is not required at mount and close times, because our device list is 297 * protected by the uuid_mutex at that point. 298 * 299 * balance_mutex 300 * ------------- 301 * protects balance structures (status, state) and context accessed from 302 * several places (internally, ioctl) 303 * 304 * chunk_mutex 305 * ----------- 306 * protects chunks, adding or removing during allocation, trim or when a new 307 * device is added/removed. Additionally it also protects post_commit_list of 308 * individual devices, since they can be added to the transaction's 309 * post_commit_list only with chunk_mutex held. 310 * 311 * cleaner_mutex 312 * ------------- 313 * a big lock that is held by the cleaner thread and prevents running subvolume 314 * cleaning together with relocation or delayed iputs 315 * 316 * 317 * Lock nesting 318 * ============ 319 * 320 * uuid_mutex 321 * device_list_mutex 322 * chunk_mutex 323 * balance_mutex 324 * 325 * 326 * Exclusive operations 327 * ==================== 328 * 329 * Maintains the exclusivity of the following operations that apply to the 330 * whole filesystem and cannot run in parallel. 331 * 332 * - Balance (*) 333 * - Device add 334 * - Device remove 335 * - Device replace (*) 336 * - Resize 337 * 338 * The device operations (as above) can be in one of the following states: 339 * 340 * - Running state 341 * - Paused state 342 * - Completed state 343 * 344 * Only device operations marked with (*) can go into the Paused state for the 345 * following reasons: 346 * 347 * - ioctl (only Balance can be Paused through ioctl) 348 * - filesystem remounted as read-only 349 * - filesystem unmounted and mounted as read-only 350 * - system power-cycle and filesystem mounted as read-only 351 * - filesystem or device errors leading to forced read-only 352 * 353 * The status of exclusive operation is set and cleared atomically. 354 * During the course of Paused state, fs_info::exclusive_operation remains set. 355 * A device operation in Paused or Running state can be canceled or resumed 356 * either by ioctl (Balance only) or when remounted as read-write. 357 * The exclusive status is cleared when the device operation is canceled or 358 * completed. 359 */ 360 361 DEFINE_MUTEX(uuid_mutex); 362 static LIST_HEAD(fs_uuids); 363 struct list_head * __attribute_const__ btrfs_get_fs_uuids(void) 364 { 365 return &fs_uuids; 366 } 367 368 /* 369 * alloc_fs_devices - allocate struct btrfs_fs_devices 370 * @fsid: if not NULL, copy the UUID to fs_devices::fsid 371 * @metadata_fsid: if not NULL, copy the UUID to fs_devices::metadata_fsid 372 * 373 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR(). 374 * The returned struct is not linked onto any lists and can be destroyed with 375 * kfree() right away. 376 */ 377 static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid, 378 const u8 *metadata_fsid) 379 { 380 struct btrfs_fs_devices *fs_devs; 381 382 fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL); 383 if (!fs_devs) 384 return ERR_PTR(-ENOMEM); 385 386 mutex_init(&fs_devs->device_list_mutex); 387 388 INIT_LIST_HEAD(&fs_devs->devices); 389 INIT_LIST_HEAD(&fs_devs->alloc_list); 390 INIT_LIST_HEAD(&fs_devs->fs_list); 391 INIT_LIST_HEAD(&fs_devs->seed_list); 392 if (fsid) 393 memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE); 394 395 if (metadata_fsid) 396 memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE); 397 else if (fsid) 398 memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE); 399 400 return fs_devs; 401 } 402 403 void btrfs_free_device(struct btrfs_device *device) 404 { 405 WARN_ON(!list_empty(&device->post_commit_list)); 406 rcu_string_free(device->name); 407 extent_io_tree_release(&device->alloc_state); 408 bio_put(device->flush_bio); 409 btrfs_destroy_dev_zone_info(device); 410 kfree(device); 411 } 412 413 static void free_fs_devices(struct btrfs_fs_devices *fs_devices) 414 { 415 struct btrfs_device *device; 416 WARN_ON(fs_devices->opened); 417 while (!list_empty(&fs_devices->devices)) { 418 device = list_entry(fs_devices->devices.next, 419 struct btrfs_device, dev_list); 420 list_del(&device->dev_list); 421 btrfs_free_device(device); 422 } 423 kfree(fs_devices); 424 } 425 426 void __exit btrfs_cleanup_fs_uuids(void) 427 { 428 struct btrfs_fs_devices *fs_devices; 429 430 while (!list_empty(&fs_uuids)) { 431 fs_devices = list_entry(fs_uuids.next, 432 struct btrfs_fs_devices, fs_list); 433 list_del(&fs_devices->fs_list); 434 free_fs_devices(fs_devices); 435 } 436 } 437 438 static noinline struct btrfs_fs_devices *find_fsid( 439 const u8 *fsid, const u8 *metadata_fsid) 440 { 441 struct btrfs_fs_devices *fs_devices; 442 443 ASSERT(fsid); 444 445 /* Handle non-split brain cases */ 446 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 447 if (metadata_fsid) { 448 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0 449 && memcmp(metadata_fsid, fs_devices->metadata_uuid, 450 BTRFS_FSID_SIZE) == 0) 451 return fs_devices; 452 } else { 453 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0) 454 return fs_devices; 455 } 456 } 457 return NULL; 458 } 459 460 static struct btrfs_fs_devices *find_fsid_with_metadata_uuid( 461 struct btrfs_super_block *disk_super) 462 { 463 464 struct btrfs_fs_devices *fs_devices; 465 466 /* 467 * Handle scanned device having completed its fsid change but 468 * belonging to a fs_devices that was created by first scanning 469 * a device which didn't have its fsid/metadata_uuid changed 470 * at all and the CHANGING_FSID_V2 flag set. 471 */ 472 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 473 if (fs_devices->fsid_change && 474 memcmp(disk_super->metadata_uuid, fs_devices->fsid, 475 BTRFS_FSID_SIZE) == 0 && 476 memcmp(fs_devices->fsid, fs_devices->metadata_uuid, 477 BTRFS_FSID_SIZE) == 0) { 478 return fs_devices; 479 } 480 } 481 /* 482 * Handle scanned device having completed its fsid change but 483 * belonging to a fs_devices that was created by a device that 484 * has an outdated pair of fsid/metadata_uuid and 485 * CHANGING_FSID_V2 flag set. 486 */ 487 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 488 if (fs_devices->fsid_change && 489 memcmp(fs_devices->metadata_uuid, 490 fs_devices->fsid, BTRFS_FSID_SIZE) != 0 && 491 memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid, 492 BTRFS_FSID_SIZE) == 0) { 493 return fs_devices; 494 } 495 } 496 497 return find_fsid(disk_super->fsid, disk_super->metadata_uuid); 498 } 499 500 501 static int 502 btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder, 503 int flush, struct block_device **bdev, 504 struct btrfs_super_block **disk_super) 505 { 506 int ret; 507 508 *bdev = blkdev_get_by_path(device_path, flags, holder); 509 510 if (IS_ERR(*bdev)) { 511 ret = PTR_ERR(*bdev); 512 goto error; 513 } 514 515 if (flush) 516 sync_blockdev(*bdev); 517 ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE); 518 if (ret) { 519 blkdev_put(*bdev, flags); 520 goto error; 521 } 522 invalidate_bdev(*bdev); 523 *disk_super = btrfs_read_dev_super(*bdev); 524 if (IS_ERR(*disk_super)) { 525 ret = PTR_ERR(*disk_super); 526 blkdev_put(*bdev, flags); 527 goto error; 528 } 529 530 return 0; 531 532 error: 533 *bdev = NULL; 534 return ret; 535 } 536 537 static bool device_path_matched(const char *path, struct btrfs_device *device) 538 { 539 int found; 540 541 rcu_read_lock(); 542 found = strcmp(rcu_str_deref(device->name), path); 543 rcu_read_unlock(); 544 545 return found == 0; 546 } 547 548 /* 549 * Search and remove all stale (devices which are not mounted) devices. 550 * When both inputs are NULL, it will search and release all stale devices. 551 * path: Optional. When provided will it release all unmounted devices 552 * matching this path only. 553 * skip_dev: Optional. Will skip this device when searching for the stale 554 * devices. 555 * Return: 0 for success or if @path is NULL. 556 * -EBUSY if @path is a mounted device. 557 * -ENOENT if @path does not match any device in the list. 558 */ 559 static int btrfs_free_stale_devices(const char *path, 560 struct btrfs_device *skip_device) 561 { 562 struct btrfs_fs_devices *fs_devices, *tmp_fs_devices; 563 struct btrfs_device *device, *tmp_device; 564 int ret = 0; 565 566 lockdep_assert_held(&uuid_mutex); 567 568 if (path) 569 ret = -ENOENT; 570 571 list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) { 572 573 mutex_lock(&fs_devices->device_list_mutex); 574 list_for_each_entry_safe(device, tmp_device, 575 &fs_devices->devices, dev_list) { 576 if (skip_device && skip_device == device) 577 continue; 578 if (path && !device->name) 579 continue; 580 if (path && !device_path_matched(path, device)) 581 continue; 582 if (fs_devices->opened) { 583 /* for an already deleted device return 0 */ 584 if (path && ret != 0) 585 ret = -EBUSY; 586 break; 587 } 588 589 /* delete the stale device */ 590 fs_devices->num_devices--; 591 list_del(&device->dev_list); 592 btrfs_free_device(device); 593 594 ret = 0; 595 } 596 mutex_unlock(&fs_devices->device_list_mutex); 597 598 if (fs_devices->num_devices == 0) { 599 btrfs_sysfs_remove_fsid(fs_devices); 600 list_del(&fs_devices->fs_list); 601 free_fs_devices(fs_devices); 602 } 603 } 604 605 return ret; 606 } 607 608 /* 609 * This is only used on mount, and we are protected from competing things 610 * messing with our fs_devices by the uuid_mutex, thus we do not need the 611 * fs_devices->device_list_mutex here. 612 */ 613 static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices, 614 struct btrfs_device *device, fmode_t flags, 615 void *holder) 616 { 617 struct request_queue *q; 618 struct block_device *bdev; 619 struct btrfs_super_block *disk_super; 620 u64 devid; 621 int ret; 622 623 if (device->bdev) 624 return -EINVAL; 625 if (!device->name) 626 return -EINVAL; 627 628 ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1, 629 &bdev, &disk_super); 630 if (ret) 631 return ret; 632 633 devid = btrfs_stack_device_id(&disk_super->dev_item); 634 if (devid != device->devid) 635 goto error_free_page; 636 637 if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE)) 638 goto error_free_page; 639 640 device->generation = btrfs_super_generation(disk_super); 641 642 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) { 643 if (btrfs_super_incompat_flags(disk_super) & 644 BTRFS_FEATURE_INCOMPAT_METADATA_UUID) { 645 pr_err( 646 "BTRFS: Invalid seeding and uuid-changed device detected\n"); 647 goto error_free_page; 648 } 649 650 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 651 fs_devices->seeding = true; 652 } else { 653 if (bdev_read_only(bdev)) 654 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 655 else 656 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 657 } 658 659 q = bdev_get_queue(bdev); 660 if (!blk_queue_nonrot(q)) 661 fs_devices->rotating = true; 662 663 device->bdev = bdev; 664 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 665 device->mode = flags; 666 667 fs_devices->open_devices++; 668 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 669 device->devid != BTRFS_DEV_REPLACE_DEVID) { 670 fs_devices->rw_devices++; 671 list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list); 672 } 673 btrfs_release_disk_super(disk_super); 674 675 return 0; 676 677 error_free_page: 678 btrfs_release_disk_super(disk_super); 679 blkdev_put(bdev, flags); 680 681 return -EINVAL; 682 } 683 684 /* 685 * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices 686 * being created with a disk that has already completed its fsid change. Such 687 * disk can belong to an fs which has its FSID changed or to one which doesn't. 688 * Handle both cases here. 689 */ 690 static struct btrfs_fs_devices *find_fsid_inprogress( 691 struct btrfs_super_block *disk_super) 692 { 693 struct btrfs_fs_devices *fs_devices; 694 695 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 696 if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid, 697 BTRFS_FSID_SIZE) != 0 && 698 memcmp(fs_devices->metadata_uuid, disk_super->fsid, 699 BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) { 700 return fs_devices; 701 } 702 } 703 704 return find_fsid(disk_super->fsid, NULL); 705 } 706 707 708 static struct btrfs_fs_devices *find_fsid_changed( 709 struct btrfs_super_block *disk_super) 710 { 711 struct btrfs_fs_devices *fs_devices; 712 713 /* 714 * Handles the case where scanned device is part of an fs that had 715 * multiple successful changes of FSID but currently device didn't 716 * observe it. Meaning our fsid will be different than theirs. We need 717 * to handle two subcases : 718 * 1 - The fs still continues to have different METADATA/FSID uuids. 719 * 2 - The fs is switched back to its original FSID (METADATA/FSID 720 * are equal). 721 */ 722 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 723 /* Changed UUIDs */ 724 if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid, 725 BTRFS_FSID_SIZE) != 0 && 726 memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid, 727 BTRFS_FSID_SIZE) == 0 && 728 memcmp(fs_devices->fsid, disk_super->fsid, 729 BTRFS_FSID_SIZE) != 0) 730 return fs_devices; 731 732 /* Unchanged UUIDs */ 733 if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid, 734 BTRFS_FSID_SIZE) == 0 && 735 memcmp(fs_devices->fsid, disk_super->metadata_uuid, 736 BTRFS_FSID_SIZE) == 0) 737 return fs_devices; 738 } 739 740 return NULL; 741 } 742 743 static struct btrfs_fs_devices *find_fsid_reverted_metadata( 744 struct btrfs_super_block *disk_super) 745 { 746 struct btrfs_fs_devices *fs_devices; 747 748 /* 749 * Handle the case where the scanned device is part of an fs whose last 750 * metadata UUID change reverted it to the original FSID. At the same 751 * time * fs_devices was first created by another constitutent device 752 * which didn't fully observe the operation. This results in an 753 * btrfs_fs_devices created with metadata/fsid different AND 754 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the 755 * fs_devices equal to the FSID of the disk. 756 */ 757 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 758 if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid, 759 BTRFS_FSID_SIZE) != 0 && 760 memcmp(fs_devices->metadata_uuid, disk_super->fsid, 761 BTRFS_FSID_SIZE) == 0 && 762 fs_devices->fsid_change) 763 return fs_devices; 764 } 765 766 return NULL; 767 } 768 /* 769 * Add new device to list of registered devices 770 * 771 * Returns: 772 * device pointer which was just added or updated when successful 773 * error pointer when failed 774 */ 775 static noinline struct btrfs_device *device_list_add(const char *path, 776 struct btrfs_super_block *disk_super, 777 bool *new_device_added) 778 { 779 struct btrfs_device *device; 780 struct btrfs_fs_devices *fs_devices = NULL; 781 struct rcu_string *name; 782 u64 found_transid = btrfs_super_generation(disk_super); 783 u64 devid = btrfs_stack_device_id(&disk_super->dev_item); 784 bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) & 785 BTRFS_FEATURE_INCOMPAT_METADATA_UUID); 786 bool fsid_change_in_progress = (btrfs_super_flags(disk_super) & 787 BTRFS_SUPER_FLAG_CHANGING_FSID_V2); 788 789 if (fsid_change_in_progress) { 790 if (!has_metadata_uuid) 791 fs_devices = find_fsid_inprogress(disk_super); 792 else 793 fs_devices = find_fsid_changed(disk_super); 794 } else if (has_metadata_uuid) { 795 fs_devices = find_fsid_with_metadata_uuid(disk_super); 796 } else { 797 fs_devices = find_fsid_reverted_metadata(disk_super); 798 if (!fs_devices) 799 fs_devices = find_fsid(disk_super->fsid, NULL); 800 } 801 802 803 if (!fs_devices) { 804 if (has_metadata_uuid) 805 fs_devices = alloc_fs_devices(disk_super->fsid, 806 disk_super->metadata_uuid); 807 else 808 fs_devices = alloc_fs_devices(disk_super->fsid, NULL); 809 810 if (IS_ERR(fs_devices)) 811 return ERR_CAST(fs_devices); 812 813 fs_devices->fsid_change = fsid_change_in_progress; 814 815 mutex_lock(&fs_devices->device_list_mutex); 816 list_add(&fs_devices->fs_list, &fs_uuids); 817 818 device = NULL; 819 } else { 820 struct btrfs_dev_lookup_args args = { 821 .devid = devid, 822 .uuid = disk_super->dev_item.uuid, 823 }; 824 825 mutex_lock(&fs_devices->device_list_mutex); 826 device = btrfs_find_device(fs_devices, &args); 827 828 /* 829 * If this disk has been pulled into an fs devices created by 830 * a device which had the CHANGING_FSID_V2 flag then replace the 831 * metadata_uuid/fsid values of the fs_devices. 832 */ 833 if (fs_devices->fsid_change && 834 found_transid > fs_devices->latest_generation) { 835 memcpy(fs_devices->fsid, disk_super->fsid, 836 BTRFS_FSID_SIZE); 837 838 if (has_metadata_uuid) 839 memcpy(fs_devices->metadata_uuid, 840 disk_super->metadata_uuid, 841 BTRFS_FSID_SIZE); 842 else 843 memcpy(fs_devices->metadata_uuid, 844 disk_super->fsid, BTRFS_FSID_SIZE); 845 846 fs_devices->fsid_change = false; 847 } 848 } 849 850 if (!device) { 851 if (fs_devices->opened) { 852 mutex_unlock(&fs_devices->device_list_mutex); 853 return ERR_PTR(-EBUSY); 854 } 855 856 device = btrfs_alloc_device(NULL, &devid, 857 disk_super->dev_item.uuid); 858 if (IS_ERR(device)) { 859 mutex_unlock(&fs_devices->device_list_mutex); 860 /* we can safely leave the fs_devices entry around */ 861 return device; 862 } 863 864 name = rcu_string_strdup(path, GFP_NOFS); 865 if (!name) { 866 btrfs_free_device(device); 867 mutex_unlock(&fs_devices->device_list_mutex); 868 return ERR_PTR(-ENOMEM); 869 } 870 rcu_assign_pointer(device->name, name); 871 872 list_add_rcu(&device->dev_list, &fs_devices->devices); 873 fs_devices->num_devices++; 874 875 device->fs_devices = fs_devices; 876 *new_device_added = true; 877 878 if (disk_super->label[0]) 879 pr_info( 880 "BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n", 881 disk_super->label, devid, found_transid, path, 882 current->comm, task_pid_nr(current)); 883 else 884 pr_info( 885 "BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n", 886 disk_super->fsid, devid, found_transid, path, 887 current->comm, task_pid_nr(current)); 888 889 } else if (!device->name || strcmp(device->name->str, path)) { 890 /* 891 * When FS is already mounted. 892 * 1. If you are here and if the device->name is NULL that 893 * means this device was missing at time of FS mount. 894 * 2. If you are here and if the device->name is different 895 * from 'path' that means either 896 * a. The same device disappeared and reappeared with 897 * different name. or 898 * b. The missing-disk-which-was-replaced, has 899 * reappeared now. 900 * 901 * We must allow 1 and 2a above. But 2b would be a spurious 902 * and unintentional. 903 * 904 * Further in case of 1 and 2a above, the disk at 'path' 905 * would have missed some transaction when it was away and 906 * in case of 2a the stale bdev has to be updated as well. 907 * 2b must not be allowed at all time. 908 */ 909 910 /* 911 * For now, we do allow update to btrfs_fs_device through the 912 * btrfs dev scan cli after FS has been mounted. We're still 913 * tracking a problem where systems fail mount by subvolume id 914 * when we reject replacement on a mounted FS. 915 */ 916 if (!fs_devices->opened && found_transid < device->generation) { 917 /* 918 * That is if the FS is _not_ mounted and if you 919 * are here, that means there is more than one 920 * disk with same uuid and devid.We keep the one 921 * with larger generation number or the last-in if 922 * generation are equal. 923 */ 924 mutex_unlock(&fs_devices->device_list_mutex); 925 return ERR_PTR(-EEXIST); 926 } 927 928 /* 929 * We are going to replace the device path for a given devid, 930 * make sure it's the same device if the device is mounted 931 */ 932 if (device->bdev) { 933 int error; 934 dev_t path_dev; 935 936 error = lookup_bdev(path, &path_dev); 937 if (error) { 938 mutex_unlock(&fs_devices->device_list_mutex); 939 return ERR_PTR(error); 940 } 941 942 if (device->bdev->bd_dev != path_dev) { 943 mutex_unlock(&fs_devices->device_list_mutex); 944 /* 945 * device->fs_info may not be reliable here, so 946 * pass in a NULL instead. This avoids a 947 * possible use-after-free when the fs_info and 948 * fs_info->sb are already torn down. 949 */ 950 btrfs_warn_in_rcu(NULL, 951 "duplicate device %s devid %llu generation %llu scanned by %s (%d)", 952 path, devid, found_transid, 953 current->comm, 954 task_pid_nr(current)); 955 return ERR_PTR(-EEXIST); 956 } 957 btrfs_info_in_rcu(device->fs_info, 958 "devid %llu device path %s changed to %s scanned by %s (%d)", 959 devid, rcu_str_deref(device->name), 960 path, current->comm, 961 task_pid_nr(current)); 962 } 963 964 name = rcu_string_strdup(path, GFP_NOFS); 965 if (!name) { 966 mutex_unlock(&fs_devices->device_list_mutex); 967 return ERR_PTR(-ENOMEM); 968 } 969 rcu_string_free(device->name); 970 rcu_assign_pointer(device->name, name); 971 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 972 fs_devices->missing_devices--; 973 clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 974 } 975 } 976 977 /* 978 * Unmount does not free the btrfs_device struct but would zero 979 * generation along with most of the other members. So just update 980 * it back. We need it to pick the disk with largest generation 981 * (as above). 982 */ 983 if (!fs_devices->opened) { 984 device->generation = found_transid; 985 fs_devices->latest_generation = max_t(u64, found_transid, 986 fs_devices->latest_generation); 987 } 988 989 fs_devices->total_devices = btrfs_super_num_devices(disk_super); 990 991 mutex_unlock(&fs_devices->device_list_mutex); 992 return device; 993 } 994 995 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig) 996 { 997 struct btrfs_fs_devices *fs_devices; 998 struct btrfs_device *device; 999 struct btrfs_device *orig_dev; 1000 int ret = 0; 1001 1002 lockdep_assert_held(&uuid_mutex); 1003 1004 fs_devices = alloc_fs_devices(orig->fsid, NULL); 1005 if (IS_ERR(fs_devices)) 1006 return fs_devices; 1007 1008 fs_devices->total_devices = orig->total_devices; 1009 1010 list_for_each_entry(orig_dev, &orig->devices, dev_list) { 1011 struct rcu_string *name; 1012 1013 device = btrfs_alloc_device(NULL, &orig_dev->devid, 1014 orig_dev->uuid); 1015 if (IS_ERR(device)) { 1016 ret = PTR_ERR(device); 1017 goto error; 1018 } 1019 1020 /* 1021 * This is ok to do without rcu read locked because we hold the 1022 * uuid mutex so nothing we touch in here is going to disappear. 1023 */ 1024 if (orig_dev->name) { 1025 name = rcu_string_strdup(orig_dev->name->str, 1026 GFP_KERNEL); 1027 if (!name) { 1028 btrfs_free_device(device); 1029 ret = -ENOMEM; 1030 goto error; 1031 } 1032 rcu_assign_pointer(device->name, name); 1033 } 1034 1035 list_add(&device->dev_list, &fs_devices->devices); 1036 device->fs_devices = fs_devices; 1037 fs_devices->num_devices++; 1038 } 1039 return fs_devices; 1040 error: 1041 free_fs_devices(fs_devices); 1042 return ERR_PTR(ret); 1043 } 1044 1045 static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, 1046 struct btrfs_device **latest_dev) 1047 { 1048 struct btrfs_device *device, *next; 1049 1050 /* This is the initialized path, it is safe to release the devices. */ 1051 list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) { 1052 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) { 1053 if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, 1054 &device->dev_state) && 1055 !test_bit(BTRFS_DEV_STATE_MISSING, 1056 &device->dev_state) && 1057 (!*latest_dev || 1058 device->generation > (*latest_dev)->generation)) { 1059 *latest_dev = device; 1060 } 1061 continue; 1062 } 1063 1064 /* 1065 * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID, 1066 * in btrfs_init_dev_replace() so just continue. 1067 */ 1068 if (device->devid == BTRFS_DEV_REPLACE_DEVID) 1069 continue; 1070 1071 if (device->bdev) { 1072 blkdev_put(device->bdev, device->mode); 1073 device->bdev = NULL; 1074 fs_devices->open_devices--; 1075 } 1076 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 1077 list_del_init(&device->dev_alloc_list); 1078 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 1079 fs_devices->rw_devices--; 1080 } 1081 list_del_init(&device->dev_list); 1082 fs_devices->num_devices--; 1083 btrfs_free_device(device); 1084 } 1085 1086 } 1087 1088 /* 1089 * After we have read the system tree and know devids belonging to this 1090 * filesystem, remove the device which does not belong there. 1091 */ 1092 void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices) 1093 { 1094 struct btrfs_device *latest_dev = NULL; 1095 struct btrfs_fs_devices *seed_dev; 1096 1097 mutex_lock(&uuid_mutex); 1098 __btrfs_free_extra_devids(fs_devices, &latest_dev); 1099 1100 list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list) 1101 __btrfs_free_extra_devids(seed_dev, &latest_dev); 1102 1103 fs_devices->latest_dev = latest_dev; 1104 1105 mutex_unlock(&uuid_mutex); 1106 } 1107 1108 static void btrfs_close_bdev(struct btrfs_device *device) 1109 { 1110 if (!device->bdev) 1111 return; 1112 1113 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 1114 sync_blockdev(device->bdev); 1115 invalidate_bdev(device->bdev); 1116 } 1117 1118 blkdev_put(device->bdev, device->mode); 1119 } 1120 1121 static void btrfs_close_one_device(struct btrfs_device *device) 1122 { 1123 struct btrfs_fs_devices *fs_devices = device->fs_devices; 1124 1125 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 1126 device->devid != BTRFS_DEV_REPLACE_DEVID) { 1127 list_del_init(&device->dev_alloc_list); 1128 fs_devices->rw_devices--; 1129 } 1130 1131 if (device->devid == BTRFS_DEV_REPLACE_DEVID) 1132 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 1133 1134 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 1135 clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 1136 fs_devices->missing_devices--; 1137 } 1138 1139 btrfs_close_bdev(device); 1140 if (device->bdev) { 1141 fs_devices->open_devices--; 1142 device->bdev = NULL; 1143 } 1144 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 1145 btrfs_destroy_dev_zone_info(device); 1146 1147 device->fs_info = NULL; 1148 atomic_set(&device->dev_stats_ccnt, 0); 1149 extent_io_tree_release(&device->alloc_state); 1150 1151 /* 1152 * Reset the flush error record. We might have a transient flush error 1153 * in this mount, and if so we aborted the current transaction and set 1154 * the fs to an error state, guaranteeing no super blocks can be further 1155 * committed. However that error might be transient and if we unmount the 1156 * filesystem and mount it again, we should allow the mount to succeed 1157 * (btrfs_check_rw_degradable() should not fail) - if after mounting the 1158 * filesystem again we still get flush errors, then we will again abort 1159 * any transaction and set the error state, guaranteeing no commits of 1160 * unsafe super blocks. 1161 */ 1162 device->last_flush_error = 0; 1163 1164 /* Verify the device is back in a pristine state */ 1165 ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state)); 1166 ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)); 1167 ASSERT(list_empty(&device->dev_alloc_list)); 1168 ASSERT(list_empty(&device->post_commit_list)); 1169 ASSERT(atomic_read(&device->reada_in_flight) == 0); 1170 } 1171 1172 static void close_fs_devices(struct btrfs_fs_devices *fs_devices) 1173 { 1174 struct btrfs_device *device, *tmp; 1175 1176 lockdep_assert_held(&uuid_mutex); 1177 1178 if (--fs_devices->opened > 0) 1179 return; 1180 1181 list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) 1182 btrfs_close_one_device(device); 1183 1184 WARN_ON(fs_devices->open_devices); 1185 WARN_ON(fs_devices->rw_devices); 1186 fs_devices->opened = 0; 1187 fs_devices->seeding = false; 1188 fs_devices->fs_info = NULL; 1189 } 1190 1191 void btrfs_close_devices(struct btrfs_fs_devices *fs_devices) 1192 { 1193 LIST_HEAD(list); 1194 struct btrfs_fs_devices *tmp; 1195 1196 mutex_lock(&uuid_mutex); 1197 close_fs_devices(fs_devices); 1198 if (!fs_devices->opened) 1199 list_splice_init(&fs_devices->seed_list, &list); 1200 1201 list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) { 1202 close_fs_devices(fs_devices); 1203 list_del(&fs_devices->seed_list); 1204 free_fs_devices(fs_devices); 1205 } 1206 mutex_unlock(&uuid_mutex); 1207 } 1208 1209 static int open_fs_devices(struct btrfs_fs_devices *fs_devices, 1210 fmode_t flags, void *holder) 1211 { 1212 struct btrfs_device *device; 1213 struct btrfs_device *latest_dev = NULL; 1214 struct btrfs_device *tmp_device; 1215 1216 flags |= FMODE_EXCL; 1217 1218 list_for_each_entry_safe(device, tmp_device, &fs_devices->devices, 1219 dev_list) { 1220 int ret; 1221 1222 ret = btrfs_open_one_device(fs_devices, device, flags, holder); 1223 if (ret == 0 && 1224 (!latest_dev || device->generation > latest_dev->generation)) { 1225 latest_dev = device; 1226 } else if (ret == -ENODATA) { 1227 fs_devices->num_devices--; 1228 list_del(&device->dev_list); 1229 btrfs_free_device(device); 1230 } 1231 } 1232 if (fs_devices->open_devices == 0) 1233 return -EINVAL; 1234 1235 fs_devices->opened = 1; 1236 fs_devices->latest_dev = latest_dev; 1237 fs_devices->total_rw_bytes = 0; 1238 fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR; 1239 fs_devices->read_policy = BTRFS_READ_POLICY_PID; 1240 1241 return 0; 1242 } 1243 1244 static int devid_cmp(void *priv, const struct list_head *a, 1245 const struct list_head *b) 1246 { 1247 const struct btrfs_device *dev1, *dev2; 1248 1249 dev1 = list_entry(a, struct btrfs_device, dev_list); 1250 dev2 = list_entry(b, struct btrfs_device, dev_list); 1251 1252 if (dev1->devid < dev2->devid) 1253 return -1; 1254 else if (dev1->devid > dev2->devid) 1255 return 1; 1256 return 0; 1257 } 1258 1259 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, 1260 fmode_t flags, void *holder) 1261 { 1262 int ret; 1263 1264 lockdep_assert_held(&uuid_mutex); 1265 /* 1266 * The device_list_mutex cannot be taken here in case opening the 1267 * underlying device takes further locks like open_mutex. 1268 * 1269 * We also don't need the lock here as this is called during mount and 1270 * exclusion is provided by uuid_mutex 1271 */ 1272 1273 if (fs_devices->opened) { 1274 fs_devices->opened++; 1275 ret = 0; 1276 } else { 1277 list_sort(NULL, &fs_devices->devices, devid_cmp); 1278 ret = open_fs_devices(fs_devices, flags, holder); 1279 } 1280 1281 return ret; 1282 } 1283 1284 void btrfs_release_disk_super(struct btrfs_super_block *super) 1285 { 1286 struct page *page = virt_to_page(super); 1287 1288 put_page(page); 1289 } 1290 1291 static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev, 1292 u64 bytenr, u64 bytenr_orig) 1293 { 1294 struct btrfs_super_block *disk_super; 1295 struct page *page; 1296 void *p; 1297 pgoff_t index; 1298 1299 /* make sure our super fits in the device */ 1300 if (bytenr + PAGE_SIZE >= bdev_nr_bytes(bdev)) 1301 return ERR_PTR(-EINVAL); 1302 1303 /* make sure our super fits in the page */ 1304 if (sizeof(*disk_super) > PAGE_SIZE) 1305 return ERR_PTR(-EINVAL); 1306 1307 /* make sure our super doesn't straddle pages on disk */ 1308 index = bytenr >> PAGE_SHIFT; 1309 if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index) 1310 return ERR_PTR(-EINVAL); 1311 1312 /* pull in the page with our super */ 1313 page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL); 1314 1315 if (IS_ERR(page)) 1316 return ERR_CAST(page); 1317 1318 p = page_address(page); 1319 1320 /* align our pointer to the offset of the super block */ 1321 disk_super = p + offset_in_page(bytenr); 1322 1323 if (btrfs_super_bytenr(disk_super) != bytenr_orig || 1324 btrfs_super_magic(disk_super) != BTRFS_MAGIC) { 1325 btrfs_release_disk_super(p); 1326 return ERR_PTR(-EINVAL); 1327 } 1328 1329 if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1]) 1330 disk_super->label[BTRFS_LABEL_SIZE - 1] = 0; 1331 1332 return disk_super; 1333 } 1334 1335 int btrfs_forget_devices(const char *path) 1336 { 1337 int ret; 1338 1339 mutex_lock(&uuid_mutex); 1340 ret = btrfs_free_stale_devices(strlen(path) ? path : NULL, NULL); 1341 mutex_unlock(&uuid_mutex); 1342 1343 return ret; 1344 } 1345 1346 /* 1347 * Look for a btrfs signature on a device. This may be called out of the mount path 1348 * and we are not allowed to call set_blocksize during the scan. The superblock 1349 * is read via pagecache 1350 */ 1351 struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags, 1352 void *holder) 1353 { 1354 struct btrfs_super_block *disk_super; 1355 bool new_device_added = false; 1356 struct btrfs_device *device = NULL; 1357 struct block_device *bdev; 1358 u64 bytenr, bytenr_orig; 1359 int ret; 1360 1361 lockdep_assert_held(&uuid_mutex); 1362 1363 /* 1364 * we would like to check all the supers, but that would make 1365 * a btrfs mount succeed after a mkfs from a different FS. 1366 * So, we need to add a special mount option to scan for 1367 * later supers, using BTRFS_SUPER_MIRROR_MAX instead 1368 */ 1369 flags |= FMODE_EXCL; 1370 1371 bdev = blkdev_get_by_path(path, flags, holder); 1372 if (IS_ERR(bdev)) 1373 return ERR_CAST(bdev); 1374 1375 bytenr_orig = btrfs_sb_offset(0); 1376 ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr); 1377 if (ret) { 1378 device = ERR_PTR(ret); 1379 goto error_bdev_put; 1380 } 1381 1382 disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig); 1383 if (IS_ERR(disk_super)) { 1384 device = ERR_CAST(disk_super); 1385 goto error_bdev_put; 1386 } 1387 1388 device = device_list_add(path, disk_super, &new_device_added); 1389 if (!IS_ERR(device)) { 1390 if (new_device_added) 1391 btrfs_free_stale_devices(path, device); 1392 } 1393 1394 btrfs_release_disk_super(disk_super); 1395 1396 error_bdev_put: 1397 blkdev_put(bdev, flags); 1398 1399 return device; 1400 } 1401 1402 /* 1403 * Try to find a chunk that intersects [start, start + len] range and when one 1404 * such is found, record the end of it in *start 1405 */ 1406 static bool contains_pending_extent(struct btrfs_device *device, u64 *start, 1407 u64 len) 1408 { 1409 u64 physical_start, physical_end; 1410 1411 lockdep_assert_held(&device->fs_info->chunk_mutex); 1412 1413 if (!find_first_extent_bit(&device->alloc_state, *start, 1414 &physical_start, &physical_end, 1415 CHUNK_ALLOCATED, NULL)) { 1416 1417 if (in_range(physical_start, *start, len) || 1418 in_range(*start, physical_start, 1419 physical_end - physical_start)) { 1420 *start = physical_end + 1; 1421 return true; 1422 } 1423 } 1424 return false; 1425 } 1426 1427 static u64 dev_extent_search_start(struct btrfs_device *device, u64 start) 1428 { 1429 switch (device->fs_devices->chunk_alloc_policy) { 1430 case BTRFS_CHUNK_ALLOC_REGULAR: 1431 /* 1432 * We don't want to overwrite the superblock on the drive nor 1433 * any area used by the boot loader (grub for example), so we 1434 * make sure to start at an offset of at least 1MB. 1435 */ 1436 return max_t(u64, start, SZ_1M); 1437 case BTRFS_CHUNK_ALLOC_ZONED: 1438 /* 1439 * We don't care about the starting region like regular 1440 * allocator, because we anyway use/reserve the first two zones 1441 * for superblock logging. 1442 */ 1443 return ALIGN(start, device->zone_info->zone_size); 1444 default: 1445 BUG(); 1446 } 1447 } 1448 1449 static bool dev_extent_hole_check_zoned(struct btrfs_device *device, 1450 u64 *hole_start, u64 *hole_size, 1451 u64 num_bytes) 1452 { 1453 u64 zone_size = device->zone_info->zone_size; 1454 u64 pos; 1455 int ret; 1456 bool changed = false; 1457 1458 ASSERT(IS_ALIGNED(*hole_start, zone_size)); 1459 1460 while (*hole_size > 0) { 1461 pos = btrfs_find_allocatable_zones(device, *hole_start, 1462 *hole_start + *hole_size, 1463 num_bytes); 1464 if (pos != *hole_start) { 1465 *hole_size = *hole_start + *hole_size - pos; 1466 *hole_start = pos; 1467 changed = true; 1468 if (*hole_size < num_bytes) 1469 break; 1470 } 1471 1472 ret = btrfs_ensure_empty_zones(device, pos, num_bytes); 1473 1474 /* Range is ensured to be empty */ 1475 if (!ret) 1476 return changed; 1477 1478 /* Given hole range was invalid (outside of device) */ 1479 if (ret == -ERANGE) { 1480 *hole_start += *hole_size; 1481 *hole_size = 0; 1482 return true; 1483 } 1484 1485 *hole_start += zone_size; 1486 *hole_size -= zone_size; 1487 changed = true; 1488 } 1489 1490 return changed; 1491 } 1492 1493 /** 1494 * dev_extent_hole_check - check if specified hole is suitable for allocation 1495 * @device: the device which we have the hole 1496 * @hole_start: starting position of the hole 1497 * @hole_size: the size of the hole 1498 * @num_bytes: the size of the free space that we need 1499 * 1500 * This function may modify @hole_start and @hole_size to reflect the suitable 1501 * position for allocation. Returns 1 if hole position is updated, 0 otherwise. 1502 */ 1503 static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start, 1504 u64 *hole_size, u64 num_bytes) 1505 { 1506 bool changed = false; 1507 u64 hole_end = *hole_start + *hole_size; 1508 1509 for (;;) { 1510 /* 1511 * Check before we set max_hole_start, otherwise we could end up 1512 * sending back this offset anyway. 1513 */ 1514 if (contains_pending_extent(device, hole_start, *hole_size)) { 1515 if (hole_end >= *hole_start) 1516 *hole_size = hole_end - *hole_start; 1517 else 1518 *hole_size = 0; 1519 changed = true; 1520 } 1521 1522 switch (device->fs_devices->chunk_alloc_policy) { 1523 case BTRFS_CHUNK_ALLOC_REGULAR: 1524 /* No extra check */ 1525 break; 1526 case BTRFS_CHUNK_ALLOC_ZONED: 1527 if (dev_extent_hole_check_zoned(device, hole_start, 1528 hole_size, num_bytes)) { 1529 changed = true; 1530 /* 1531 * The changed hole can contain pending extent. 1532 * Loop again to check that. 1533 */ 1534 continue; 1535 } 1536 break; 1537 default: 1538 BUG(); 1539 } 1540 1541 break; 1542 } 1543 1544 return changed; 1545 } 1546 1547 /* 1548 * find_free_dev_extent_start - find free space in the specified device 1549 * @device: the device which we search the free space in 1550 * @num_bytes: the size of the free space that we need 1551 * @search_start: the position from which to begin the search 1552 * @start: store the start of the free space. 1553 * @len: the size of the free space. that we find, or the size 1554 * of the max free space if we don't find suitable free space 1555 * 1556 * this uses a pretty simple search, the expectation is that it is 1557 * called very infrequently and that a given device has a small number 1558 * of extents 1559 * 1560 * @start is used to store the start of the free space if we find. But if we 1561 * don't find suitable free space, it will be used to store the start position 1562 * of the max free space. 1563 * 1564 * @len is used to store the size of the free space that we find. 1565 * But if we don't find suitable free space, it is used to store the size of 1566 * the max free space. 1567 * 1568 * NOTE: This function will search *commit* root of device tree, and does extra 1569 * check to ensure dev extents are not double allocated. 1570 * This makes the function safe to allocate dev extents but may not report 1571 * correct usable device space, as device extent freed in current transaction 1572 * is not reported as available. 1573 */ 1574 static int find_free_dev_extent_start(struct btrfs_device *device, 1575 u64 num_bytes, u64 search_start, u64 *start, 1576 u64 *len) 1577 { 1578 struct btrfs_fs_info *fs_info = device->fs_info; 1579 struct btrfs_root *root = fs_info->dev_root; 1580 struct btrfs_key key; 1581 struct btrfs_dev_extent *dev_extent; 1582 struct btrfs_path *path; 1583 u64 hole_size; 1584 u64 max_hole_start; 1585 u64 max_hole_size; 1586 u64 extent_end; 1587 u64 search_end = device->total_bytes; 1588 int ret; 1589 int slot; 1590 struct extent_buffer *l; 1591 1592 search_start = dev_extent_search_start(device, search_start); 1593 1594 WARN_ON(device->zone_info && 1595 !IS_ALIGNED(num_bytes, device->zone_info->zone_size)); 1596 1597 path = btrfs_alloc_path(); 1598 if (!path) 1599 return -ENOMEM; 1600 1601 max_hole_start = search_start; 1602 max_hole_size = 0; 1603 1604 again: 1605 if (search_start >= search_end || 1606 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 1607 ret = -ENOSPC; 1608 goto out; 1609 } 1610 1611 path->reada = READA_FORWARD; 1612 path->search_commit_root = 1; 1613 path->skip_locking = 1; 1614 1615 key.objectid = device->devid; 1616 key.offset = search_start; 1617 key.type = BTRFS_DEV_EXTENT_KEY; 1618 1619 ret = btrfs_search_backwards(root, &key, path); 1620 if (ret < 0) 1621 goto out; 1622 1623 while (1) { 1624 l = path->nodes[0]; 1625 slot = path->slots[0]; 1626 if (slot >= btrfs_header_nritems(l)) { 1627 ret = btrfs_next_leaf(root, path); 1628 if (ret == 0) 1629 continue; 1630 if (ret < 0) 1631 goto out; 1632 1633 break; 1634 } 1635 btrfs_item_key_to_cpu(l, &key, slot); 1636 1637 if (key.objectid < device->devid) 1638 goto next; 1639 1640 if (key.objectid > device->devid) 1641 break; 1642 1643 if (key.type != BTRFS_DEV_EXTENT_KEY) 1644 goto next; 1645 1646 if (key.offset > search_start) { 1647 hole_size = key.offset - search_start; 1648 dev_extent_hole_check(device, &search_start, &hole_size, 1649 num_bytes); 1650 1651 if (hole_size > max_hole_size) { 1652 max_hole_start = search_start; 1653 max_hole_size = hole_size; 1654 } 1655 1656 /* 1657 * If this free space is greater than which we need, 1658 * it must be the max free space that we have found 1659 * until now, so max_hole_start must point to the start 1660 * of this free space and the length of this free space 1661 * is stored in max_hole_size. Thus, we return 1662 * max_hole_start and max_hole_size and go back to the 1663 * caller. 1664 */ 1665 if (hole_size >= num_bytes) { 1666 ret = 0; 1667 goto out; 1668 } 1669 } 1670 1671 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 1672 extent_end = key.offset + btrfs_dev_extent_length(l, 1673 dev_extent); 1674 if (extent_end > search_start) 1675 search_start = extent_end; 1676 next: 1677 path->slots[0]++; 1678 cond_resched(); 1679 } 1680 1681 /* 1682 * At this point, search_start should be the end of 1683 * allocated dev extents, and when shrinking the device, 1684 * search_end may be smaller than search_start. 1685 */ 1686 if (search_end > search_start) { 1687 hole_size = search_end - search_start; 1688 if (dev_extent_hole_check(device, &search_start, &hole_size, 1689 num_bytes)) { 1690 btrfs_release_path(path); 1691 goto again; 1692 } 1693 1694 if (hole_size > max_hole_size) { 1695 max_hole_start = search_start; 1696 max_hole_size = hole_size; 1697 } 1698 } 1699 1700 /* See above. */ 1701 if (max_hole_size < num_bytes) 1702 ret = -ENOSPC; 1703 else 1704 ret = 0; 1705 1706 out: 1707 btrfs_free_path(path); 1708 *start = max_hole_start; 1709 if (len) 1710 *len = max_hole_size; 1711 return ret; 1712 } 1713 1714 int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes, 1715 u64 *start, u64 *len) 1716 { 1717 /* FIXME use last free of some kind */ 1718 return find_free_dev_extent_start(device, num_bytes, 0, start, len); 1719 } 1720 1721 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans, 1722 struct btrfs_device *device, 1723 u64 start, u64 *dev_extent_len) 1724 { 1725 struct btrfs_fs_info *fs_info = device->fs_info; 1726 struct btrfs_root *root = fs_info->dev_root; 1727 int ret; 1728 struct btrfs_path *path; 1729 struct btrfs_key key; 1730 struct btrfs_key found_key; 1731 struct extent_buffer *leaf = NULL; 1732 struct btrfs_dev_extent *extent = NULL; 1733 1734 path = btrfs_alloc_path(); 1735 if (!path) 1736 return -ENOMEM; 1737 1738 key.objectid = device->devid; 1739 key.offset = start; 1740 key.type = BTRFS_DEV_EXTENT_KEY; 1741 again: 1742 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1743 if (ret > 0) { 1744 ret = btrfs_previous_item(root, path, key.objectid, 1745 BTRFS_DEV_EXTENT_KEY); 1746 if (ret) 1747 goto out; 1748 leaf = path->nodes[0]; 1749 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 1750 extent = btrfs_item_ptr(leaf, path->slots[0], 1751 struct btrfs_dev_extent); 1752 BUG_ON(found_key.offset > start || found_key.offset + 1753 btrfs_dev_extent_length(leaf, extent) < start); 1754 key = found_key; 1755 btrfs_release_path(path); 1756 goto again; 1757 } else if (ret == 0) { 1758 leaf = path->nodes[0]; 1759 extent = btrfs_item_ptr(leaf, path->slots[0], 1760 struct btrfs_dev_extent); 1761 } else { 1762 goto out; 1763 } 1764 1765 *dev_extent_len = btrfs_dev_extent_length(leaf, extent); 1766 1767 ret = btrfs_del_item(trans, root, path); 1768 if (ret == 0) 1769 set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags); 1770 out: 1771 btrfs_free_path(path); 1772 return ret; 1773 } 1774 1775 static u64 find_next_chunk(struct btrfs_fs_info *fs_info) 1776 { 1777 struct extent_map_tree *em_tree; 1778 struct extent_map *em; 1779 struct rb_node *n; 1780 u64 ret = 0; 1781 1782 em_tree = &fs_info->mapping_tree; 1783 read_lock(&em_tree->lock); 1784 n = rb_last(&em_tree->map.rb_root); 1785 if (n) { 1786 em = rb_entry(n, struct extent_map, rb_node); 1787 ret = em->start + em->len; 1788 } 1789 read_unlock(&em_tree->lock); 1790 1791 return ret; 1792 } 1793 1794 static noinline int find_next_devid(struct btrfs_fs_info *fs_info, 1795 u64 *devid_ret) 1796 { 1797 int ret; 1798 struct btrfs_key key; 1799 struct btrfs_key found_key; 1800 struct btrfs_path *path; 1801 1802 path = btrfs_alloc_path(); 1803 if (!path) 1804 return -ENOMEM; 1805 1806 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1807 key.type = BTRFS_DEV_ITEM_KEY; 1808 key.offset = (u64)-1; 1809 1810 ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0); 1811 if (ret < 0) 1812 goto error; 1813 1814 if (ret == 0) { 1815 /* Corruption */ 1816 btrfs_err(fs_info, "corrupted chunk tree devid -1 matched"); 1817 ret = -EUCLEAN; 1818 goto error; 1819 } 1820 1821 ret = btrfs_previous_item(fs_info->chunk_root, path, 1822 BTRFS_DEV_ITEMS_OBJECTID, 1823 BTRFS_DEV_ITEM_KEY); 1824 if (ret) { 1825 *devid_ret = 1; 1826 } else { 1827 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 1828 path->slots[0]); 1829 *devid_ret = found_key.offset + 1; 1830 } 1831 ret = 0; 1832 error: 1833 btrfs_free_path(path); 1834 return ret; 1835 } 1836 1837 /* 1838 * the device information is stored in the chunk root 1839 * the btrfs_device struct should be fully filled in 1840 */ 1841 static int btrfs_add_dev_item(struct btrfs_trans_handle *trans, 1842 struct btrfs_device *device) 1843 { 1844 int ret; 1845 struct btrfs_path *path; 1846 struct btrfs_dev_item *dev_item; 1847 struct extent_buffer *leaf; 1848 struct btrfs_key key; 1849 unsigned long ptr; 1850 1851 path = btrfs_alloc_path(); 1852 if (!path) 1853 return -ENOMEM; 1854 1855 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1856 key.type = BTRFS_DEV_ITEM_KEY; 1857 key.offset = device->devid; 1858 1859 btrfs_reserve_chunk_metadata(trans, true); 1860 ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path, 1861 &key, sizeof(*dev_item)); 1862 btrfs_trans_release_chunk_metadata(trans); 1863 if (ret) 1864 goto out; 1865 1866 leaf = path->nodes[0]; 1867 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 1868 1869 btrfs_set_device_id(leaf, dev_item, device->devid); 1870 btrfs_set_device_generation(leaf, dev_item, 0); 1871 btrfs_set_device_type(leaf, dev_item, device->type); 1872 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 1873 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 1874 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 1875 btrfs_set_device_total_bytes(leaf, dev_item, 1876 btrfs_device_get_disk_total_bytes(device)); 1877 btrfs_set_device_bytes_used(leaf, dev_item, 1878 btrfs_device_get_bytes_used(device)); 1879 btrfs_set_device_group(leaf, dev_item, 0); 1880 btrfs_set_device_seek_speed(leaf, dev_item, 0); 1881 btrfs_set_device_bandwidth(leaf, dev_item, 0); 1882 btrfs_set_device_start_offset(leaf, dev_item, 0); 1883 1884 ptr = btrfs_device_uuid(dev_item); 1885 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 1886 ptr = btrfs_device_fsid(dev_item); 1887 write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid, 1888 ptr, BTRFS_FSID_SIZE); 1889 btrfs_mark_buffer_dirty(leaf); 1890 1891 ret = 0; 1892 out: 1893 btrfs_free_path(path); 1894 return ret; 1895 } 1896 1897 /* 1898 * Function to update ctime/mtime for a given device path. 1899 * Mainly used for ctime/mtime based probe like libblkid. 1900 * 1901 * We don't care about errors here, this is just to be kind to userspace. 1902 */ 1903 static void update_dev_time(const char *device_path) 1904 { 1905 struct path path; 1906 struct timespec64 now; 1907 int ret; 1908 1909 ret = kern_path(device_path, LOOKUP_FOLLOW, &path); 1910 if (ret) 1911 return; 1912 1913 now = current_time(d_inode(path.dentry)); 1914 inode_update_time(d_inode(path.dentry), &now, S_MTIME | S_CTIME); 1915 path_put(&path); 1916 } 1917 1918 static int btrfs_rm_dev_item(struct btrfs_device *device) 1919 { 1920 struct btrfs_root *root = device->fs_info->chunk_root; 1921 int ret; 1922 struct btrfs_path *path; 1923 struct btrfs_key key; 1924 struct btrfs_trans_handle *trans; 1925 1926 path = btrfs_alloc_path(); 1927 if (!path) 1928 return -ENOMEM; 1929 1930 trans = btrfs_start_transaction(root, 0); 1931 if (IS_ERR(trans)) { 1932 btrfs_free_path(path); 1933 return PTR_ERR(trans); 1934 } 1935 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1936 key.type = BTRFS_DEV_ITEM_KEY; 1937 key.offset = device->devid; 1938 1939 btrfs_reserve_chunk_metadata(trans, false); 1940 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1941 btrfs_trans_release_chunk_metadata(trans); 1942 if (ret) { 1943 if (ret > 0) 1944 ret = -ENOENT; 1945 btrfs_abort_transaction(trans, ret); 1946 btrfs_end_transaction(trans); 1947 goto out; 1948 } 1949 1950 ret = btrfs_del_item(trans, root, path); 1951 if (ret) { 1952 btrfs_abort_transaction(trans, ret); 1953 btrfs_end_transaction(trans); 1954 } 1955 1956 out: 1957 btrfs_free_path(path); 1958 if (!ret) 1959 ret = btrfs_commit_transaction(trans); 1960 return ret; 1961 } 1962 1963 /* 1964 * Verify that @num_devices satisfies the RAID profile constraints in the whole 1965 * filesystem. It's up to the caller to adjust that number regarding eg. device 1966 * replace. 1967 */ 1968 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info, 1969 u64 num_devices) 1970 { 1971 u64 all_avail; 1972 unsigned seq; 1973 int i; 1974 1975 do { 1976 seq = read_seqbegin(&fs_info->profiles_lock); 1977 1978 all_avail = fs_info->avail_data_alloc_bits | 1979 fs_info->avail_system_alloc_bits | 1980 fs_info->avail_metadata_alloc_bits; 1981 } while (read_seqretry(&fs_info->profiles_lock, seq)); 1982 1983 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { 1984 if (!(all_avail & btrfs_raid_array[i].bg_flag)) 1985 continue; 1986 1987 if (num_devices < btrfs_raid_array[i].devs_min) 1988 return btrfs_raid_array[i].mindev_error; 1989 } 1990 1991 return 0; 1992 } 1993 1994 static struct btrfs_device * btrfs_find_next_active_device( 1995 struct btrfs_fs_devices *fs_devs, struct btrfs_device *device) 1996 { 1997 struct btrfs_device *next_device; 1998 1999 list_for_each_entry(next_device, &fs_devs->devices, dev_list) { 2000 if (next_device != device && 2001 !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state) 2002 && next_device->bdev) 2003 return next_device; 2004 } 2005 2006 return NULL; 2007 } 2008 2009 /* 2010 * Helper function to check if the given device is part of s_bdev / latest_dev 2011 * and replace it with the provided or the next active device, in the context 2012 * where this function called, there should be always be another device (or 2013 * this_dev) which is active. 2014 */ 2015 void __cold btrfs_assign_next_active_device(struct btrfs_device *device, 2016 struct btrfs_device *next_device) 2017 { 2018 struct btrfs_fs_info *fs_info = device->fs_info; 2019 2020 if (!next_device) 2021 next_device = btrfs_find_next_active_device(fs_info->fs_devices, 2022 device); 2023 ASSERT(next_device); 2024 2025 if (fs_info->sb->s_bdev && 2026 (fs_info->sb->s_bdev == device->bdev)) 2027 fs_info->sb->s_bdev = next_device->bdev; 2028 2029 if (fs_info->fs_devices->latest_dev->bdev == device->bdev) 2030 fs_info->fs_devices->latest_dev = next_device; 2031 } 2032 2033 /* 2034 * Return btrfs_fs_devices::num_devices excluding the device that's being 2035 * currently replaced. 2036 */ 2037 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info) 2038 { 2039 u64 num_devices = fs_info->fs_devices->num_devices; 2040 2041 down_read(&fs_info->dev_replace.rwsem); 2042 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) { 2043 ASSERT(num_devices > 1); 2044 num_devices--; 2045 } 2046 up_read(&fs_info->dev_replace.rwsem); 2047 2048 return num_devices; 2049 } 2050 2051 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info, 2052 struct block_device *bdev, 2053 const char *device_path) 2054 { 2055 struct btrfs_super_block *disk_super; 2056 int copy_num; 2057 2058 if (!bdev) 2059 return; 2060 2061 for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) { 2062 struct page *page; 2063 int ret; 2064 2065 disk_super = btrfs_read_dev_one_super(bdev, copy_num); 2066 if (IS_ERR(disk_super)) 2067 continue; 2068 2069 if (bdev_is_zoned(bdev)) { 2070 btrfs_reset_sb_log_zones(bdev, copy_num); 2071 continue; 2072 } 2073 2074 memset(&disk_super->magic, 0, sizeof(disk_super->magic)); 2075 2076 page = virt_to_page(disk_super); 2077 set_page_dirty(page); 2078 lock_page(page); 2079 /* write_on_page() unlocks the page */ 2080 ret = write_one_page(page); 2081 if (ret) 2082 btrfs_warn(fs_info, 2083 "error clearing superblock number %d (%d)", 2084 copy_num, ret); 2085 btrfs_release_disk_super(disk_super); 2086 2087 } 2088 2089 /* Notify udev that device has changed */ 2090 btrfs_kobject_uevent(bdev, KOBJ_CHANGE); 2091 2092 /* Update ctime/mtime for device path for libblkid */ 2093 update_dev_time(device_path); 2094 } 2095 2096 int btrfs_rm_device(struct btrfs_fs_info *fs_info, 2097 struct btrfs_dev_lookup_args *args, 2098 struct block_device **bdev, fmode_t *mode) 2099 { 2100 struct btrfs_device *device; 2101 struct btrfs_fs_devices *cur_devices; 2102 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2103 u64 num_devices; 2104 int ret = 0; 2105 2106 /* 2107 * The device list in fs_devices is accessed without locks (neither 2108 * uuid_mutex nor device_list_mutex) as it won't change on a mounted 2109 * filesystem and another device rm cannot run. 2110 */ 2111 num_devices = btrfs_num_devices(fs_info); 2112 2113 ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1); 2114 if (ret) 2115 goto out; 2116 2117 device = btrfs_find_device(fs_info->fs_devices, args); 2118 if (!device) { 2119 if (args->missing) 2120 ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND; 2121 else 2122 ret = -ENOENT; 2123 goto out; 2124 } 2125 2126 if (btrfs_pinned_by_swapfile(fs_info, device)) { 2127 btrfs_warn_in_rcu(fs_info, 2128 "cannot remove device %s (devid %llu) due to active swapfile", 2129 rcu_str_deref(device->name), device->devid); 2130 ret = -ETXTBSY; 2131 goto out; 2132 } 2133 2134 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 2135 ret = BTRFS_ERROR_DEV_TGT_REPLACE; 2136 goto out; 2137 } 2138 2139 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 2140 fs_info->fs_devices->rw_devices == 1) { 2141 ret = BTRFS_ERROR_DEV_ONLY_WRITABLE; 2142 goto out; 2143 } 2144 2145 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2146 mutex_lock(&fs_info->chunk_mutex); 2147 list_del_init(&device->dev_alloc_list); 2148 device->fs_devices->rw_devices--; 2149 mutex_unlock(&fs_info->chunk_mutex); 2150 } 2151 2152 ret = btrfs_shrink_device(device, 0); 2153 if (!ret) 2154 btrfs_reada_remove_dev(device); 2155 if (ret) 2156 goto error_undo; 2157 2158 /* 2159 * TODO: the superblock still includes this device in its num_devices 2160 * counter although write_all_supers() is not locked out. This 2161 * could give a filesystem state which requires a degraded mount. 2162 */ 2163 ret = btrfs_rm_dev_item(device); 2164 if (ret) 2165 goto error_undo; 2166 2167 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2168 btrfs_scrub_cancel_dev(device); 2169 2170 /* 2171 * the device list mutex makes sure that we don't change 2172 * the device list while someone else is writing out all 2173 * the device supers. Whoever is writing all supers, should 2174 * lock the device list mutex before getting the number of 2175 * devices in the super block (super_copy). Conversely, 2176 * whoever updates the number of devices in the super block 2177 * (super_copy) should hold the device list mutex. 2178 */ 2179 2180 /* 2181 * In normal cases the cur_devices == fs_devices. But in case 2182 * of deleting a seed device, the cur_devices should point to 2183 * its own fs_devices listed under the fs_devices->seed_list. 2184 */ 2185 cur_devices = device->fs_devices; 2186 mutex_lock(&fs_devices->device_list_mutex); 2187 list_del_rcu(&device->dev_list); 2188 2189 cur_devices->num_devices--; 2190 cur_devices->total_devices--; 2191 /* Update total_devices of the parent fs_devices if it's seed */ 2192 if (cur_devices != fs_devices) 2193 fs_devices->total_devices--; 2194 2195 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) 2196 cur_devices->missing_devices--; 2197 2198 btrfs_assign_next_active_device(device, NULL); 2199 2200 if (device->bdev) { 2201 cur_devices->open_devices--; 2202 /* remove sysfs entry */ 2203 btrfs_sysfs_remove_device(device); 2204 } 2205 2206 num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1; 2207 btrfs_set_super_num_devices(fs_info->super_copy, num_devices); 2208 mutex_unlock(&fs_devices->device_list_mutex); 2209 2210 /* 2211 * At this point, the device is zero sized and detached from the 2212 * devices list. All that's left is to zero out the old supers and 2213 * free the device. 2214 * 2215 * We cannot call btrfs_close_bdev() here because we're holding the sb 2216 * write lock, and blkdev_put() will pull in the ->open_mutex on the 2217 * block device and it's dependencies. Instead just flush the device 2218 * and let the caller do the final blkdev_put. 2219 */ 2220 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2221 btrfs_scratch_superblocks(fs_info, device->bdev, 2222 device->name->str); 2223 if (device->bdev) { 2224 sync_blockdev(device->bdev); 2225 invalidate_bdev(device->bdev); 2226 } 2227 } 2228 2229 *bdev = device->bdev; 2230 *mode = device->mode; 2231 synchronize_rcu(); 2232 btrfs_free_device(device); 2233 2234 /* 2235 * This can happen if cur_devices is the private seed devices list. We 2236 * cannot call close_fs_devices() here because it expects the uuid_mutex 2237 * to be held, but in fact we don't need that for the private 2238 * seed_devices, we can simply decrement cur_devices->opened and then 2239 * remove it from our list and free the fs_devices. 2240 */ 2241 if (cur_devices->num_devices == 0) { 2242 list_del_init(&cur_devices->seed_list); 2243 ASSERT(cur_devices->opened == 1); 2244 cur_devices->opened--; 2245 free_fs_devices(cur_devices); 2246 } 2247 2248 out: 2249 return ret; 2250 2251 error_undo: 2252 btrfs_reada_undo_remove_dev(device); 2253 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2254 mutex_lock(&fs_info->chunk_mutex); 2255 list_add(&device->dev_alloc_list, 2256 &fs_devices->alloc_list); 2257 device->fs_devices->rw_devices++; 2258 mutex_unlock(&fs_info->chunk_mutex); 2259 } 2260 goto out; 2261 } 2262 2263 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev) 2264 { 2265 struct btrfs_fs_devices *fs_devices; 2266 2267 lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex); 2268 2269 /* 2270 * in case of fs with no seed, srcdev->fs_devices will point 2271 * to fs_devices of fs_info. However when the dev being replaced is 2272 * a seed dev it will point to the seed's local fs_devices. In short 2273 * srcdev will have its correct fs_devices in both the cases. 2274 */ 2275 fs_devices = srcdev->fs_devices; 2276 2277 list_del_rcu(&srcdev->dev_list); 2278 list_del(&srcdev->dev_alloc_list); 2279 fs_devices->num_devices--; 2280 if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state)) 2281 fs_devices->missing_devices--; 2282 2283 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) 2284 fs_devices->rw_devices--; 2285 2286 if (srcdev->bdev) 2287 fs_devices->open_devices--; 2288 } 2289 2290 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev) 2291 { 2292 struct btrfs_fs_devices *fs_devices = srcdev->fs_devices; 2293 2294 mutex_lock(&uuid_mutex); 2295 2296 btrfs_close_bdev(srcdev); 2297 synchronize_rcu(); 2298 btrfs_free_device(srcdev); 2299 2300 /* if this is no devs we rather delete the fs_devices */ 2301 if (!fs_devices->num_devices) { 2302 /* 2303 * On a mounted FS, num_devices can't be zero unless it's a 2304 * seed. In case of a seed device being replaced, the replace 2305 * target added to the sprout FS, so there will be no more 2306 * device left under the seed FS. 2307 */ 2308 ASSERT(fs_devices->seeding); 2309 2310 list_del_init(&fs_devices->seed_list); 2311 close_fs_devices(fs_devices); 2312 free_fs_devices(fs_devices); 2313 } 2314 mutex_unlock(&uuid_mutex); 2315 } 2316 2317 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev) 2318 { 2319 struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices; 2320 2321 mutex_lock(&fs_devices->device_list_mutex); 2322 2323 btrfs_sysfs_remove_device(tgtdev); 2324 2325 if (tgtdev->bdev) 2326 fs_devices->open_devices--; 2327 2328 fs_devices->num_devices--; 2329 2330 btrfs_assign_next_active_device(tgtdev, NULL); 2331 2332 list_del_rcu(&tgtdev->dev_list); 2333 2334 mutex_unlock(&fs_devices->device_list_mutex); 2335 2336 btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev, 2337 tgtdev->name->str); 2338 2339 btrfs_close_bdev(tgtdev); 2340 synchronize_rcu(); 2341 btrfs_free_device(tgtdev); 2342 } 2343 2344 /** 2345 * Populate args from device at path 2346 * 2347 * @fs_info: the filesystem 2348 * @args: the args to populate 2349 * @path: the path to the device 2350 * 2351 * This will read the super block of the device at @path and populate @args with 2352 * the devid, fsid, and uuid. This is meant to be used for ioctls that need to 2353 * lookup a device to operate on, but need to do it before we take any locks. 2354 * This properly handles the special case of "missing" that a user may pass in, 2355 * and does some basic sanity checks. The caller must make sure that @path is 2356 * properly NUL terminated before calling in, and must call 2357 * btrfs_put_dev_args_from_path() in order to free up the temporary fsid and 2358 * uuid buffers. 2359 * 2360 * Return: 0 for success, -errno for failure 2361 */ 2362 int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info, 2363 struct btrfs_dev_lookup_args *args, 2364 const char *path) 2365 { 2366 struct btrfs_super_block *disk_super; 2367 struct block_device *bdev; 2368 int ret; 2369 2370 if (!path || !path[0]) 2371 return -EINVAL; 2372 if (!strcmp(path, "missing")) { 2373 args->missing = true; 2374 return 0; 2375 } 2376 2377 args->uuid = kzalloc(BTRFS_UUID_SIZE, GFP_KERNEL); 2378 args->fsid = kzalloc(BTRFS_FSID_SIZE, GFP_KERNEL); 2379 if (!args->uuid || !args->fsid) { 2380 btrfs_put_dev_args_from_path(args); 2381 return -ENOMEM; 2382 } 2383 2384 ret = btrfs_get_bdev_and_sb(path, FMODE_READ, fs_info->bdev_holder, 0, 2385 &bdev, &disk_super); 2386 if (ret) 2387 return ret; 2388 args->devid = btrfs_stack_device_id(&disk_super->dev_item); 2389 memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE); 2390 if (btrfs_fs_incompat(fs_info, METADATA_UUID)) 2391 memcpy(args->fsid, disk_super->metadata_uuid, BTRFS_FSID_SIZE); 2392 else 2393 memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE); 2394 btrfs_release_disk_super(disk_super); 2395 blkdev_put(bdev, FMODE_READ); 2396 return 0; 2397 } 2398 2399 /* 2400 * Only use this jointly with btrfs_get_dev_args_from_path() because we will 2401 * allocate our ->uuid and ->fsid pointers, everybody else uses local variables 2402 * that don't need to be freed. 2403 */ 2404 void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args) 2405 { 2406 kfree(args->uuid); 2407 kfree(args->fsid); 2408 args->uuid = NULL; 2409 args->fsid = NULL; 2410 } 2411 2412 struct btrfs_device *btrfs_find_device_by_devspec( 2413 struct btrfs_fs_info *fs_info, u64 devid, 2414 const char *device_path) 2415 { 2416 BTRFS_DEV_LOOKUP_ARGS(args); 2417 struct btrfs_device *device; 2418 int ret; 2419 2420 if (devid) { 2421 args.devid = devid; 2422 device = btrfs_find_device(fs_info->fs_devices, &args); 2423 if (!device) 2424 return ERR_PTR(-ENOENT); 2425 return device; 2426 } 2427 2428 ret = btrfs_get_dev_args_from_path(fs_info, &args, device_path); 2429 if (ret) 2430 return ERR_PTR(ret); 2431 device = btrfs_find_device(fs_info->fs_devices, &args); 2432 btrfs_put_dev_args_from_path(&args); 2433 if (!device) 2434 return ERR_PTR(-ENOENT); 2435 return device; 2436 } 2437 2438 static struct btrfs_fs_devices *btrfs_init_sprout(struct btrfs_fs_info *fs_info) 2439 { 2440 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2441 struct btrfs_fs_devices *old_devices; 2442 struct btrfs_fs_devices *seed_devices; 2443 2444 lockdep_assert_held(&uuid_mutex); 2445 if (!fs_devices->seeding) 2446 return ERR_PTR(-EINVAL); 2447 2448 /* 2449 * Private copy of the seed devices, anchored at 2450 * fs_info->fs_devices->seed_list 2451 */ 2452 seed_devices = alloc_fs_devices(NULL, NULL); 2453 if (IS_ERR(seed_devices)) 2454 return seed_devices; 2455 2456 /* 2457 * It's necessary to retain a copy of the original seed fs_devices in 2458 * fs_uuids so that filesystems which have been seeded can successfully 2459 * reference the seed device from open_seed_devices. This also supports 2460 * multiple fs seed. 2461 */ 2462 old_devices = clone_fs_devices(fs_devices); 2463 if (IS_ERR(old_devices)) { 2464 kfree(seed_devices); 2465 return old_devices; 2466 } 2467 2468 list_add(&old_devices->fs_list, &fs_uuids); 2469 2470 memcpy(seed_devices, fs_devices, sizeof(*seed_devices)); 2471 seed_devices->opened = 1; 2472 INIT_LIST_HEAD(&seed_devices->devices); 2473 INIT_LIST_HEAD(&seed_devices->alloc_list); 2474 mutex_init(&seed_devices->device_list_mutex); 2475 2476 return seed_devices; 2477 } 2478 2479 /* 2480 * Splice seed devices into the sprout fs_devices. 2481 * Generate a new fsid for the sprouted read-write filesystem. 2482 */ 2483 static void btrfs_setup_sprout(struct btrfs_fs_info *fs_info, 2484 struct btrfs_fs_devices *seed_devices) 2485 { 2486 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2487 struct btrfs_super_block *disk_super = fs_info->super_copy; 2488 struct btrfs_device *device; 2489 u64 super_flags; 2490 2491 /* 2492 * We are updating the fsid, the thread leading to device_list_add() 2493 * could race, so uuid_mutex is needed. 2494 */ 2495 lockdep_assert_held(&uuid_mutex); 2496 2497 /* 2498 * The threads listed below may traverse dev_list but can do that without 2499 * device_list_mutex: 2500 * - All device ops and balance - as we are in btrfs_exclop_start. 2501 * - Various dev_list readers - are using RCU. 2502 * - btrfs_ioctl_fitrim() - is using RCU. 2503 * 2504 * For-read threads as below are using device_list_mutex: 2505 * - Readonly scrub btrfs_scrub_dev() 2506 * - Readonly scrub btrfs_scrub_progress() 2507 * - btrfs_get_dev_stats() 2508 */ 2509 lockdep_assert_held(&fs_devices->device_list_mutex); 2510 2511 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices, 2512 synchronize_rcu); 2513 list_for_each_entry(device, &seed_devices->devices, dev_list) 2514 device->fs_devices = seed_devices; 2515 2516 fs_devices->seeding = false; 2517 fs_devices->num_devices = 0; 2518 fs_devices->open_devices = 0; 2519 fs_devices->missing_devices = 0; 2520 fs_devices->rotating = false; 2521 list_add(&seed_devices->seed_list, &fs_devices->seed_list); 2522 2523 generate_random_uuid(fs_devices->fsid); 2524 memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE); 2525 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); 2526 2527 super_flags = btrfs_super_flags(disk_super) & 2528 ~BTRFS_SUPER_FLAG_SEEDING; 2529 btrfs_set_super_flags(disk_super, super_flags); 2530 } 2531 2532 /* 2533 * Store the expected generation for seed devices in device items. 2534 */ 2535 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans) 2536 { 2537 BTRFS_DEV_LOOKUP_ARGS(args); 2538 struct btrfs_fs_info *fs_info = trans->fs_info; 2539 struct btrfs_root *root = fs_info->chunk_root; 2540 struct btrfs_path *path; 2541 struct extent_buffer *leaf; 2542 struct btrfs_dev_item *dev_item; 2543 struct btrfs_device *device; 2544 struct btrfs_key key; 2545 u8 fs_uuid[BTRFS_FSID_SIZE]; 2546 u8 dev_uuid[BTRFS_UUID_SIZE]; 2547 int ret; 2548 2549 path = btrfs_alloc_path(); 2550 if (!path) 2551 return -ENOMEM; 2552 2553 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2554 key.offset = 0; 2555 key.type = BTRFS_DEV_ITEM_KEY; 2556 2557 while (1) { 2558 btrfs_reserve_chunk_metadata(trans, false); 2559 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2560 btrfs_trans_release_chunk_metadata(trans); 2561 if (ret < 0) 2562 goto error; 2563 2564 leaf = path->nodes[0]; 2565 next_slot: 2566 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 2567 ret = btrfs_next_leaf(root, path); 2568 if (ret > 0) 2569 break; 2570 if (ret < 0) 2571 goto error; 2572 leaf = path->nodes[0]; 2573 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2574 btrfs_release_path(path); 2575 continue; 2576 } 2577 2578 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2579 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID || 2580 key.type != BTRFS_DEV_ITEM_KEY) 2581 break; 2582 2583 dev_item = btrfs_item_ptr(leaf, path->slots[0], 2584 struct btrfs_dev_item); 2585 args.devid = btrfs_device_id(leaf, dev_item); 2586 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 2587 BTRFS_UUID_SIZE); 2588 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 2589 BTRFS_FSID_SIZE); 2590 args.uuid = dev_uuid; 2591 args.fsid = fs_uuid; 2592 device = btrfs_find_device(fs_info->fs_devices, &args); 2593 BUG_ON(!device); /* Logic error */ 2594 2595 if (device->fs_devices->seeding) { 2596 btrfs_set_device_generation(leaf, dev_item, 2597 device->generation); 2598 btrfs_mark_buffer_dirty(leaf); 2599 } 2600 2601 path->slots[0]++; 2602 goto next_slot; 2603 } 2604 ret = 0; 2605 error: 2606 btrfs_free_path(path); 2607 return ret; 2608 } 2609 2610 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path) 2611 { 2612 struct btrfs_root *root = fs_info->dev_root; 2613 struct request_queue *q; 2614 struct btrfs_trans_handle *trans; 2615 struct btrfs_device *device; 2616 struct block_device *bdev; 2617 struct super_block *sb = fs_info->sb; 2618 struct rcu_string *name; 2619 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2620 struct btrfs_fs_devices *seed_devices; 2621 u64 orig_super_total_bytes; 2622 u64 orig_super_num_devices; 2623 int ret = 0; 2624 bool seeding_dev = false; 2625 bool locked = false; 2626 2627 if (sb_rdonly(sb) && !fs_devices->seeding) 2628 return -EROFS; 2629 2630 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, 2631 fs_info->bdev_holder); 2632 if (IS_ERR(bdev)) 2633 return PTR_ERR(bdev); 2634 2635 if (!btrfs_check_device_zone_type(fs_info, bdev)) { 2636 ret = -EINVAL; 2637 goto error; 2638 } 2639 2640 if (fs_devices->seeding) { 2641 seeding_dev = true; 2642 down_write(&sb->s_umount); 2643 mutex_lock(&uuid_mutex); 2644 locked = true; 2645 } 2646 2647 sync_blockdev(bdev); 2648 2649 rcu_read_lock(); 2650 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) { 2651 if (device->bdev == bdev) { 2652 ret = -EEXIST; 2653 rcu_read_unlock(); 2654 goto error; 2655 } 2656 } 2657 rcu_read_unlock(); 2658 2659 device = btrfs_alloc_device(fs_info, NULL, NULL); 2660 if (IS_ERR(device)) { 2661 /* we can safely leave the fs_devices entry around */ 2662 ret = PTR_ERR(device); 2663 goto error; 2664 } 2665 2666 name = rcu_string_strdup(device_path, GFP_KERNEL); 2667 if (!name) { 2668 ret = -ENOMEM; 2669 goto error_free_device; 2670 } 2671 rcu_assign_pointer(device->name, name); 2672 2673 device->fs_info = fs_info; 2674 device->bdev = bdev; 2675 2676 ret = btrfs_get_dev_zone_info(device, false); 2677 if (ret) 2678 goto error_free_device; 2679 2680 trans = btrfs_start_transaction(root, 0); 2681 if (IS_ERR(trans)) { 2682 ret = PTR_ERR(trans); 2683 goto error_free_zone; 2684 } 2685 2686 q = bdev_get_queue(bdev); 2687 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 2688 device->generation = trans->transid; 2689 device->io_width = fs_info->sectorsize; 2690 device->io_align = fs_info->sectorsize; 2691 device->sector_size = fs_info->sectorsize; 2692 device->total_bytes = 2693 round_down(bdev_nr_bytes(bdev), fs_info->sectorsize); 2694 device->disk_total_bytes = device->total_bytes; 2695 device->commit_total_bytes = device->total_bytes; 2696 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2697 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 2698 device->mode = FMODE_EXCL; 2699 device->dev_stats_valid = 1; 2700 set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE); 2701 2702 if (seeding_dev) { 2703 btrfs_clear_sb_rdonly(sb); 2704 2705 /* GFP_KERNEL allocation must not be under device_list_mutex */ 2706 seed_devices = btrfs_init_sprout(fs_info); 2707 if (IS_ERR(seed_devices)) { 2708 ret = PTR_ERR(seed_devices); 2709 btrfs_abort_transaction(trans, ret); 2710 goto error_trans; 2711 } 2712 } 2713 2714 mutex_lock(&fs_devices->device_list_mutex); 2715 if (seeding_dev) { 2716 btrfs_setup_sprout(fs_info, seed_devices); 2717 btrfs_assign_next_active_device(fs_info->fs_devices->latest_dev, 2718 device); 2719 } 2720 2721 device->fs_devices = fs_devices; 2722 2723 mutex_lock(&fs_info->chunk_mutex); 2724 list_add_rcu(&device->dev_list, &fs_devices->devices); 2725 list_add(&device->dev_alloc_list, &fs_devices->alloc_list); 2726 fs_devices->num_devices++; 2727 fs_devices->open_devices++; 2728 fs_devices->rw_devices++; 2729 fs_devices->total_devices++; 2730 fs_devices->total_rw_bytes += device->total_bytes; 2731 2732 atomic64_add(device->total_bytes, &fs_info->free_chunk_space); 2733 2734 if (!blk_queue_nonrot(q)) 2735 fs_devices->rotating = true; 2736 2737 orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy); 2738 btrfs_set_super_total_bytes(fs_info->super_copy, 2739 round_down(orig_super_total_bytes + device->total_bytes, 2740 fs_info->sectorsize)); 2741 2742 orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy); 2743 btrfs_set_super_num_devices(fs_info->super_copy, 2744 orig_super_num_devices + 1); 2745 2746 /* 2747 * we've got more storage, clear any full flags on the space 2748 * infos 2749 */ 2750 btrfs_clear_space_info_full(fs_info); 2751 2752 mutex_unlock(&fs_info->chunk_mutex); 2753 2754 /* Add sysfs device entry */ 2755 btrfs_sysfs_add_device(device); 2756 2757 mutex_unlock(&fs_devices->device_list_mutex); 2758 2759 if (seeding_dev) { 2760 mutex_lock(&fs_info->chunk_mutex); 2761 ret = init_first_rw_device(trans); 2762 mutex_unlock(&fs_info->chunk_mutex); 2763 if (ret) { 2764 btrfs_abort_transaction(trans, ret); 2765 goto error_sysfs; 2766 } 2767 } 2768 2769 ret = btrfs_add_dev_item(trans, device); 2770 if (ret) { 2771 btrfs_abort_transaction(trans, ret); 2772 goto error_sysfs; 2773 } 2774 2775 if (seeding_dev) { 2776 ret = btrfs_finish_sprout(trans); 2777 if (ret) { 2778 btrfs_abort_transaction(trans, ret); 2779 goto error_sysfs; 2780 } 2781 2782 /* 2783 * fs_devices now represents the newly sprouted filesystem and 2784 * its fsid has been changed by btrfs_sprout_splice(). 2785 */ 2786 btrfs_sysfs_update_sprout_fsid(fs_devices); 2787 } 2788 2789 ret = btrfs_commit_transaction(trans); 2790 2791 if (seeding_dev) { 2792 mutex_unlock(&uuid_mutex); 2793 up_write(&sb->s_umount); 2794 locked = false; 2795 2796 if (ret) /* transaction commit */ 2797 return ret; 2798 2799 ret = btrfs_relocate_sys_chunks(fs_info); 2800 if (ret < 0) 2801 btrfs_handle_fs_error(fs_info, ret, 2802 "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command."); 2803 trans = btrfs_attach_transaction(root); 2804 if (IS_ERR(trans)) { 2805 if (PTR_ERR(trans) == -ENOENT) 2806 return 0; 2807 ret = PTR_ERR(trans); 2808 trans = NULL; 2809 goto error_sysfs; 2810 } 2811 ret = btrfs_commit_transaction(trans); 2812 } 2813 2814 /* 2815 * Now that we have written a new super block to this device, check all 2816 * other fs_devices list if device_path alienates any other scanned 2817 * device. 2818 * We can ignore the return value as it typically returns -EINVAL and 2819 * only succeeds if the device was an alien. 2820 */ 2821 btrfs_forget_devices(device_path); 2822 2823 /* Update ctime/mtime for blkid or udev */ 2824 update_dev_time(device_path); 2825 2826 return ret; 2827 2828 error_sysfs: 2829 btrfs_sysfs_remove_device(device); 2830 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2831 mutex_lock(&fs_info->chunk_mutex); 2832 list_del_rcu(&device->dev_list); 2833 list_del(&device->dev_alloc_list); 2834 fs_info->fs_devices->num_devices--; 2835 fs_info->fs_devices->open_devices--; 2836 fs_info->fs_devices->rw_devices--; 2837 fs_info->fs_devices->total_devices--; 2838 fs_info->fs_devices->total_rw_bytes -= device->total_bytes; 2839 atomic64_sub(device->total_bytes, &fs_info->free_chunk_space); 2840 btrfs_set_super_total_bytes(fs_info->super_copy, 2841 orig_super_total_bytes); 2842 btrfs_set_super_num_devices(fs_info->super_copy, 2843 orig_super_num_devices); 2844 mutex_unlock(&fs_info->chunk_mutex); 2845 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2846 error_trans: 2847 if (seeding_dev) 2848 btrfs_set_sb_rdonly(sb); 2849 if (trans) 2850 btrfs_end_transaction(trans); 2851 error_free_zone: 2852 btrfs_destroy_dev_zone_info(device); 2853 error_free_device: 2854 btrfs_free_device(device); 2855 error: 2856 blkdev_put(bdev, FMODE_EXCL); 2857 if (locked) { 2858 mutex_unlock(&uuid_mutex); 2859 up_write(&sb->s_umount); 2860 } 2861 return ret; 2862 } 2863 2864 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans, 2865 struct btrfs_device *device) 2866 { 2867 int ret; 2868 struct btrfs_path *path; 2869 struct btrfs_root *root = device->fs_info->chunk_root; 2870 struct btrfs_dev_item *dev_item; 2871 struct extent_buffer *leaf; 2872 struct btrfs_key key; 2873 2874 path = btrfs_alloc_path(); 2875 if (!path) 2876 return -ENOMEM; 2877 2878 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2879 key.type = BTRFS_DEV_ITEM_KEY; 2880 key.offset = device->devid; 2881 2882 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2883 if (ret < 0) 2884 goto out; 2885 2886 if (ret > 0) { 2887 ret = -ENOENT; 2888 goto out; 2889 } 2890 2891 leaf = path->nodes[0]; 2892 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 2893 2894 btrfs_set_device_id(leaf, dev_item, device->devid); 2895 btrfs_set_device_type(leaf, dev_item, device->type); 2896 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 2897 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 2898 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 2899 btrfs_set_device_total_bytes(leaf, dev_item, 2900 btrfs_device_get_disk_total_bytes(device)); 2901 btrfs_set_device_bytes_used(leaf, dev_item, 2902 btrfs_device_get_bytes_used(device)); 2903 btrfs_mark_buffer_dirty(leaf); 2904 2905 out: 2906 btrfs_free_path(path); 2907 return ret; 2908 } 2909 2910 int btrfs_grow_device(struct btrfs_trans_handle *trans, 2911 struct btrfs_device *device, u64 new_size) 2912 { 2913 struct btrfs_fs_info *fs_info = device->fs_info; 2914 struct btrfs_super_block *super_copy = fs_info->super_copy; 2915 u64 old_total; 2916 u64 diff; 2917 int ret; 2918 2919 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 2920 return -EACCES; 2921 2922 new_size = round_down(new_size, fs_info->sectorsize); 2923 2924 mutex_lock(&fs_info->chunk_mutex); 2925 old_total = btrfs_super_total_bytes(super_copy); 2926 diff = round_down(new_size - device->total_bytes, fs_info->sectorsize); 2927 2928 if (new_size <= device->total_bytes || 2929 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 2930 mutex_unlock(&fs_info->chunk_mutex); 2931 return -EINVAL; 2932 } 2933 2934 btrfs_set_super_total_bytes(super_copy, 2935 round_down(old_total + diff, fs_info->sectorsize)); 2936 device->fs_devices->total_rw_bytes += diff; 2937 2938 btrfs_device_set_total_bytes(device, new_size); 2939 btrfs_device_set_disk_total_bytes(device, new_size); 2940 btrfs_clear_space_info_full(device->fs_info); 2941 if (list_empty(&device->post_commit_list)) 2942 list_add_tail(&device->post_commit_list, 2943 &trans->transaction->dev_update_list); 2944 mutex_unlock(&fs_info->chunk_mutex); 2945 2946 btrfs_reserve_chunk_metadata(trans, false); 2947 ret = btrfs_update_device(trans, device); 2948 btrfs_trans_release_chunk_metadata(trans); 2949 2950 return ret; 2951 } 2952 2953 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 2954 { 2955 struct btrfs_fs_info *fs_info = trans->fs_info; 2956 struct btrfs_root *root = fs_info->chunk_root; 2957 int ret; 2958 struct btrfs_path *path; 2959 struct btrfs_key key; 2960 2961 path = btrfs_alloc_path(); 2962 if (!path) 2963 return -ENOMEM; 2964 2965 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2966 key.offset = chunk_offset; 2967 key.type = BTRFS_CHUNK_ITEM_KEY; 2968 2969 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 2970 if (ret < 0) 2971 goto out; 2972 else if (ret > 0) { /* Logic error or corruption */ 2973 btrfs_handle_fs_error(fs_info, -ENOENT, 2974 "Failed lookup while freeing chunk."); 2975 ret = -ENOENT; 2976 goto out; 2977 } 2978 2979 ret = btrfs_del_item(trans, root, path); 2980 if (ret < 0) 2981 btrfs_handle_fs_error(fs_info, ret, 2982 "Failed to delete chunk item."); 2983 out: 2984 btrfs_free_path(path); 2985 return ret; 2986 } 2987 2988 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 2989 { 2990 struct btrfs_super_block *super_copy = fs_info->super_copy; 2991 struct btrfs_disk_key *disk_key; 2992 struct btrfs_chunk *chunk; 2993 u8 *ptr; 2994 int ret = 0; 2995 u32 num_stripes; 2996 u32 array_size; 2997 u32 len = 0; 2998 u32 cur; 2999 struct btrfs_key key; 3000 3001 lockdep_assert_held(&fs_info->chunk_mutex); 3002 array_size = btrfs_super_sys_array_size(super_copy); 3003 3004 ptr = super_copy->sys_chunk_array; 3005 cur = 0; 3006 3007 while (cur < array_size) { 3008 disk_key = (struct btrfs_disk_key *)ptr; 3009 btrfs_disk_key_to_cpu(&key, disk_key); 3010 3011 len = sizeof(*disk_key); 3012 3013 if (key.type == BTRFS_CHUNK_ITEM_KEY) { 3014 chunk = (struct btrfs_chunk *)(ptr + len); 3015 num_stripes = btrfs_stack_chunk_num_stripes(chunk); 3016 len += btrfs_chunk_item_size(num_stripes); 3017 } else { 3018 ret = -EIO; 3019 break; 3020 } 3021 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID && 3022 key.offset == chunk_offset) { 3023 memmove(ptr, ptr + len, array_size - (cur + len)); 3024 array_size -= len; 3025 btrfs_set_super_sys_array_size(super_copy, array_size); 3026 } else { 3027 ptr += len; 3028 cur += len; 3029 } 3030 } 3031 return ret; 3032 } 3033 3034 /* 3035 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent. 3036 * @logical: Logical block offset in bytes. 3037 * @length: Length of extent in bytes. 3038 * 3039 * Return: Chunk mapping or ERR_PTR. 3040 */ 3041 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info, 3042 u64 logical, u64 length) 3043 { 3044 struct extent_map_tree *em_tree; 3045 struct extent_map *em; 3046 3047 em_tree = &fs_info->mapping_tree; 3048 read_lock(&em_tree->lock); 3049 em = lookup_extent_mapping(em_tree, logical, length); 3050 read_unlock(&em_tree->lock); 3051 3052 if (!em) { 3053 btrfs_crit(fs_info, "unable to find logical %llu length %llu", 3054 logical, length); 3055 return ERR_PTR(-EINVAL); 3056 } 3057 3058 if (em->start > logical || em->start + em->len < logical) { 3059 btrfs_crit(fs_info, 3060 "found a bad mapping, wanted %llu-%llu, found %llu-%llu", 3061 logical, length, em->start, em->start + em->len); 3062 free_extent_map(em); 3063 return ERR_PTR(-EINVAL); 3064 } 3065 3066 /* callers are responsible for dropping em's ref. */ 3067 return em; 3068 } 3069 3070 static int remove_chunk_item(struct btrfs_trans_handle *trans, 3071 struct map_lookup *map, u64 chunk_offset) 3072 { 3073 int i; 3074 3075 /* 3076 * Removing chunk items and updating the device items in the chunks btree 3077 * requires holding the chunk_mutex. 3078 * See the comment at btrfs_chunk_alloc() for the details. 3079 */ 3080 lockdep_assert_held(&trans->fs_info->chunk_mutex); 3081 3082 for (i = 0; i < map->num_stripes; i++) { 3083 int ret; 3084 3085 ret = btrfs_update_device(trans, map->stripes[i].dev); 3086 if (ret) 3087 return ret; 3088 } 3089 3090 return btrfs_free_chunk(trans, chunk_offset); 3091 } 3092 3093 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 3094 { 3095 struct btrfs_fs_info *fs_info = trans->fs_info; 3096 struct extent_map *em; 3097 struct map_lookup *map; 3098 u64 dev_extent_len = 0; 3099 int i, ret = 0; 3100 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 3101 3102 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 3103 if (IS_ERR(em)) { 3104 /* 3105 * This is a logic error, but we don't want to just rely on the 3106 * user having built with ASSERT enabled, so if ASSERT doesn't 3107 * do anything we still error out. 3108 */ 3109 ASSERT(0); 3110 return PTR_ERR(em); 3111 } 3112 map = em->map_lookup; 3113 3114 /* 3115 * First delete the device extent items from the devices btree. 3116 * We take the device_list_mutex to avoid racing with the finishing phase 3117 * of a device replace operation. See the comment below before acquiring 3118 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex 3119 * because that can result in a deadlock when deleting the device extent 3120 * items from the devices btree - COWing an extent buffer from the btree 3121 * may result in allocating a new metadata chunk, which would attempt to 3122 * lock again fs_info->chunk_mutex. 3123 */ 3124 mutex_lock(&fs_devices->device_list_mutex); 3125 for (i = 0; i < map->num_stripes; i++) { 3126 struct btrfs_device *device = map->stripes[i].dev; 3127 ret = btrfs_free_dev_extent(trans, device, 3128 map->stripes[i].physical, 3129 &dev_extent_len); 3130 if (ret) { 3131 mutex_unlock(&fs_devices->device_list_mutex); 3132 btrfs_abort_transaction(trans, ret); 3133 goto out; 3134 } 3135 3136 if (device->bytes_used > 0) { 3137 mutex_lock(&fs_info->chunk_mutex); 3138 btrfs_device_set_bytes_used(device, 3139 device->bytes_used - dev_extent_len); 3140 atomic64_add(dev_extent_len, &fs_info->free_chunk_space); 3141 btrfs_clear_space_info_full(fs_info); 3142 mutex_unlock(&fs_info->chunk_mutex); 3143 } 3144 } 3145 mutex_unlock(&fs_devices->device_list_mutex); 3146 3147 /* 3148 * We acquire fs_info->chunk_mutex for 2 reasons: 3149 * 3150 * 1) Just like with the first phase of the chunk allocation, we must 3151 * reserve system space, do all chunk btree updates and deletions, and 3152 * update the system chunk array in the superblock while holding this 3153 * mutex. This is for similar reasons as explained on the comment at 3154 * the top of btrfs_chunk_alloc(); 3155 * 3156 * 2) Prevent races with the final phase of a device replace operation 3157 * that replaces the device object associated with the map's stripes, 3158 * because the device object's id can change at any time during that 3159 * final phase of the device replace operation 3160 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 3161 * replaced device and then see it with an ID of 3162 * BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating 3163 * the device item, which does not exists on the chunk btree. 3164 * The finishing phase of device replace acquires both the 3165 * device_list_mutex and the chunk_mutex, in that order, so we are 3166 * safe by just acquiring the chunk_mutex. 3167 */ 3168 trans->removing_chunk = true; 3169 mutex_lock(&fs_info->chunk_mutex); 3170 3171 check_system_chunk(trans, map->type); 3172 3173 ret = remove_chunk_item(trans, map, chunk_offset); 3174 /* 3175 * Normally we should not get -ENOSPC since we reserved space before 3176 * through the call to check_system_chunk(). 3177 * 3178 * Despite our system space_info having enough free space, we may not 3179 * be able to allocate extents from its block groups, because all have 3180 * an incompatible profile, which will force us to allocate a new system 3181 * block group with the right profile, or right after we called 3182 * check_system_space() above, a scrub turned the only system block group 3183 * with enough free space into RO mode. 3184 * This is explained with more detail at do_chunk_alloc(). 3185 * 3186 * So if we get -ENOSPC, allocate a new system chunk and retry once. 3187 */ 3188 if (ret == -ENOSPC) { 3189 const u64 sys_flags = btrfs_system_alloc_profile(fs_info); 3190 struct btrfs_block_group *sys_bg; 3191 3192 sys_bg = btrfs_create_chunk(trans, sys_flags); 3193 if (IS_ERR(sys_bg)) { 3194 ret = PTR_ERR(sys_bg); 3195 btrfs_abort_transaction(trans, ret); 3196 goto out; 3197 } 3198 3199 ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg); 3200 if (ret) { 3201 btrfs_abort_transaction(trans, ret); 3202 goto out; 3203 } 3204 3205 ret = remove_chunk_item(trans, map, chunk_offset); 3206 if (ret) { 3207 btrfs_abort_transaction(trans, ret); 3208 goto out; 3209 } 3210 } else if (ret) { 3211 btrfs_abort_transaction(trans, ret); 3212 goto out; 3213 } 3214 3215 trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len); 3216 3217 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 3218 ret = btrfs_del_sys_chunk(fs_info, chunk_offset); 3219 if (ret) { 3220 btrfs_abort_transaction(trans, ret); 3221 goto out; 3222 } 3223 } 3224 3225 mutex_unlock(&fs_info->chunk_mutex); 3226 trans->removing_chunk = false; 3227 3228 /* 3229 * We are done with chunk btree updates and deletions, so release the 3230 * system space we previously reserved (with check_system_chunk()). 3231 */ 3232 btrfs_trans_release_chunk_metadata(trans); 3233 3234 ret = btrfs_remove_block_group(trans, chunk_offset, em); 3235 if (ret) { 3236 btrfs_abort_transaction(trans, ret); 3237 goto out; 3238 } 3239 3240 out: 3241 if (trans->removing_chunk) { 3242 mutex_unlock(&fs_info->chunk_mutex); 3243 trans->removing_chunk = false; 3244 } 3245 /* once for us */ 3246 free_extent_map(em); 3247 return ret; 3248 } 3249 3250 int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 3251 { 3252 struct btrfs_root *root = fs_info->chunk_root; 3253 struct btrfs_trans_handle *trans; 3254 struct btrfs_block_group *block_group; 3255 u64 length; 3256 int ret; 3257 3258 /* 3259 * Prevent races with automatic removal of unused block groups. 3260 * After we relocate and before we remove the chunk with offset 3261 * chunk_offset, automatic removal of the block group can kick in, 3262 * resulting in a failure when calling btrfs_remove_chunk() below. 3263 * 3264 * Make sure to acquire this mutex before doing a tree search (dev 3265 * or chunk trees) to find chunks. Otherwise the cleaner kthread might 3266 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after 3267 * we release the path used to search the chunk/dev tree and before 3268 * the current task acquires this mutex and calls us. 3269 */ 3270 lockdep_assert_held(&fs_info->reclaim_bgs_lock); 3271 3272 /* step one, relocate all the extents inside this chunk */ 3273 btrfs_scrub_pause(fs_info); 3274 ret = btrfs_relocate_block_group(fs_info, chunk_offset); 3275 btrfs_scrub_continue(fs_info); 3276 if (ret) 3277 return ret; 3278 3279 block_group = btrfs_lookup_block_group(fs_info, chunk_offset); 3280 if (!block_group) 3281 return -ENOENT; 3282 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); 3283 length = block_group->length; 3284 btrfs_put_block_group(block_group); 3285 3286 /* 3287 * On a zoned file system, discard the whole block group, this will 3288 * trigger a REQ_OP_ZONE_RESET operation on the device zone. If 3289 * resetting the zone fails, don't treat it as a fatal problem from the 3290 * filesystem's point of view. 3291 */ 3292 if (btrfs_is_zoned(fs_info)) { 3293 ret = btrfs_discard_extent(fs_info, chunk_offset, length, NULL); 3294 if (ret) 3295 btrfs_info(fs_info, 3296 "failed to reset zone %llu after relocation", 3297 chunk_offset); 3298 } 3299 3300 trans = btrfs_start_trans_remove_block_group(root->fs_info, 3301 chunk_offset); 3302 if (IS_ERR(trans)) { 3303 ret = PTR_ERR(trans); 3304 btrfs_handle_fs_error(root->fs_info, ret, NULL); 3305 return ret; 3306 } 3307 3308 /* 3309 * step two, delete the device extents and the 3310 * chunk tree entries 3311 */ 3312 ret = btrfs_remove_chunk(trans, chunk_offset); 3313 btrfs_end_transaction(trans); 3314 return ret; 3315 } 3316 3317 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info) 3318 { 3319 struct btrfs_root *chunk_root = fs_info->chunk_root; 3320 struct btrfs_path *path; 3321 struct extent_buffer *leaf; 3322 struct btrfs_chunk *chunk; 3323 struct btrfs_key key; 3324 struct btrfs_key found_key; 3325 u64 chunk_type; 3326 bool retried = false; 3327 int failed = 0; 3328 int ret; 3329 3330 path = btrfs_alloc_path(); 3331 if (!path) 3332 return -ENOMEM; 3333 3334 again: 3335 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3336 key.offset = (u64)-1; 3337 key.type = BTRFS_CHUNK_ITEM_KEY; 3338 3339 while (1) { 3340 mutex_lock(&fs_info->reclaim_bgs_lock); 3341 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3342 if (ret < 0) { 3343 mutex_unlock(&fs_info->reclaim_bgs_lock); 3344 goto error; 3345 } 3346 BUG_ON(ret == 0); /* Corruption */ 3347 3348 ret = btrfs_previous_item(chunk_root, path, key.objectid, 3349 key.type); 3350 if (ret) 3351 mutex_unlock(&fs_info->reclaim_bgs_lock); 3352 if (ret < 0) 3353 goto error; 3354 if (ret > 0) 3355 break; 3356 3357 leaf = path->nodes[0]; 3358 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3359 3360 chunk = btrfs_item_ptr(leaf, path->slots[0], 3361 struct btrfs_chunk); 3362 chunk_type = btrfs_chunk_type(leaf, chunk); 3363 btrfs_release_path(path); 3364 3365 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { 3366 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3367 if (ret == -ENOSPC) 3368 failed++; 3369 else 3370 BUG_ON(ret); 3371 } 3372 mutex_unlock(&fs_info->reclaim_bgs_lock); 3373 3374 if (found_key.offset == 0) 3375 break; 3376 key.offset = found_key.offset - 1; 3377 } 3378 ret = 0; 3379 if (failed && !retried) { 3380 failed = 0; 3381 retried = true; 3382 goto again; 3383 } else if (WARN_ON(failed && retried)) { 3384 ret = -ENOSPC; 3385 } 3386 error: 3387 btrfs_free_path(path); 3388 return ret; 3389 } 3390 3391 /* 3392 * return 1 : allocate a data chunk successfully, 3393 * return <0: errors during allocating a data chunk, 3394 * return 0 : no need to allocate a data chunk. 3395 */ 3396 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info, 3397 u64 chunk_offset) 3398 { 3399 struct btrfs_block_group *cache; 3400 u64 bytes_used; 3401 u64 chunk_type; 3402 3403 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3404 ASSERT(cache); 3405 chunk_type = cache->flags; 3406 btrfs_put_block_group(cache); 3407 3408 if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA)) 3409 return 0; 3410 3411 spin_lock(&fs_info->data_sinfo->lock); 3412 bytes_used = fs_info->data_sinfo->bytes_used; 3413 spin_unlock(&fs_info->data_sinfo->lock); 3414 3415 if (!bytes_used) { 3416 struct btrfs_trans_handle *trans; 3417 int ret; 3418 3419 trans = btrfs_join_transaction(fs_info->tree_root); 3420 if (IS_ERR(trans)) 3421 return PTR_ERR(trans); 3422 3423 ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA); 3424 btrfs_end_transaction(trans); 3425 if (ret < 0) 3426 return ret; 3427 return 1; 3428 } 3429 3430 return 0; 3431 } 3432 3433 static int insert_balance_item(struct btrfs_fs_info *fs_info, 3434 struct btrfs_balance_control *bctl) 3435 { 3436 struct btrfs_root *root = fs_info->tree_root; 3437 struct btrfs_trans_handle *trans; 3438 struct btrfs_balance_item *item; 3439 struct btrfs_disk_balance_args disk_bargs; 3440 struct btrfs_path *path; 3441 struct extent_buffer *leaf; 3442 struct btrfs_key key; 3443 int ret, err; 3444 3445 path = btrfs_alloc_path(); 3446 if (!path) 3447 return -ENOMEM; 3448 3449 trans = btrfs_start_transaction(root, 0); 3450 if (IS_ERR(trans)) { 3451 btrfs_free_path(path); 3452 return PTR_ERR(trans); 3453 } 3454 3455 key.objectid = BTRFS_BALANCE_OBJECTID; 3456 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3457 key.offset = 0; 3458 3459 ret = btrfs_insert_empty_item(trans, root, path, &key, 3460 sizeof(*item)); 3461 if (ret) 3462 goto out; 3463 3464 leaf = path->nodes[0]; 3465 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 3466 3467 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item)); 3468 3469 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data); 3470 btrfs_set_balance_data(leaf, item, &disk_bargs); 3471 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta); 3472 btrfs_set_balance_meta(leaf, item, &disk_bargs); 3473 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys); 3474 btrfs_set_balance_sys(leaf, item, &disk_bargs); 3475 3476 btrfs_set_balance_flags(leaf, item, bctl->flags); 3477 3478 btrfs_mark_buffer_dirty(leaf); 3479 out: 3480 btrfs_free_path(path); 3481 err = btrfs_commit_transaction(trans); 3482 if (err && !ret) 3483 ret = err; 3484 return ret; 3485 } 3486 3487 static int del_balance_item(struct btrfs_fs_info *fs_info) 3488 { 3489 struct btrfs_root *root = fs_info->tree_root; 3490 struct btrfs_trans_handle *trans; 3491 struct btrfs_path *path; 3492 struct btrfs_key key; 3493 int ret, err; 3494 3495 path = btrfs_alloc_path(); 3496 if (!path) 3497 return -ENOMEM; 3498 3499 trans = btrfs_start_transaction_fallback_global_rsv(root, 0); 3500 if (IS_ERR(trans)) { 3501 btrfs_free_path(path); 3502 return PTR_ERR(trans); 3503 } 3504 3505 key.objectid = BTRFS_BALANCE_OBJECTID; 3506 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3507 key.offset = 0; 3508 3509 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 3510 if (ret < 0) 3511 goto out; 3512 if (ret > 0) { 3513 ret = -ENOENT; 3514 goto out; 3515 } 3516 3517 ret = btrfs_del_item(trans, root, path); 3518 out: 3519 btrfs_free_path(path); 3520 err = btrfs_commit_transaction(trans); 3521 if (err && !ret) 3522 ret = err; 3523 return ret; 3524 } 3525 3526 /* 3527 * This is a heuristic used to reduce the number of chunks balanced on 3528 * resume after balance was interrupted. 3529 */ 3530 static void update_balance_args(struct btrfs_balance_control *bctl) 3531 { 3532 /* 3533 * Turn on soft mode for chunk types that were being converted. 3534 */ 3535 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) 3536 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT; 3537 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) 3538 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT; 3539 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) 3540 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT; 3541 3542 /* 3543 * Turn on usage filter if is not already used. The idea is 3544 * that chunks that we have already balanced should be 3545 * reasonably full. Don't do it for chunks that are being 3546 * converted - that will keep us from relocating unconverted 3547 * (albeit full) chunks. 3548 */ 3549 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) && 3550 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3551 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3552 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE; 3553 bctl->data.usage = 90; 3554 } 3555 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) && 3556 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3557 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3558 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE; 3559 bctl->sys.usage = 90; 3560 } 3561 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) && 3562 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3563 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3564 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE; 3565 bctl->meta.usage = 90; 3566 } 3567 } 3568 3569 /* 3570 * Clear the balance status in fs_info and delete the balance item from disk. 3571 */ 3572 static void reset_balance_state(struct btrfs_fs_info *fs_info) 3573 { 3574 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3575 int ret; 3576 3577 BUG_ON(!fs_info->balance_ctl); 3578 3579 spin_lock(&fs_info->balance_lock); 3580 fs_info->balance_ctl = NULL; 3581 spin_unlock(&fs_info->balance_lock); 3582 3583 kfree(bctl); 3584 ret = del_balance_item(fs_info); 3585 if (ret) 3586 btrfs_handle_fs_error(fs_info, ret, NULL); 3587 } 3588 3589 /* 3590 * Balance filters. Return 1 if chunk should be filtered out 3591 * (should not be balanced). 3592 */ 3593 static int chunk_profiles_filter(u64 chunk_type, 3594 struct btrfs_balance_args *bargs) 3595 { 3596 chunk_type = chunk_to_extended(chunk_type) & 3597 BTRFS_EXTENDED_PROFILE_MASK; 3598 3599 if (bargs->profiles & chunk_type) 3600 return 0; 3601 3602 return 1; 3603 } 3604 3605 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, 3606 struct btrfs_balance_args *bargs) 3607 { 3608 struct btrfs_block_group *cache; 3609 u64 chunk_used; 3610 u64 user_thresh_min; 3611 u64 user_thresh_max; 3612 int ret = 1; 3613 3614 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3615 chunk_used = cache->used; 3616 3617 if (bargs->usage_min == 0) 3618 user_thresh_min = 0; 3619 else 3620 user_thresh_min = div_factor_fine(cache->length, 3621 bargs->usage_min); 3622 3623 if (bargs->usage_max == 0) 3624 user_thresh_max = 1; 3625 else if (bargs->usage_max > 100) 3626 user_thresh_max = cache->length; 3627 else 3628 user_thresh_max = div_factor_fine(cache->length, 3629 bargs->usage_max); 3630 3631 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max) 3632 ret = 0; 3633 3634 btrfs_put_block_group(cache); 3635 return ret; 3636 } 3637 3638 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, 3639 u64 chunk_offset, struct btrfs_balance_args *bargs) 3640 { 3641 struct btrfs_block_group *cache; 3642 u64 chunk_used, user_thresh; 3643 int ret = 1; 3644 3645 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3646 chunk_used = cache->used; 3647 3648 if (bargs->usage_min == 0) 3649 user_thresh = 1; 3650 else if (bargs->usage > 100) 3651 user_thresh = cache->length; 3652 else 3653 user_thresh = div_factor_fine(cache->length, bargs->usage); 3654 3655 if (chunk_used < user_thresh) 3656 ret = 0; 3657 3658 btrfs_put_block_group(cache); 3659 return ret; 3660 } 3661 3662 static int chunk_devid_filter(struct extent_buffer *leaf, 3663 struct btrfs_chunk *chunk, 3664 struct btrfs_balance_args *bargs) 3665 { 3666 struct btrfs_stripe *stripe; 3667 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3668 int i; 3669 3670 for (i = 0; i < num_stripes; i++) { 3671 stripe = btrfs_stripe_nr(chunk, i); 3672 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid) 3673 return 0; 3674 } 3675 3676 return 1; 3677 } 3678 3679 static u64 calc_data_stripes(u64 type, int num_stripes) 3680 { 3681 const int index = btrfs_bg_flags_to_raid_index(type); 3682 const int ncopies = btrfs_raid_array[index].ncopies; 3683 const int nparity = btrfs_raid_array[index].nparity; 3684 3685 return (num_stripes - nparity) / ncopies; 3686 } 3687 3688 /* [pstart, pend) */ 3689 static int chunk_drange_filter(struct extent_buffer *leaf, 3690 struct btrfs_chunk *chunk, 3691 struct btrfs_balance_args *bargs) 3692 { 3693 struct btrfs_stripe *stripe; 3694 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3695 u64 stripe_offset; 3696 u64 stripe_length; 3697 u64 type; 3698 int factor; 3699 int i; 3700 3701 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID)) 3702 return 0; 3703 3704 type = btrfs_chunk_type(leaf, chunk); 3705 factor = calc_data_stripes(type, num_stripes); 3706 3707 for (i = 0; i < num_stripes; i++) { 3708 stripe = btrfs_stripe_nr(chunk, i); 3709 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid) 3710 continue; 3711 3712 stripe_offset = btrfs_stripe_offset(leaf, stripe); 3713 stripe_length = btrfs_chunk_length(leaf, chunk); 3714 stripe_length = div_u64(stripe_length, factor); 3715 3716 if (stripe_offset < bargs->pend && 3717 stripe_offset + stripe_length > bargs->pstart) 3718 return 0; 3719 } 3720 3721 return 1; 3722 } 3723 3724 /* [vstart, vend) */ 3725 static int chunk_vrange_filter(struct extent_buffer *leaf, 3726 struct btrfs_chunk *chunk, 3727 u64 chunk_offset, 3728 struct btrfs_balance_args *bargs) 3729 { 3730 if (chunk_offset < bargs->vend && 3731 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart) 3732 /* at least part of the chunk is inside this vrange */ 3733 return 0; 3734 3735 return 1; 3736 } 3737 3738 static int chunk_stripes_range_filter(struct extent_buffer *leaf, 3739 struct btrfs_chunk *chunk, 3740 struct btrfs_balance_args *bargs) 3741 { 3742 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3743 3744 if (bargs->stripes_min <= num_stripes 3745 && num_stripes <= bargs->stripes_max) 3746 return 0; 3747 3748 return 1; 3749 } 3750 3751 static int chunk_soft_convert_filter(u64 chunk_type, 3752 struct btrfs_balance_args *bargs) 3753 { 3754 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 3755 return 0; 3756 3757 chunk_type = chunk_to_extended(chunk_type) & 3758 BTRFS_EXTENDED_PROFILE_MASK; 3759 3760 if (bargs->target == chunk_type) 3761 return 1; 3762 3763 return 0; 3764 } 3765 3766 static int should_balance_chunk(struct extent_buffer *leaf, 3767 struct btrfs_chunk *chunk, u64 chunk_offset) 3768 { 3769 struct btrfs_fs_info *fs_info = leaf->fs_info; 3770 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3771 struct btrfs_balance_args *bargs = NULL; 3772 u64 chunk_type = btrfs_chunk_type(leaf, chunk); 3773 3774 /* type filter */ 3775 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) & 3776 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) { 3777 return 0; 3778 } 3779 3780 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3781 bargs = &bctl->data; 3782 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3783 bargs = &bctl->sys; 3784 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3785 bargs = &bctl->meta; 3786 3787 /* profiles filter */ 3788 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) && 3789 chunk_profiles_filter(chunk_type, bargs)) { 3790 return 0; 3791 } 3792 3793 /* usage filter */ 3794 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) && 3795 chunk_usage_filter(fs_info, chunk_offset, bargs)) { 3796 return 0; 3797 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3798 chunk_usage_range_filter(fs_info, chunk_offset, bargs)) { 3799 return 0; 3800 } 3801 3802 /* devid filter */ 3803 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) && 3804 chunk_devid_filter(leaf, chunk, bargs)) { 3805 return 0; 3806 } 3807 3808 /* drange filter, makes sense only with devid filter */ 3809 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) && 3810 chunk_drange_filter(leaf, chunk, bargs)) { 3811 return 0; 3812 } 3813 3814 /* vrange filter */ 3815 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) && 3816 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) { 3817 return 0; 3818 } 3819 3820 /* stripes filter */ 3821 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) && 3822 chunk_stripes_range_filter(leaf, chunk, bargs)) { 3823 return 0; 3824 } 3825 3826 /* soft profile changing mode */ 3827 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) && 3828 chunk_soft_convert_filter(chunk_type, bargs)) { 3829 return 0; 3830 } 3831 3832 /* 3833 * limited by count, must be the last filter 3834 */ 3835 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) { 3836 if (bargs->limit == 0) 3837 return 0; 3838 else 3839 bargs->limit--; 3840 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) { 3841 /* 3842 * Same logic as the 'limit' filter; the minimum cannot be 3843 * determined here because we do not have the global information 3844 * about the count of all chunks that satisfy the filters. 3845 */ 3846 if (bargs->limit_max == 0) 3847 return 0; 3848 else 3849 bargs->limit_max--; 3850 } 3851 3852 return 1; 3853 } 3854 3855 static int __btrfs_balance(struct btrfs_fs_info *fs_info) 3856 { 3857 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3858 struct btrfs_root *chunk_root = fs_info->chunk_root; 3859 u64 chunk_type; 3860 struct btrfs_chunk *chunk; 3861 struct btrfs_path *path = NULL; 3862 struct btrfs_key key; 3863 struct btrfs_key found_key; 3864 struct extent_buffer *leaf; 3865 int slot; 3866 int ret; 3867 int enospc_errors = 0; 3868 bool counting = true; 3869 /* The single value limit and min/max limits use the same bytes in the */ 3870 u64 limit_data = bctl->data.limit; 3871 u64 limit_meta = bctl->meta.limit; 3872 u64 limit_sys = bctl->sys.limit; 3873 u32 count_data = 0; 3874 u32 count_meta = 0; 3875 u32 count_sys = 0; 3876 int chunk_reserved = 0; 3877 3878 path = btrfs_alloc_path(); 3879 if (!path) { 3880 ret = -ENOMEM; 3881 goto error; 3882 } 3883 3884 /* zero out stat counters */ 3885 spin_lock(&fs_info->balance_lock); 3886 memset(&bctl->stat, 0, sizeof(bctl->stat)); 3887 spin_unlock(&fs_info->balance_lock); 3888 again: 3889 if (!counting) { 3890 /* 3891 * The single value limit and min/max limits use the same bytes 3892 * in the 3893 */ 3894 bctl->data.limit = limit_data; 3895 bctl->meta.limit = limit_meta; 3896 bctl->sys.limit = limit_sys; 3897 } 3898 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3899 key.offset = (u64)-1; 3900 key.type = BTRFS_CHUNK_ITEM_KEY; 3901 3902 while (1) { 3903 if ((!counting && atomic_read(&fs_info->balance_pause_req)) || 3904 atomic_read(&fs_info->balance_cancel_req)) { 3905 ret = -ECANCELED; 3906 goto error; 3907 } 3908 3909 mutex_lock(&fs_info->reclaim_bgs_lock); 3910 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3911 if (ret < 0) { 3912 mutex_unlock(&fs_info->reclaim_bgs_lock); 3913 goto error; 3914 } 3915 3916 /* 3917 * this shouldn't happen, it means the last relocate 3918 * failed 3919 */ 3920 if (ret == 0) 3921 BUG(); /* FIXME break ? */ 3922 3923 ret = btrfs_previous_item(chunk_root, path, 0, 3924 BTRFS_CHUNK_ITEM_KEY); 3925 if (ret) { 3926 mutex_unlock(&fs_info->reclaim_bgs_lock); 3927 ret = 0; 3928 break; 3929 } 3930 3931 leaf = path->nodes[0]; 3932 slot = path->slots[0]; 3933 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3934 3935 if (found_key.objectid != key.objectid) { 3936 mutex_unlock(&fs_info->reclaim_bgs_lock); 3937 break; 3938 } 3939 3940 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 3941 chunk_type = btrfs_chunk_type(leaf, chunk); 3942 3943 if (!counting) { 3944 spin_lock(&fs_info->balance_lock); 3945 bctl->stat.considered++; 3946 spin_unlock(&fs_info->balance_lock); 3947 } 3948 3949 ret = should_balance_chunk(leaf, chunk, found_key.offset); 3950 3951 btrfs_release_path(path); 3952 if (!ret) { 3953 mutex_unlock(&fs_info->reclaim_bgs_lock); 3954 goto loop; 3955 } 3956 3957 if (counting) { 3958 mutex_unlock(&fs_info->reclaim_bgs_lock); 3959 spin_lock(&fs_info->balance_lock); 3960 bctl->stat.expected++; 3961 spin_unlock(&fs_info->balance_lock); 3962 3963 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3964 count_data++; 3965 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3966 count_sys++; 3967 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3968 count_meta++; 3969 3970 goto loop; 3971 } 3972 3973 /* 3974 * Apply limit_min filter, no need to check if the LIMITS 3975 * filter is used, limit_min is 0 by default 3976 */ 3977 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) && 3978 count_data < bctl->data.limit_min) 3979 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) && 3980 count_meta < bctl->meta.limit_min) 3981 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) && 3982 count_sys < bctl->sys.limit_min)) { 3983 mutex_unlock(&fs_info->reclaim_bgs_lock); 3984 goto loop; 3985 } 3986 3987 if (!chunk_reserved) { 3988 /* 3989 * We may be relocating the only data chunk we have, 3990 * which could potentially end up with losing data's 3991 * raid profile, so lets allocate an empty one in 3992 * advance. 3993 */ 3994 ret = btrfs_may_alloc_data_chunk(fs_info, 3995 found_key.offset); 3996 if (ret < 0) { 3997 mutex_unlock(&fs_info->reclaim_bgs_lock); 3998 goto error; 3999 } else if (ret == 1) { 4000 chunk_reserved = 1; 4001 } 4002 } 4003 4004 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 4005 mutex_unlock(&fs_info->reclaim_bgs_lock); 4006 if (ret == -ENOSPC) { 4007 enospc_errors++; 4008 } else if (ret == -ETXTBSY) { 4009 btrfs_info(fs_info, 4010 "skipping relocation of block group %llu due to active swapfile", 4011 found_key.offset); 4012 ret = 0; 4013 } else if (ret) { 4014 goto error; 4015 } else { 4016 spin_lock(&fs_info->balance_lock); 4017 bctl->stat.completed++; 4018 spin_unlock(&fs_info->balance_lock); 4019 } 4020 loop: 4021 if (found_key.offset == 0) 4022 break; 4023 key.offset = found_key.offset - 1; 4024 } 4025 4026 if (counting) { 4027 btrfs_release_path(path); 4028 counting = false; 4029 goto again; 4030 } 4031 error: 4032 btrfs_free_path(path); 4033 if (enospc_errors) { 4034 btrfs_info(fs_info, "%d enospc errors during balance", 4035 enospc_errors); 4036 if (!ret) 4037 ret = -ENOSPC; 4038 } 4039 4040 return ret; 4041 } 4042 4043 /** 4044 * alloc_profile_is_valid - see if a given profile is valid and reduced 4045 * @flags: profile to validate 4046 * @extended: if true @flags is treated as an extended profile 4047 */ 4048 static int alloc_profile_is_valid(u64 flags, int extended) 4049 { 4050 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK : 4051 BTRFS_BLOCK_GROUP_PROFILE_MASK); 4052 4053 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK; 4054 4055 /* 1) check that all other bits are zeroed */ 4056 if (flags & ~mask) 4057 return 0; 4058 4059 /* 2) see if profile is reduced */ 4060 if (flags == 0) 4061 return !extended; /* "0" is valid for usual profiles */ 4062 4063 return has_single_bit_set(flags); 4064 } 4065 4066 static inline int balance_need_close(struct btrfs_fs_info *fs_info) 4067 { 4068 /* cancel requested || normal exit path */ 4069 return atomic_read(&fs_info->balance_cancel_req) || 4070 (atomic_read(&fs_info->balance_pause_req) == 0 && 4071 atomic_read(&fs_info->balance_cancel_req) == 0); 4072 } 4073 4074 /* 4075 * Validate target profile against allowed profiles and return true if it's OK. 4076 * Otherwise print the error message and return false. 4077 */ 4078 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info, 4079 const struct btrfs_balance_args *bargs, 4080 u64 allowed, const char *type) 4081 { 4082 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 4083 return true; 4084 4085 if (fs_info->sectorsize < PAGE_SIZE && 4086 bargs->target & BTRFS_BLOCK_GROUP_RAID56_MASK) { 4087 btrfs_err(fs_info, 4088 "RAID56 is not yet supported for sectorsize %u with page size %lu", 4089 fs_info->sectorsize, PAGE_SIZE); 4090 return false; 4091 } 4092 /* Profile is valid and does not have bits outside of the allowed set */ 4093 if (alloc_profile_is_valid(bargs->target, 1) && 4094 (bargs->target & ~allowed) == 0) 4095 return true; 4096 4097 btrfs_err(fs_info, "balance: invalid convert %s profile %s", 4098 type, btrfs_bg_type_to_raid_name(bargs->target)); 4099 return false; 4100 } 4101 4102 /* 4103 * Fill @buf with textual description of balance filter flags @bargs, up to 4104 * @size_buf including the terminating null. The output may be trimmed if it 4105 * does not fit into the provided buffer. 4106 */ 4107 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf, 4108 u32 size_buf) 4109 { 4110 int ret; 4111 u32 size_bp = size_buf; 4112 char *bp = buf; 4113 u64 flags = bargs->flags; 4114 char tmp_buf[128] = {'\0'}; 4115 4116 if (!flags) 4117 return; 4118 4119 #define CHECK_APPEND_NOARG(a) \ 4120 do { \ 4121 ret = snprintf(bp, size_bp, (a)); \ 4122 if (ret < 0 || ret >= size_bp) \ 4123 goto out_overflow; \ 4124 size_bp -= ret; \ 4125 bp += ret; \ 4126 } while (0) 4127 4128 #define CHECK_APPEND_1ARG(a, v1) \ 4129 do { \ 4130 ret = snprintf(bp, size_bp, (a), (v1)); \ 4131 if (ret < 0 || ret >= size_bp) \ 4132 goto out_overflow; \ 4133 size_bp -= ret; \ 4134 bp += ret; \ 4135 } while (0) 4136 4137 #define CHECK_APPEND_2ARG(a, v1, v2) \ 4138 do { \ 4139 ret = snprintf(bp, size_bp, (a), (v1), (v2)); \ 4140 if (ret < 0 || ret >= size_bp) \ 4141 goto out_overflow; \ 4142 size_bp -= ret; \ 4143 bp += ret; \ 4144 } while (0) 4145 4146 if (flags & BTRFS_BALANCE_ARGS_CONVERT) 4147 CHECK_APPEND_1ARG("convert=%s,", 4148 btrfs_bg_type_to_raid_name(bargs->target)); 4149 4150 if (flags & BTRFS_BALANCE_ARGS_SOFT) 4151 CHECK_APPEND_NOARG("soft,"); 4152 4153 if (flags & BTRFS_BALANCE_ARGS_PROFILES) { 4154 btrfs_describe_block_groups(bargs->profiles, tmp_buf, 4155 sizeof(tmp_buf)); 4156 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf); 4157 } 4158 4159 if (flags & BTRFS_BALANCE_ARGS_USAGE) 4160 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage); 4161 4162 if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) 4163 CHECK_APPEND_2ARG("usage=%u..%u,", 4164 bargs->usage_min, bargs->usage_max); 4165 4166 if (flags & BTRFS_BALANCE_ARGS_DEVID) 4167 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid); 4168 4169 if (flags & BTRFS_BALANCE_ARGS_DRANGE) 4170 CHECK_APPEND_2ARG("drange=%llu..%llu,", 4171 bargs->pstart, bargs->pend); 4172 4173 if (flags & BTRFS_BALANCE_ARGS_VRANGE) 4174 CHECK_APPEND_2ARG("vrange=%llu..%llu,", 4175 bargs->vstart, bargs->vend); 4176 4177 if (flags & BTRFS_BALANCE_ARGS_LIMIT) 4178 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit); 4179 4180 if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE) 4181 CHECK_APPEND_2ARG("limit=%u..%u,", 4182 bargs->limit_min, bargs->limit_max); 4183 4184 if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) 4185 CHECK_APPEND_2ARG("stripes=%u..%u,", 4186 bargs->stripes_min, bargs->stripes_max); 4187 4188 #undef CHECK_APPEND_2ARG 4189 #undef CHECK_APPEND_1ARG 4190 #undef CHECK_APPEND_NOARG 4191 4192 out_overflow: 4193 4194 if (size_bp < size_buf) 4195 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */ 4196 else 4197 buf[0] = '\0'; 4198 } 4199 4200 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info) 4201 { 4202 u32 size_buf = 1024; 4203 char tmp_buf[192] = {'\0'}; 4204 char *buf; 4205 char *bp; 4206 u32 size_bp = size_buf; 4207 int ret; 4208 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 4209 4210 buf = kzalloc(size_buf, GFP_KERNEL); 4211 if (!buf) 4212 return; 4213 4214 bp = buf; 4215 4216 #define CHECK_APPEND_1ARG(a, v1) \ 4217 do { \ 4218 ret = snprintf(bp, size_bp, (a), (v1)); \ 4219 if (ret < 0 || ret >= size_bp) \ 4220 goto out_overflow; \ 4221 size_bp -= ret; \ 4222 bp += ret; \ 4223 } while (0) 4224 4225 if (bctl->flags & BTRFS_BALANCE_FORCE) 4226 CHECK_APPEND_1ARG("%s", "-f "); 4227 4228 if (bctl->flags & BTRFS_BALANCE_DATA) { 4229 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf)); 4230 CHECK_APPEND_1ARG("-d%s ", tmp_buf); 4231 } 4232 4233 if (bctl->flags & BTRFS_BALANCE_METADATA) { 4234 describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf)); 4235 CHECK_APPEND_1ARG("-m%s ", tmp_buf); 4236 } 4237 4238 if (bctl->flags & BTRFS_BALANCE_SYSTEM) { 4239 describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf)); 4240 CHECK_APPEND_1ARG("-s%s ", tmp_buf); 4241 } 4242 4243 #undef CHECK_APPEND_1ARG 4244 4245 out_overflow: 4246 4247 if (size_bp < size_buf) 4248 buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */ 4249 btrfs_info(fs_info, "balance: %s %s", 4250 (bctl->flags & BTRFS_BALANCE_RESUME) ? 4251 "resume" : "start", buf); 4252 4253 kfree(buf); 4254 } 4255 4256 /* 4257 * Should be called with balance mutexe held 4258 */ 4259 int btrfs_balance(struct btrfs_fs_info *fs_info, 4260 struct btrfs_balance_control *bctl, 4261 struct btrfs_ioctl_balance_args *bargs) 4262 { 4263 u64 meta_target, data_target; 4264 u64 allowed; 4265 int mixed = 0; 4266 int ret; 4267 u64 num_devices; 4268 unsigned seq; 4269 bool reducing_redundancy; 4270 int i; 4271 4272 if (btrfs_fs_closing(fs_info) || 4273 atomic_read(&fs_info->balance_pause_req) || 4274 btrfs_should_cancel_balance(fs_info)) { 4275 ret = -EINVAL; 4276 goto out; 4277 } 4278 4279 allowed = btrfs_super_incompat_flags(fs_info->super_copy); 4280 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) 4281 mixed = 1; 4282 4283 /* 4284 * In case of mixed groups both data and meta should be picked, 4285 * and identical options should be given for both of them. 4286 */ 4287 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA; 4288 if (mixed && (bctl->flags & allowed)) { 4289 if (!(bctl->flags & BTRFS_BALANCE_DATA) || 4290 !(bctl->flags & BTRFS_BALANCE_METADATA) || 4291 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) { 4292 btrfs_err(fs_info, 4293 "balance: mixed groups data and metadata options must be the same"); 4294 ret = -EINVAL; 4295 goto out; 4296 } 4297 } 4298 4299 /* 4300 * rw_devices will not change at the moment, device add/delete/replace 4301 * are exclusive 4302 */ 4303 num_devices = fs_info->fs_devices->rw_devices; 4304 4305 /* 4306 * SINGLE profile on-disk has no profile bit, but in-memory we have a 4307 * special bit for it, to make it easier to distinguish. Thus we need 4308 * to set it manually, or balance would refuse the profile. 4309 */ 4310 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE; 4311 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) 4312 if (num_devices >= btrfs_raid_array[i].devs_min) 4313 allowed |= btrfs_raid_array[i].bg_flag; 4314 4315 if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") || 4316 !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") || 4317 !validate_convert_profile(fs_info, &bctl->sys, allowed, "system")) { 4318 ret = -EINVAL; 4319 goto out; 4320 } 4321 4322 /* 4323 * Allow to reduce metadata or system integrity only if force set for 4324 * profiles with redundancy (copies, parity) 4325 */ 4326 allowed = 0; 4327 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) { 4328 if (btrfs_raid_array[i].ncopies >= 2 || 4329 btrfs_raid_array[i].tolerated_failures >= 1) 4330 allowed |= btrfs_raid_array[i].bg_flag; 4331 } 4332 do { 4333 seq = read_seqbegin(&fs_info->profiles_lock); 4334 4335 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4336 (fs_info->avail_system_alloc_bits & allowed) && 4337 !(bctl->sys.target & allowed)) || 4338 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4339 (fs_info->avail_metadata_alloc_bits & allowed) && 4340 !(bctl->meta.target & allowed))) 4341 reducing_redundancy = true; 4342 else 4343 reducing_redundancy = false; 4344 4345 /* if we're not converting, the target field is uninitialized */ 4346 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4347 bctl->meta.target : fs_info->avail_metadata_alloc_bits; 4348 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4349 bctl->data.target : fs_info->avail_data_alloc_bits; 4350 } while (read_seqretry(&fs_info->profiles_lock, seq)); 4351 4352 if (reducing_redundancy) { 4353 if (bctl->flags & BTRFS_BALANCE_FORCE) { 4354 btrfs_info(fs_info, 4355 "balance: force reducing metadata redundancy"); 4356 } else { 4357 btrfs_err(fs_info, 4358 "balance: reduces metadata redundancy, use --force if you want this"); 4359 ret = -EINVAL; 4360 goto out; 4361 } 4362 } 4363 4364 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) < 4365 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) { 4366 btrfs_warn(fs_info, 4367 "balance: metadata profile %s has lower redundancy than data profile %s", 4368 btrfs_bg_type_to_raid_name(meta_target), 4369 btrfs_bg_type_to_raid_name(data_target)); 4370 } 4371 4372 ret = insert_balance_item(fs_info, bctl); 4373 if (ret && ret != -EEXIST) 4374 goto out; 4375 4376 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) { 4377 BUG_ON(ret == -EEXIST); 4378 BUG_ON(fs_info->balance_ctl); 4379 spin_lock(&fs_info->balance_lock); 4380 fs_info->balance_ctl = bctl; 4381 spin_unlock(&fs_info->balance_lock); 4382 } else { 4383 BUG_ON(ret != -EEXIST); 4384 spin_lock(&fs_info->balance_lock); 4385 update_balance_args(bctl); 4386 spin_unlock(&fs_info->balance_lock); 4387 } 4388 4389 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4390 set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4391 describe_balance_start_or_resume(fs_info); 4392 mutex_unlock(&fs_info->balance_mutex); 4393 4394 ret = __btrfs_balance(fs_info); 4395 4396 mutex_lock(&fs_info->balance_mutex); 4397 if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) 4398 btrfs_info(fs_info, "balance: paused"); 4399 /* 4400 * Balance can be canceled by: 4401 * 4402 * - Regular cancel request 4403 * Then ret == -ECANCELED and balance_cancel_req > 0 4404 * 4405 * - Fatal signal to "btrfs" process 4406 * Either the signal caught by wait_reserve_ticket() and callers 4407 * got -EINTR, or caught by btrfs_should_cancel_balance() and 4408 * got -ECANCELED. 4409 * Either way, in this case balance_cancel_req = 0, and 4410 * ret == -EINTR or ret == -ECANCELED. 4411 * 4412 * So here we only check the return value to catch canceled balance. 4413 */ 4414 else if (ret == -ECANCELED || ret == -EINTR) 4415 btrfs_info(fs_info, "balance: canceled"); 4416 else 4417 btrfs_info(fs_info, "balance: ended with status: %d", ret); 4418 4419 clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4420 4421 if (bargs) { 4422 memset(bargs, 0, sizeof(*bargs)); 4423 btrfs_update_ioctl_balance_args(fs_info, bargs); 4424 } 4425 4426 if ((ret && ret != -ECANCELED && ret != -ENOSPC) || 4427 balance_need_close(fs_info)) { 4428 reset_balance_state(fs_info); 4429 btrfs_exclop_finish(fs_info); 4430 } 4431 4432 wake_up(&fs_info->balance_wait_q); 4433 4434 return ret; 4435 out: 4436 if (bctl->flags & BTRFS_BALANCE_RESUME) 4437 reset_balance_state(fs_info); 4438 else 4439 kfree(bctl); 4440 btrfs_exclop_finish(fs_info); 4441 4442 return ret; 4443 } 4444 4445 static int balance_kthread(void *data) 4446 { 4447 struct btrfs_fs_info *fs_info = data; 4448 int ret = 0; 4449 4450 mutex_lock(&fs_info->balance_mutex); 4451 if (fs_info->balance_ctl) 4452 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL); 4453 mutex_unlock(&fs_info->balance_mutex); 4454 4455 return ret; 4456 } 4457 4458 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info) 4459 { 4460 struct task_struct *tsk; 4461 4462 mutex_lock(&fs_info->balance_mutex); 4463 if (!fs_info->balance_ctl) { 4464 mutex_unlock(&fs_info->balance_mutex); 4465 return 0; 4466 } 4467 mutex_unlock(&fs_info->balance_mutex); 4468 4469 if (btrfs_test_opt(fs_info, SKIP_BALANCE)) { 4470 btrfs_info(fs_info, "balance: resume skipped"); 4471 return 0; 4472 } 4473 4474 /* 4475 * A ro->rw remount sequence should continue with the paused balance 4476 * regardless of who pauses it, system or the user as of now, so set 4477 * the resume flag. 4478 */ 4479 spin_lock(&fs_info->balance_lock); 4480 fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME; 4481 spin_unlock(&fs_info->balance_lock); 4482 4483 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); 4484 return PTR_ERR_OR_ZERO(tsk); 4485 } 4486 4487 int btrfs_recover_balance(struct btrfs_fs_info *fs_info) 4488 { 4489 struct btrfs_balance_control *bctl; 4490 struct btrfs_balance_item *item; 4491 struct btrfs_disk_balance_args disk_bargs; 4492 struct btrfs_path *path; 4493 struct extent_buffer *leaf; 4494 struct btrfs_key key; 4495 int ret; 4496 4497 path = btrfs_alloc_path(); 4498 if (!path) 4499 return -ENOMEM; 4500 4501 key.objectid = BTRFS_BALANCE_OBJECTID; 4502 key.type = BTRFS_TEMPORARY_ITEM_KEY; 4503 key.offset = 0; 4504 4505 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4506 if (ret < 0) 4507 goto out; 4508 if (ret > 0) { /* ret = -ENOENT; */ 4509 ret = 0; 4510 goto out; 4511 } 4512 4513 bctl = kzalloc(sizeof(*bctl), GFP_NOFS); 4514 if (!bctl) { 4515 ret = -ENOMEM; 4516 goto out; 4517 } 4518 4519 leaf = path->nodes[0]; 4520 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 4521 4522 bctl->flags = btrfs_balance_flags(leaf, item); 4523 bctl->flags |= BTRFS_BALANCE_RESUME; 4524 4525 btrfs_balance_data(leaf, item, &disk_bargs); 4526 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs); 4527 btrfs_balance_meta(leaf, item, &disk_bargs); 4528 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs); 4529 btrfs_balance_sys(leaf, item, &disk_bargs); 4530 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs); 4531 4532 /* 4533 * This should never happen, as the paused balance state is recovered 4534 * during mount without any chance of other exclusive ops to collide. 4535 * 4536 * This gives the exclusive op status to balance and keeps in paused 4537 * state until user intervention (cancel or umount). If the ownership 4538 * cannot be assigned, show a message but do not fail. The balance 4539 * is in a paused state and must have fs_info::balance_ctl properly 4540 * set up. 4541 */ 4542 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) 4543 btrfs_warn(fs_info, 4544 "balance: cannot set exclusive op status, resume manually"); 4545 4546 btrfs_release_path(path); 4547 4548 mutex_lock(&fs_info->balance_mutex); 4549 BUG_ON(fs_info->balance_ctl); 4550 spin_lock(&fs_info->balance_lock); 4551 fs_info->balance_ctl = bctl; 4552 spin_unlock(&fs_info->balance_lock); 4553 mutex_unlock(&fs_info->balance_mutex); 4554 out: 4555 btrfs_free_path(path); 4556 return ret; 4557 } 4558 4559 int btrfs_pause_balance(struct btrfs_fs_info *fs_info) 4560 { 4561 int ret = 0; 4562 4563 mutex_lock(&fs_info->balance_mutex); 4564 if (!fs_info->balance_ctl) { 4565 mutex_unlock(&fs_info->balance_mutex); 4566 return -ENOTCONN; 4567 } 4568 4569 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4570 atomic_inc(&fs_info->balance_pause_req); 4571 mutex_unlock(&fs_info->balance_mutex); 4572 4573 wait_event(fs_info->balance_wait_q, 4574 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4575 4576 mutex_lock(&fs_info->balance_mutex); 4577 /* we are good with balance_ctl ripped off from under us */ 4578 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4579 atomic_dec(&fs_info->balance_pause_req); 4580 } else { 4581 ret = -ENOTCONN; 4582 } 4583 4584 mutex_unlock(&fs_info->balance_mutex); 4585 return ret; 4586 } 4587 4588 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) 4589 { 4590 mutex_lock(&fs_info->balance_mutex); 4591 if (!fs_info->balance_ctl) { 4592 mutex_unlock(&fs_info->balance_mutex); 4593 return -ENOTCONN; 4594 } 4595 4596 /* 4597 * A paused balance with the item stored on disk can be resumed at 4598 * mount time if the mount is read-write. Otherwise it's still paused 4599 * and we must not allow cancelling as it deletes the item. 4600 */ 4601 if (sb_rdonly(fs_info->sb)) { 4602 mutex_unlock(&fs_info->balance_mutex); 4603 return -EROFS; 4604 } 4605 4606 atomic_inc(&fs_info->balance_cancel_req); 4607 /* 4608 * if we are running just wait and return, balance item is 4609 * deleted in btrfs_balance in this case 4610 */ 4611 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4612 mutex_unlock(&fs_info->balance_mutex); 4613 wait_event(fs_info->balance_wait_q, 4614 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4615 mutex_lock(&fs_info->balance_mutex); 4616 } else { 4617 mutex_unlock(&fs_info->balance_mutex); 4618 /* 4619 * Lock released to allow other waiters to continue, we'll 4620 * reexamine the status again. 4621 */ 4622 mutex_lock(&fs_info->balance_mutex); 4623 4624 if (fs_info->balance_ctl) { 4625 reset_balance_state(fs_info); 4626 btrfs_exclop_finish(fs_info); 4627 btrfs_info(fs_info, "balance: canceled"); 4628 } 4629 } 4630 4631 BUG_ON(fs_info->balance_ctl || 4632 test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4633 atomic_dec(&fs_info->balance_cancel_req); 4634 mutex_unlock(&fs_info->balance_mutex); 4635 return 0; 4636 } 4637 4638 int btrfs_uuid_scan_kthread(void *data) 4639 { 4640 struct btrfs_fs_info *fs_info = data; 4641 struct btrfs_root *root = fs_info->tree_root; 4642 struct btrfs_key key; 4643 struct btrfs_path *path = NULL; 4644 int ret = 0; 4645 struct extent_buffer *eb; 4646 int slot; 4647 struct btrfs_root_item root_item; 4648 u32 item_size; 4649 struct btrfs_trans_handle *trans = NULL; 4650 bool closing = false; 4651 4652 path = btrfs_alloc_path(); 4653 if (!path) { 4654 ret = -ENOMEM; 4655 goto out; 4656 } 4657 4658 key.objectid = 0; 4659 key.type = BTRFS_ROOT_ITEM_KEY; 4660 key.offset = 0; 4661 4662 while (1) { 4663 if (btrfs_fs_closing(fs_info)) { 4664 closing = true; 4665 break; 4666 } 4667 ret = btrfs_search_forward(root, &key, path, 4668 BTRFS_OLDEST_GENERATION); 4669 if (ret) { 4670 if (ret > 0) 4671 ret = 0; 4672 break; 4673 } 4674 4675 if (key.type != BTRFS_ROOT_ITEM_KEY || 4676 (key.objectid < BTRFS_FIRST_FREE_OBJECTID && 4677 key.objectid != BTRFS_FS_TREE_OBJECTID) || 4678 key.objectid > BTRFS_LAST_FREE_OBJECTID) 4679 goto skip; 4680 4681 eb = path->nodes[0]; 4682 slot = path->slots[0]; 4683 item_size = btrfs_item_size(eb, slot); 4684 if (item_size < sizeof(root_item)) 4685 goto skip; 4686 4687 read_extent_buffer(eb, &root_item, 4688 btrfs_item_ptr_offset(eb, slot), 4689 (int)sizeof(root_item)); 4690 if (btrfs_root_refs(&root_item) == 0) 4691 goto skip; 4692 4693 if (!btrfs_is_empty_uuid(root_item.uuid) || 4694 !btrfs_is_empty_uuid(root_item.received_uuid)) { 4695 if (trans) 4696 goto update_tree; 4697 4698 btrfs_release_path(path); 4699 /* 4700 * 1 - subvol uuid item 4701 * 1 - received_subvol uuid item 4702 */ 4703 trans = btrfs_start_transaction(fs_info->uuid_root, 2); 4704 if (IS_ERR(trans)) { 4705 ret = PTR_ERR(trans); 4706 break; 4707 } 4708 continue; 4709 } else { 4710 goto skip; 4711 } 4712 update_tree: 4713 btrfs_release_path(path); 4714 if (!btrfs_is_empty_uuid(root_item.uuid)) { 4715 ret = btrfs_uuid_tree_add(trans, root_item.uuid, 4716 BTRFS_UUID_KEY_SUBVOL, 4717 key.objectid); 4718 if (ret < 0) { 4719 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4720 ret); 4721 break; 4722 } 4723 } 4724 4725 if (!btrfs_is_empty_uuid(root_item.received_uuid)) { 4726 ret = btrfs_uuid_tree_add(trans, 4727 root_item.received_uuid, 4728 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4729 key.objectid); 4730 if (ret < 0) { 4731 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4732 ret); 4733 break; 4734 } 4735 } 4736 4737 skip: 4738 btrfs_release_path(path); 4739 if (trans) { 4740 ret = btrfs_end_transaction(trans); 4741 trans = NULL; 4742 if (ret) 4743 break; 4744 } 4745 4746 if (key.offset < (u64)-1) { 4747 key.offset++; 4748 } else if (key.type < BTRFS_ROOT_ITEM_KEY) { 4749 key.offset = 0; 4750 key.type = BTRFS_ROOT_ITEM_KEY; 4751 } else if (key.objectid < (u64)-1) { 4752 key.offset = 0; 4753 key.type = BTRFS_ROOT_ITEM_KEY; 4754 key.objectid++; 4755 } else { 4756 break; 4757 } 4758 cond_resched(); 4759 } 4760 4761 out: 4762 btrfs_free_path(path); 4763 if (trans && !IS_ERR(trans)) 4764 btrfs_end_transaction(trans); 4765 if (ret) 4766 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret); 4767 else if (!closing) 4768 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); 4769 up(&fs_info->uuid_tree_rescan_sem); 4770 return 0; 4771 } 4772 4773 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info) 4774 { 4775 struct btrfs_trans_handle *trans; 4776 struct btrfs_root *tree_root = fs_info->tree_root; 4777 struct btrfs_root *uuid_root; 4778 struct task_struct *task; 4779 int ret; 4780 4781 /* 4782 * 1 - root node 4783 * 1 - root item 4784 */ 4785 trans = btrfs_start_transaction(tree_root, 2); 4786 if (IS_ERR(trans)) 4787 return PTR_ERR(trans); 4788 4789 uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID); 4790 if (IS_ERR(uuid_root)) { 4791 ret = PTR_ERR(uuid_root); 4792 btrfs_abort_transaction(trans, ret); 4793 btrfs_end_transaction(trans); 4794 return ret; 4795 } 4796 4797 fs_info->uuid_root = uuid_root; 4798 4799 ret = btrfs_commit_transaction(trans); 4800 if (ret) 4801 return ret; 4802 4803 down(&fs_info->uuid_tree_rescan_sem); 4804 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid"); 4805 if (IS_ERR(task)) { 4806 /* fs_info->update_uuid_tree_gen remains 0 in all error case */ 4807 btrfs_warn(fs_info, "failed to start uuid_scan task"); 4808 up(&fs_info->uuid_tree_rescan_sem); 4809 return PTR_ERR(task); 4810 } 4811 4812 return 0; 4813 } 4814 4815 /* 4816 * shrinking a device means finding all of the device extents past 4817 * the new size, and then following the back refs to the chunks. 4818 * The chunk relocation code actually frees the device extent 4819 */ 4820 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) 4821 { 4822 struct btrfs_fs_info *fs_info = device->fs_info; 4823 struct btrfs_root *root = fs_info->dev_root; 4824 struct btrfs_trans_handle *trans; 4825 struct btrfs_dev_extent *dev_extent = NULL; 4826 struct btrfs_path *path; 4827 u64 length; 4828 u64 chunk_offset; 4829 int ret; 4830 int slot; 4831 int failed = 0; 4832 bool retried = false; 4833 struct extent_buffer *l; 4834 struct btrfs_key key; 4835 struct btrfs_super_block *super_copy = fs_info->super_copy; 4836 u64 old_total = btrfs_super_total_bytes(super_copy); 4837 u64 old_size = btrfs_device_get_total_bytes(device); 4838 u64 diff; 4839 u64 start; 4840 4841 new_size = round_down(new_size, fs_info->sectorsize); 4842 start = new_size; 4843 diff = round_down(old_size - new_size, fs_info->sectorsize); 4844 4845 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 4846 return -EINVAL; 4847 4848 path = btrfs_alloc_path(); 4849 if (!path) 4850 return -ENOMEM; 4851 4852 path->reada = READA_BACK; 4853 4854 trans = btrfs_start_transaction(root, 0); 4855 if (IS_ERR(trans)) { 4856 btrfs_free_path(path); 4857 return PTR_ERR(trans); 4858 } 4859 4860 mutex_lock(&fs_info->chunk_mutex); 4861 4862 btrfs_device_set_total_bytes(device, new_size); 4863 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 4864 device->fs_devices->total_rw_bytes -= diff; 4865 atomic64_sub(diff, &fs_info->free_chunk_space); 4866 } 4867 4868 /* 4869 * Once the device's size has been set to the new size, ensure all 4870 * in-memory chunks are synced to disk so that the loop below sees them 4871 * and relocates them accordingly. 4872 */ 4873 if (contains_pending_extent(device, &start, diff)) { 4874 mutex_unlock(&fs_info->chunk_mutex); 4875 ret = btrfs_commit_transaction(trans); 4876 if (ret) 4877 goto done; 4878 } else { 4879 mutex_unlock(&fs_info->chunk_mutex); 4880 btrfs_end_transaction(trans); 4881 } 4882 4883 again: 4884 key.objectid = device->devid; 4885 key.offset = (u64)-1; 4886 key.type = BTRFS_DEV_EXTENT_KEY; 4887 4888 do { 4889 mutex_lock(&fs_info->reclaim_bgs_lock); 4890 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4891 if (ret < 0) { 4892 mutex_unlock(&fs_info->reclaim_bgs_lock); 4893 goto done; 4894 } 4895 4896 ret = btrfs_previous_item(root, path, 0, key.type); 4897 if (ret) { 4898 mutex_unlock(&fs_info->reclaim_bgs_lock); 4899 if (ret < 0) 4900 goto done; 4901 ret = 0; 4902 btrfs_release_path(path); 4903 break; 4904 } 4905 4906 l = path->nodes[0]; 4907 slot = path->slots[0]; 4908 btrfs_item_key_to_cpu(l, &key, path->slots[0]); 4909 4910 if (key.objectid != device->devid) { 4911 mutex_unlock(&fs_info->reclaim_bgs_lock); 4912 btrfs_release_path(path); 4913 break; 4914 } 4915 4916 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 4917 length = btrfs_dev_extent_length(l, dev_extent); 4918 4919 if (key.offset + length <= new_size) { 4920 mutex_unlock(&fs_info->reclaim_bgs_lock); 4921 btrfs_release_path(path); 4922 break; 4923 } 4924 4925 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); 4926 btrfs_release_path(path); 4927 4928 /* 4929 * We may be relocating the only data chunk we have, 4930 * which could potentially end up with losing data's 4931 * raid profile, so lets allocate an empty one in 4932 * advance. 4933 */ 4934 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset); 4935 if (ret < 0) { 4936 mutex_unlock(&fs_info->reclaim_bgs_lock); 4937 goto done; 4938 } 4939 4940 ret = btrfs_relocate_chunk(fs_info, chunk_offset); 4941 mutex_unlock(&fs_info->reclaim_bgs_lock); 4942 if (ret == -ENOSPC) { 4943 failed++; 4944 } else if (ret) { 4945 if (ret == -ETXTBSY) { 4946 btrfs_warn(fs_info, 4947 "could not shrink block group %llu due to active swapfile", 4948 chunk_offset); 4949 } 4950 goto done; 4951 } 4952 } while (key.offset-- > 0); 4953 4954 if (failed && !retried) { 4955 failed = 0; 4956 retried = true; 4957 goto again; 4958 } else if (failed && retried) { 4959 ret = -ENOSPC; 4960 goto done; 4961 } 4962 4963 /* Shrinking succeeded, else we would be at "done". */ 4964 trans = btrfs_start_transaction(root, 0); 4965 if (IS_ERR(trans)) { 4966 ret = PTR_ERR(trans); 4967 goto done; 4968 } 4969 4970 mutex_lock(&fs_info->chunk_mutex); 4971 /* Clear all state bits beyond the shrunk device size */ 4972 clear_extent_bits(&device->alloc_state, new_size, (u64)-1, 4973 CHUNK_STATE_MASK); 4974 4975 btrfs_device_set_disk_total_bytes(device, new_size); 4976 if (list_empty(&device->post_commit_list)) 4977 list_add_tail(&device->post_commit_list, 4978 &trans->transaction->dev_update_list); 4979 4980 WARN_ON(diff > old_total); 4981 btrfs_set_super_total_bytes(super_copy, 4982 round_down(old_total - diff, fs_info->sectorsize)); 4983 mutex_unlock(&fs_info->chunk_mutex); 4984 4985 btrfs_reserve_chunk_metadata(trans, false); 4986 /* Now btrfs_update_device() will change the on-disk size. */ 4987 ret = btrfs_update_device(trans, device); 4988 btrfs_trans_release_chunk_metadata(trans); 4989 if (ret < 0) { 4990 btrfs_abort_transaction(trans, ret); 4991 btrfs_end_transaction(trans); 4992 } else { 4993 ret = btrfs_commit_transaction(trans); 4994 } 4995 done: 4996 btrfs_free_path(path); 4997 if (ret) { 4998 mutex_lock(&fs_info->chunk_mutex); 4999 btrfs_device_set_total_bytes(device, old_size); 5000 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 5001 device->fs_devices->total_rw_bytes += diff; 5002 atomic64_add(diff, &fs_info->free_chunk_space); 5003 mutex_unlock(&fs_info->chunk_mutex); 5004 } 5005 return ret; 5006 } 5007 5008 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info, 5009 struct btrfs_key *key, 5010 struct btrfs_chunk *chunk, int item_size) 5011 { 5012 struct btrfs_super_block *super_copy = fs_info->super_copy; 5013 struct btrfs_disk_key disk_key; 5014 u32 array_size; 5015 u8 *ptr; 5016 5017 lockdep_assert_held(&fs_info->chunk_mutex); 5018 5019 array_size = btrfs_super_sys_array_size(super_copy); 5020 if (array_size + item_size + sizeof(disk_key) 5021 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) 5022 return -EFBIG; 5023 5024 ptr = super_copy->sys_chunk_array + array_size; 5025 btrfs_cpu_key_to_disk(&disk_key, key); 5026 memcpy(ptr, &disk_key, sizeof(disk_key)); 5027 ptr += sizeof(disk_key); 5028 memcpy(ptr, chunk, item_size); 5029 item_size += sizeof(disk_key); 5030 btrfs_set_super_sys_array_size(super_copy, array_size + item_size); 5031 5032 return 0; 5033 } 5034 5035 /* 5036 * sort the devices in descending order by max_avail, total_avail 5037 */ 5038 static int btrfs_cmp_device_info(const void *a, const void *b) 5039 { 5040 const struct btrfs_device_info *di_a = a; 5041 const struct btrfs_device_info *di_b = b; 5042 5043 if (di_a->max_avail > di_b->max_avail) 5044 return -1; 5045 if (di_a->max_avail < di_b->max_avail) 5046 return 1; 5047 if (di_a->total_avail > di_b->total_avail) 5048 return -1; 5049 if (di_a->total_avail < di_b->total_avail) 5050 return 1; 5051 return 0; 5052 } 5053 5054 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type) 5055 { 5056 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK)) 5057 return; 5058 5059 btrfs_set_fs_incompat(info, RAID56); 5060 } 5061 5062 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type) 5063 { 5064 if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4))) 5065 return; 5066 5067 btrfs_set_fs_incompat(info, RAID1C34); 5068 } 5069 5070 /* 5071 * Structure used internally for btrfs_create_chunk() function. 5072 * Wraps needed parameters. 5073 */ 5074 struct alloc_chunk_ctl { 5075 u64 start; 5076 u64 type; 5077 /* Total number of stripes to allocate */ 5078 int num_stripes; 5079 /* sub_stripes info for map */ 5080 int sub_stripes; 5081 /* Stripes per device */ 5082 int dev_stripes; 5083 /* Maximum number of devices to use */ 5084 int devs_max; 5085 /* Minimum number of devices to use */ 5086 int devs_min; 5087 /* ndevs has to be a multiple of this */ 5088 int devs_increment; 5089 /* Number of copies */ 5090 int ncopies; 5091 /* Number of stripes worth of bytes to store parity information */ 5092 int nparity; 5093 u64 max_stripe_size; 5094 u64 max_chunk_size; 5095 u64 dev_extent_min; 5096 u64 stripe_size; 5097 u64 chunk_size; 5098 int ndevs; 5099 }; 5100 5101 static void init_alloc_chunk_ctl_policy_regular( 5102 struct btrfs_fs_devices *fs_devices, 5103 struct alloc_chunk_ctl *ctl) 5104 { 5105 u64 type = ctl->type; 5106 5107 if (type & BTRFS_BLOCK_GROUP_DATA) { 5108 ctl->max_stripe_size = SZ_1G; 5109 ctl->max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE; 5110 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 5111 /* For larger filesystems, use larger metadata chunks */ 5112 if (fs_devices->total_rw_bytes > 50ULL * SZ_1G) 5113 ctl->max_stripe_size = SZ_1G; 5114 else 5115 ctl->max_stripe_size = SZ_256M; 5116 ctl->max_chunk_size = ctl->max_stripe_size; 5117 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 5118 ctl->max_stripe_size = SZ_32M; 5119 ctl->max_chunk_size = 2 * ctl->max_stripe_size; 5120 ctl->devs_max = min_t(int, ctl->devs_max, 5121 BTRFS_MAX_DEVS_SYS_CHUNK); 5122 } else { 5123 BUG(); 5124 } 5125 5126 /* We don't want a chunk larger than 10% of writable space */ 5127 ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1), 5128 ctl->max_chunk_size); 5129 ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes; 5130 } 5131 5132 static void init_alloc_chunk_ctl_policy_zoned( 5133 struct btrfs_fs_devices *fs_devices, 5134 struct alloc_chunk_ctl *ctl) 5135 { 5136 u64 zone_size = fs_devices->fs_info->zone_size; 5137 u64 limit; 5138 int min_num_stripes = ctl->devs_min * ctl->dev_stripes; 5139 int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies; 5140 u64 min_chunk_size = min_data_stripes * zone_size; 5141 u64 type = ctl->type; 5142 5143 ctl->max_stripe_size = zone_size; 5144 if (type & BTRFS_BLOCK_GROUP_DATA) { 5145 ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE, 5146 zone_size); 5147 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 5148 ctl->max_chunk_size = ctl->max_stripe_size; 5149 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 5150 ctl->max_chunk_size = 2 * ctl->max_stripe_size; 5151 ctl->devs_max = min_t(int, ctl->devs_max, 5152 BTRFS_MAX_DEVS_SYS_CHUNK); 5153 } else { 5154 BUG(); 5155 } 5156 5157 /* We don't want a chunk larger than 10% of writable space */ 5158 limit = max(round_down(div_factor(fs_devices->total_rw_bytes, 1), 5159 zone_size), 5160 min_chunk_size); 5161 ctl->max_chunk_size = min(limit, ctl->max_chunk_size); 5162 ctl->dev_extent_min = zone_size * ctl->dev_stripes; 5163 } 5164 5165 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices, 5166 struct alloc_chunk_ctl *ctl) 5167 { 5168 int index = btrfs_bg_flags_to_raid_index(ctl->type); 5169 5170 ctl->sub_stripes = btrfs_raid_array[index].sub_stripes; 5171 ctl->dev_stripes = btrfs_raid_array[index].dev_stripes; 5172 ctl->devs_max = btrfs_raid_array[index].devs_max; 5173 if (!ctl->devs_max) 5174 ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info); 5175 ctl->devs_min = btrfs_raid_array[index].devs_min; 5176 ctl->devs_increment = btrfs_raid_array[index].devs_increment; 5177 ctl->ncopies = btrfs_raid_array[index].ncopies; 5178 ctl->nparity = btrfs_raid_array[index].nparity; 5179 ctl->ndevs = 0; 5180 5181 switch (fs_devices->chunk_alloc_policy) { 5182 case BTRFS_CHUNK_ALLOC_REGULAR: 5183 init_alloc_chunk_ctl_policy_regular(fs_devices, ctl); 5184 break; 5185 case BTRFS_CHUNK_ALLOC_ZONED: 5186 init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl); 5187 break; 5188 default: 5189 BUG(); 5190 } 5191 } 5192 5193 static int gather_device_info(struct btrfs_fs_devices *fs_devices, 5194 struct alloc_chunk_ctl *ctl, 5195 struct btrfs_device_info *devices_info) 5196 { 5197 struct btrfs_fs_info *info = fs_devices->fs_info; 5198 struct btrfs_device *device; 5199 u64 total_avail; 5200 u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes; 5201 int ret; 5202 int ndevs = 0; 5203 u64 max_avail; 5204 u64 dev_offset; 5205 5206 /* 5207 * in the first pass through the devices list, we gather information 5208 * about the available holes on each device. 5209 */ 5210 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { 5211 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 5212 WARN(1, KERN_ERR 5213 "BTRFS: read-only device in alloc_list\n"); 5214 continue; 5215 } 5216 5217 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 5218 &device->dev_state) || 5219 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 5220 continue; 5221 5222 if (device->total_bytes > device->bytes_used) 5223 total_avail = device->total_bytes - device->bytes_used; 5224 else 5225 total_avail = 0; 5226 5227 /* If there is no space on this device, skip it. */ 5228 if (total_avail < ctl->dev_extent_min) 5229 continue; 5230 5231 ret = find_free_dev_extent(device, dev_extent_want, &dev_offset, 5232 &max_avail); 5233 if (ret && ret != -ENOSPC) 5234 return ret; 5235 5236 if (ret == 0) 5237 max_avail = dev_extent_want; 5238 5239 if (max_avail < ctl->dev_extent_min) { 5240 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5241 btrfs_debug(info, 5242 "%s: devid %llu has no free space, have=%llu want=%llu", 5243 __func__, device->devid, max_avail, 5244 ctl->dev_extent_min); 5245 continue; 5246 } 5247 5248 if (ndevs == fs_devices->rw_devices) { 5249 WARN(1, "%s: found more than %llu devices\n", 5250 __func__, fs_devices->rw_devices); 5251 break; 5252 } 5253 devices_info[ndevs].dev_offset = dev_offset; 5254 devices_info[ndevs].max_avail = max_avail; 5255 devices_info[ndevs].total_avail = total_avail; 5256 devices_info[ndevs].dev = device; 5257 ++ndevs; 5258 } 5259 ctl->ndevs = ndevs; 5260 5261 /* 5262 * now sort the devices by hole size / available space 5263 */ 5264 sort(devices_info, ndevs, sizeof(struct btrfs_device_info), 5265 btrfs_cmp_device_info, NULL); 5266 5267 return 0; 5268 } 5269 5270 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl, 5271 struct btrfs_device_info *devices_info) 5272 { 5273 /* Number of stripes that count for block group size */ 5274 int data_stripes; 5275 5276 /* 5277 * The primary goal is to maximize the number of stripes, so use as 5278 * many devices as possible, even if the stripes are not maximum sized. 5279 * 5280 * The DUP profile stores more than one stripe per device, the 5281 * max_avail is the total size so we have to adjust. 5282 */ 5283 ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail, 5284 ctl->dev_stripes); 5285 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5286 5287 /* This will have to be fixed for RAID1 and RAID10 over more drives */ 5288 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5289 5290 /* 5291 * Use the number of data stripes to figure out how big this chunk is 5292 * really going to be in terms of logical address space, and compare 5293 * that answer with the max chunk size. If it's higher, we try to 5294 * reduce stripe_size. 5295 */ 5296 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) { 5297 /* 5298 * Reduce stripe_size, round it up to a 16MB boundary again and 5299 * then use it, unless it ends up being even bigger than the 5300 * previous value we had already. 5301 */ 5302 ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size, 5303 data_stripes), SZ_16M), 5304 ctl->stripe_size); 5305 } 5306 5307 /* Align to BTRFS_STRIPE_LEN */ 5308 ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN); 5309 ctl->chunk_size = ctl->stripe_size * data_stripes; 5310 5311 return 0; 5312 } 5313 5314 static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl, 5315 struct btrfs_device_info *devices_info) 5316 { 5317 u64 zone_size = devices_info[0].dev->zone_info->zone_size; 5318 /* Number of stripes that count for block group size */ 5319 int data_stripes; 5320 5321 /* 5322 * It should hold because: 5323 * dev_extent_min == dev_extent_want == zone_size * dev_stripes 5324 */ 5325 ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min); 5326 5327 ctl->stripe_size = zone_size; 5328 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5329 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5330 5331 /* stripe_size is fixed in zoned filesysmte. Reduce ndevs instead. */ 5332 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) { 5333 ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies, 5334 ctl->stripe_size) + ctl->nparity, 5335 ctl->dev_stripes); 5336 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5337 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5338 ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size); 5339 } 5340 5341 ctl->chunk_size = ctl->stripe_size * data_stripes; 5342 5343 return 0; 5344 } 5345 5346 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices, 5347 struct alloc_chunk_ctl *ctl, 5348 struct btrfs_device_info *devices_info) 5349 { 5350 struct btrfs_fs_info *info = fs_devices->fs_info; 5351 5352 /* 5353 * Round down to number of usable stripes, devs_increment can be any 5354 * number so we can't use round_down() that requires power of 2, while 5355 * rounddown is safe. 5356 */ 5357 ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment); 5358 5359 if (ctl->ndevs < ctl->devs_min) { 5360 if (btrfs_test_opt(info, ENOSPC_DEBUG)) { 5361 btrfs_debug(info, 5362 "%s: not enough devices with free space: have=%d minimum required=%d", 5363 __func__, ctl->ndevs, ctl->devs_min); 5364 } 5365 return -ENOSPC; 5366 } 5367 5368 ctl->ndevs = min(ctl->ndevs, ctl->devs_max); 5369 5370 switch (fs_devices->chunk_alloc_policy) { 5371 case BTRFS_CHUNK_ALLOC_REGULAR: 5372 return decide_stripe_size_regular(ctl, devices_info); 5373 case BTRFS_CHUNK_ALLOC_ZONED: 5374 return decide_stripe_size_zoned(ctl, devices_info); 5375 default: 5376 BUG(); 5377 } 5378 } 5379 5380 static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans, 5381 struct alloc_chunk_ctl *ctl, 5382 struct btrfs_device_info *devices_info) 5383 { 5384 struct btrfs_fs_info *info = trans->fs_info; 5385 struct map_lookup *map = NULL; 5386 struct extent_map_tree *em_tree; 5387 struct btrfs_block_group *block_group; 5388 struct extent_map *em; 5389 u64 start = ctl->start; 5390 u64 type = ctl->type; 5391 int ret; 5392 int i; 5393 int j; 5394 5395 map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS); 5396 if (!map) 5397 return ERR_PTR(-ENOMEM); 5398 map->num_stripes = ctl->num_stripes; 5399 5400 for (i = 0; i < ctl->ndevs; ++i) { 5401 for (j = 0; j < ctl->dev_stripes; ++j) { 5402 int s = i * ctl->dev_stripes + j; 5403 map->stripes[s].dev = devices_info[i].dev; 5404 map->stripes[s].physical = devices_info[i].dev_offset + 5405 j * ctl->stripe_size; 5406 } 5407 } 5408 map->stripe_len = BTRFS_STRIPE_LEN; 5409 map->io_align = BTRFS_STRIPE_LEN; 5410 map->io_width = BTRFS_STRIPE_LEN; 5411 map->type = type; 5412 map->sub_stripes = ctl->sub_stripes; 5413 5414 trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size); 5415 5416 em = alloc_extent_map(); 5417 if (!em) { 5418 kfree(map); 5419 return ERR_PTR(-ENOMEM); 5420 } 5421 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 5422 em->map_lookup = map; 5423 em->start = start; 5424 em->len = ctl->chunk_size; 5425 em->block_start = 0; 5426 em->block_len = em->len; 5427 em->orig_block_len = ctl->stripe_size; 5428 5429 em_tree = &info->mapping_tree; 5430 write_lock(&em_tree->lock); 5431 ret = add_extent_mapping(em_tree, em, 0); 5432 if (ret) { 5433 write_unlock(&em_tree->lock); 5434 free_extent_map(em); 5435 return ERR_PTR(ret); 5436 } 5437 write_unlock(&em_tree->lock); 5438 5439 block_group = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size); 5440 if (IS_ERR(block_group)) 5441 goto error_del_extent; 5442 5443 for (i = 0; i < map->num_stripes; i++) { 5444 struct btrfs_device *dev = map->stripes[i].dev; 5445 5446 btrfs_device_set_bytes_used(dev, 5447 dev->bytes_used + ctl->stripe_size); 5448 if (list_empty(&dev->post_commit_list)) 5449 list_add_tail(&dev->post_commit_list, 5450 &trans->transaction->dev_update_list); 5451 } 5452 5453 atomic64_sub(ctl->stripe_size * map->num_stripes, 5454 &info->free_chunk_space); 5455 5456 free_extent_map(em); 5457 check_raid56_incompat_flag(info, type); 5458 check_raid1c34_incompat_flag(info, type); 5459 5460 return block_group; 5461 5462 error_del_extent: 5463 write_lock(&em_tree->lock); 5464 remove_extent_mapping(em_tree, em); 5465 write_unlock(&em_tree->lock); 5466 5467 /* One for our allocation */ 5468 free_extent_map(em); 5469 /* One for the tree reference */ 5470 free_extent_map(em); 5471 5472 return block_group; 5473 } 5474 5475 struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans, 5476 u64 type) 5477 { 5478 struct btrfs_fs_info *info = trans->fs_info; 5479 struct btrfs_fs_devices *fs_devices = info->fs_devices; 5480 struct btrfs_device_info *devices_info = NULL; 5481 struct alloc_chunk_ctl ctl; 5482 struct btrfs_block_group *block_group; 5483 int ret; 5484 5485 lockdep_assert_held(&info->chunk_mutex); 5486 5487 if (!alloc_profile_is_valid(type, 0)) { 5488 ASSERT(0); 5489 return ERR_PTR(-EINVAL); 5490 } 5491 5492 if (list_empty(&fs_devices->alloc_list)) { 5493 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5494 btrfs_debug(info, "%s: no writable device", __func__); 5495 return ERR_PTR(-ENOSPC); 5496 } 5497 5498 if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { 5499 btrfs_err(info, "invalid chunk type 0x%llx requested", type); 5500 ASSERT(0); 5501 return ERR_PTR(-EINVAL); 5502 } 5503 5504 ctl.start = find_next_chunk(info); 5505 ctl.type = type; 5506 init_alloc_chunk_ctl(fs_devices, &ctl); 5507 5508 devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info), 5509 GFP_NOFS); 5510 if (!devices_info) 5511 return ERR_PTR(-ENOMEM); 5512 5513 ret = gather_device_info(fs_devices, &ctl, devices_info); 5514 if (ret < 0) { 5515 block_group = ERR_PTR(ret); 5516 goto out; 5517 } 5518 5519 ret = decide_stripe_size(fs_devices, &ctl, devices_info); 5520 if (ret < 0) { 5521 block_group = ERR_PTR(ret); 5522 goto out; 5523 } 5524 5525 block_group = create_chunk(trans, &ctl, devices_info); 5526 5527 out: 5528 kfree(devices_info); 5529 return block_group; 5530 } 5531 5532 /* 5533 * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to the 5534 * phase 1 of chunk allocation. It belongs to phase 2 only when allocating system 5535 * chunks. 5536 * 5537 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 5538 * phases. 5539 */ 5540 int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans, 5541 struct btrfs_block_group *bg) 5542 { 5543 struct btrfs_fs_info *fs_info = trans->fs_info; 5544 struct btrfs_root *chunk_root = fs_info->chunk_root; 5545 struct btrfs_key key; 5546 struct btrfs_chunk *chunk; 5547 struct btrfs_stripe *stripe; 5548 struct extent_map *em; 5549 struct map_lookup *map; 5550 size_t item_size; 5551 int i; 5552 int ret; 5553 5554 /* 5555 * We take the chunk_mutex for 2 reasons: 5556 * 5557 * 1) Updates and insertions in the chunk btree must be done while holding 5558 * the chunk_mutex, as well as updating the system chunk array in the 5559 * superblock. See the comment on top of btrfs_chunk_alloc() for the 5560 * details; 5561 * 5562 * 2) To prevent races with the final phase of a device replace operation 5563 * that replaces the device object associated with the map's stripes, 5564 * because the device object's id can change at any time during that 5565 * final phase of the device replace operation 5566 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 5567 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID, 5568 * which would cause a failure when updating the device item, which does 5569 * not exists, or persisting a stripe of the chunk item with such ID. 5570 * Here we can't use the device_list_mutex because our caller already 5571 * has locked the chunk_mutex, and the final phase of device replace 5572 * acquires both mutexes - first the device_list_mutex and then the 5573 * chunk_mutex. Using any of those two mutexes protects us from a 5574 * concurrent device replace. 5575 */ 5576 lockdep_assert_held(&fs_info->chunk_mutex); 5577 5578 em = btrfs_get_chunk_map(fs_info, bg->start, bg->length); 5579 if (IS_ERR(em)) { 5580 ret = PTR_ERR(em); 5581 btrfs_abort_transaction(trans, ret); 5582 return ret; 5583 } 5584 5585 map = em->map_lookup; 5586 item_size = btrfs_chunk_item_size(map->num_stripes); 5587 5588 chunk = kzalloc(item_size, GFP_NOFS); 5589 if (!chunk) { 5590 ret = -ENOMEM; 5591 btrfs_abort_transaction(trans, ret); 5592 goto out; 5593 } 5594 5595 for (i = 0; i < map->num_stripes; i++) { 5596 struct btrfs_device *device = map->stripes[i].dev; 5597 5598 ret = btrfs_update_device(trans, device); 5599 if (ret) 5600 goto out; 5601 } 5602 5603 stripe = &chunk->stripe; 5604 for (i = 0; i < map->num_stripes; i++) { 5605 struct btrfs_device *device = map->stripes[i].dev; 5606 const u64 dev_offset = map->stripes[i].physical; 5607 5608 btrfs_set_stack_stripe_devid(stripe, device->devid); 5609 btrfs_set_stack_stripe_offset(stripe, dev_offset); 5610 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE); 5611 stripe++; 5612 } 5613 5614 btrfs_set_stack_chunk_length(chunk, bg->length); 5615 btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID); 5616 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len); 5617 btrfs_set_stack_chunk_type(chunk, map->type); 5618 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes); 5619 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len); 5620 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len); 5621 btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize); 5622 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes); 5623 5624 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 5625 key.type = BTRFS_CHUNK_ITEM_KEY; 5626 key.offset = bg->start; 5627 5628 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); 5629 if (ret) 5630 goto out; 5631 5632 bg->chunk_item_inserted = 1; 5633 5634 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 5635 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size); 5636 if (ret) 5637 goto out; 5638 } 5639 5640 out: 5641 kfree(chunk); 5642 free_extent_map(em); 5643 return ret; 5644 } 5645 5646 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans) 5647 { 5648 struct btrfs_fs_info *fs_info = trans->fs_info; 5649 u64 alloc_profile; 5650 struct btrfs_block_group *meta_bg; 5651 struct btrfs_block_group *sys_bg; 5652 5653 /* 5654 * When adding a new device for sprouting, the seed device is read-only 5655 * so we must first allocate a metadata and a system chunk. But before 5656 * adding the block group items to the extent, device and chunk btrees, 5657 * we must first: 5658 * 5659 * 1) Create both chunks without doing any changes to the btrees, as 5660 * otherwise we would get -ENOSPC since the block groups from the 5661 * seed device are read-only; 5662 * 5663 * 2) Add the device item for the new sprout device - finishing the setup 5664 * of a new block group requires updating the device item in the chunk 5665 * btree, so it must exist when we attempt to do it. The previous step 5666 * ensures this does not fail with -ENOSPC. 5667 * 5668 * After that we can add the block group items to their btrees: 5669 * update existing device item in the chunk btree, add a new block group 5670 * item to the extent btree, add a new chunk item to the chunk btree and 5671 * finally add the new device extent items to the devices btree. 5672 */ 5673 5674 alloc_profile = btrfs_metadata_alloc_profile(fs_info); 5675 meta_bg = btrfs_create_chunk(trans, alloc_profile); 5676 if (IS_ERR(meta_bg)) 5677 return PTR_ERR(meta_bg); 5678 5679 alloc_profile = btrfs_system_alloc_profile(fs_info); 5680 sys_bg = btrfs_create_chunk(trans, alloc_profile); 5681 if (IS_ERR(sys_bg)) 5682 return PTR_ERR(sys_bg); 5683 5684 return 0; 5685 } 5686 5687 static inline int btrfs_chunk_max_errors(struct map_lookup *map) 5688 { 5689 const int index = btrfs_bg_flags_to_raid_index(map->type); 5690 5691 return btrfs_raid_array[index].tolerated_failures; 5692 } 5693 5694 bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset) 5695 { 5696 struct extent_map *em; 5697 struct map_lookup *map; 5698 int miss_ndevs = 0; 5699 int i; 5700 bool ret = true; 5701 5702 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 5703 if (IS_ERR(em)) 5704 return false; 5705 5706 map = em->map_lookup; 5707 for (i = 0; i < map->num_stripes; i++) { 5708 if (test_bit(BTRFS_DEV_STATE_MISSING, 5709 &map->stripes[i].dev->dev_state)) { 5710 miss_ndevs++; 5711 continue; 5712 } 5713 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, 5714 &map->stripes[i].dev->dev_state)) { 5715 ret = false; 5716 goto end; 5717 } 5718 } 5719 5720 /* 5721 * If the number of missing devices is larger than max errors, we can 5722 * not write the data into that chunk successfully. 5723 */ 5724 if (miss_ndevs > btrfs_chunk_max_errors(map)) 5725 ret = false; 5726 end: 5727 free_extent_map(em); 5728 return ret; 5729 } 5730 5731 void btrfs_mapping_tree_free(struct extent_map_tree *tree) 5732 { 5733 struct extent_map *em; 5734 5735 while (1) { 5736 write_lock(&tree->lock); 5737 em = lookup_extent_mapping(tree, 0, (u64)-1); 5738 if (em) 5739 remove_extent_mapping(tree, em); 5740 write_unlock(&tree->lock); 5741 if (!em) 5742 break; 5743 /* once for us */ 5744 free_extent_map(em); 5745 /* once for the tree */ 5746 free_extent_map(em); 5747 } 5748 } 5749 5750 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5751 { 5752 struct extent_map *em; 5753 struct map_lookup *map; 5754 int ret; 5755 5756 em = btrfs_get_chunk_map(fs_info, logical, len); 5757 if (IS_ERR(em)) 5758 /* 5759 * We could return errors for these cases, but that could get 5760 * ugly and we'd probably do the same thing which is just not do 5761 * anything else and exit, so return 1 so the callers don't try 5762 * to use other copies. 5763 */ 5764 return 1; 5765 5766 map = em->map_lookup; 5767 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK)) 5768 ret = map->num_stripes; 5769 else if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5770 ret = map->sub_stripes; 5771 else if (map->type & BTRFS_BLOCK_GROUP_RAID5) 5772 ret = 2; 5773 else if (map->type & BTRFS_BLOCK_GROUP_RAID6) 5774 /* 5775 * There could be two corrupted data stripes, we need 5776 * to loop retry in order to rebuild the correct data. 5777 * 5778 * Fail a stripe at a time on every retry except the 5779 * stripe under reconstruction. 5780 */ 5781 ret = map->num_stripes; 5782 else 5783 ret = 1; 5784 free_extent_map(em); 5785 5786 down_read(&fs_info->dev_replace.rwsem); 5787 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) && 5788 fs_info->dev_replace.tgtdev) 5789 ret++; 5790 up_read(&fs_info->dev_replace.rwsem); 5791 5792 return ret; 5793 } 5794 5795 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info, 5796 u64 logical) 5797 { 5798 struct extent_map *em; 5799 struct map_lookup *map; 5800 unsigned long len = fs_info->sectorsize; 5801 5802 em = btrfs_get_chunk_map(fs_info, logical, len); 5803 5804 if (!WARN_ON(IS_ERR(em))) { 5805 map = em->map_lookup; 5806 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5807 len = map->stripe_len * nr_data_stripes(map); 5808 free_extent_map(em); 5809 } 5810 return len; 5811 } 5812 5813 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5814 { 5815 struct extent_map *em; 5816 struct map_lookup *map; 5817 int ret = 0; 5818 5819 em = btrfs_get_chunk_map(fs_info, logical, len); 5820 5821 if(!WARN_ON(IS_ERR(em))) { 5822 map = em->map_lookup; 5823 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5824 ret = 1; 5825 free_extent_map(em); 5826 } 5827 return ret; 5828 } 5829 5830 static int find_live_mirror(struct btrfs_fs_info *fs_info, 5831 struct map_lookup *map, int first, 5832 int dev_replace_is_ongoing) 5833 { 5834 int i; 5835 int num_stripes; 5836 int preferred_mirror; 5837 int tolerance; 5838 struct btrfs_device *srcdev; 5839 5840 ASSERT((map->type & 5841 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10))); 5842 5843 if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5844 num_stripes = map->sub_stripes; 5845 else 5846 num_stripes = map->num_stripes; 5847 5848 switch (fs_info->fs_devices->read_policy) { 5849 default: 5850 /* Shouldn't happen, just warn and use pid instead of failing */ 5851 btrfs_warn_rl(fs_info, 5852 "unknown read_policy type %u, reset to pid", 5853 fs_info->fs_devices->read_policy); 5854 fs_info->fs_devices->read_policy = BTRFS_READ_POLICY_PID; 5855 fallthrough; 5856 case BTRFS_READ_POLICY_PID: 5857 preferred_mirror = first + (current->pid % num_stripes); 5858 break; 5859 } 5860 5861 if (dev_replace_is_ongoing && 5862 fs_info->dev_replace.cont_reading_from_srcdev_mode == 5863 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID) 5864 srcdev = fs_info->dev_replace.srcdev; 5865 else 5866 srcdev = NULL; 5867 5868 /* 5869 * try to avoid the drive that is the source drive for a 5870 * dev-replace procedure, only choose it if no other non-missing 5871 * mirror is available 5872 */ 5873 for (tolerance = 0; tolerance < 2; tolerance++) { 5874 if (map->stripes[preferred_mirror].dev->bdev && 5875 (tolerance || map->stripes[preferred_mirror].dev != srcdev)) 5876 return preferred_mirror; 5877 for (i = first; i < first + num_stripes; i++) { 5878 if (map->stripes[i].dev->bdev && 5879 (tolerance || map->stripes[i].dev != srcdev)) 5880 return i; 5881 } 5882 } 5883 5884 /* we couldn't find one that doesn't fail. Just return something 5885 * and the io error handling code will clean up eventually 5886 */ 5887 return preferred_mirror; 5888 } 5889 5890 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */ 5891 static void sort_parity_stripes(struct btrfs_io_context *bioc, int num_stripes) 5892 { 5893 int i; 5894 int again = 1; 5895 5896 while (again) { 5897 again = 0; 5898 for (i = 0; i < num_stripes - 1; i++) { 5899 /* Swap if parity is on a smaller index */ 5900 if (bioc->raid_map[i] > bioc->raid_map[i + 1]) { 5901 swap(bioc->stripes[i], bioc->stripes[i + 1]); 5902 swap(bioc->raid_map[i], bioc->raid_map[i + 1]); 5903 again = 1; 5904 } 5905 } 5906 } 5907 } 5908 5909 static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info, 5910 int total_stripes, 5911 int real_stripes) 5912 { 5913 struct btrfs_io_context *bioc = kzalloc( 5914 /* The size of btrfs_io_context */ 5915 sizeof(struct btrfs_io_context) + 5916 /* Plus the variable array for the stripes */ 5917 sizeof(struct btrfs_io_stripe) * (total_stripes) + 5918 /* Plus the variable array for the tgt dev */ 5919 sizeof(int) * (real_stripes) + 5920 /* 5921 * Plus the raid_map, which includes both the tgt dev 5922 * and the stripes. 5923 */ 5924 sizeof(u64) * (total_stripes), 5925 GFP_NOFS|__GFP_NOFAIL); 5926 5927 atomic_set(&bioc->error, 0); 5928 refcount_set(&bioc->refs, 1); 5929 5930 bioc->fs_info = fs_info; 5931 bioc->tgtdev_map = (int *)(bioc->stripes + total_stripes); 5932 bioc->raid_map = (u64 *)(bioc->tgtdev_map + real_stripes); 5933 5934 return bioc; 5935 } 5936 5937 void btrfs_get_bioc(struct btrfs_io_context *bioc) 5938 { 5939 WARN_ON(!refcount_read(&bioc->refs)); 5940 refcount_inc(&bioc->refs); 5941 } 5942 5943 void btrfs_put_bioc(struct btrfs_io_context *bioc) 5944 { 5945 if (!bioc) 5946 return; 5947 if (refcount_dec_and_test(&bioc->refs)) 5948 kfree(bioc); 5949 } 5950 5951 /* can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */ 5952 /* 5953 * Please note that, discard won't be sent to target device of device 5954 * replace. 5955 */ 5956 static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info, 5957 u64 logical, u64 *length_ret, 5958 struct btrfs_io_context **bioc_ret) 5959 { 5960 struct extent_map *em; 5961 struct map_lookup *map; 5962 struct btrfs_io_context *bioc; 5963 u64 length = *length_ret; 5964 u64 offset; 5965 u64 stripe_nr; 5966 u64 stripe_nr_end; 5967 u64 stripe_end_offset; 5968 u64 stripe_cnt; 5969 u64 stripe_len; 5970 u64 stripe_offset; 5971 u64 num_stripes; 5972 u32 stripe_index; 5973 u32 factor = 0; 5974 u32 sub_stripes = 0; 5975 u64 stripes_per_dev = 0; 5976 u32 remaining_stripes = 0; 5977 u32 last_stripe = 0; 5978 int ret = 0; 5979 int i; 5980 5981 /* Discard always returns a bioc. */ 5982 ASSERT(bioc_ret); 5983 5984 em = btrfs_get_chunk_map(fs_info, logical, length); 5985 if (IS_ERR(em)) 5986 return PTR_ERR(em); 5987 5988 map = em->map_lookup; 5989 /* we don't discard raid56 yet */ 5990 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 5991 ret = -EOPNOTSUPP; 5992 goto out; 5993 } 5994 5995 offset = logical - em->start; 5996 length = min_t(u64, em->start + em->len - logical, length); 5997 *length_ret = length; 5998 5999 stripe_len = map->stripe_len; 6000 /* 6001 * stripe_nr counts the total number of stripes we have to stride 6002 * to get to this block 6003 */ 6004 stripe_nr = div64_u64(offset, stripe_len); 6005 6006 /* stripe_offset is the offset of this block in its stripe */ 6007 stripe_offset = offset - stripe_nr * stripe_len; 6008 6009 stripe_nr_end = round_up(offset + length, map->stripe_len); 6010 stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len); 6011 stripe_cnt = stripe_nr_end - stripe_nr; 6012 stripe_end_offset = stripe_nr_end * map->stripe_len - 6013 (offset + length); 6014 /* 6015 * after this, stripe_nr is the number of stripes on this 6016 * device we have to walk to find the data, and stripe_index is 6017 * the number of our device in the stripe array 6018 */ 6019 num_stripes = 1; 6020 stripe_index = 0; 6021 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 6022 BTRFS_BLOCK_GROUP_RAID10)) { 6023 if (map->type & BTRFS_BLOCK_GROUP_RAID0) 6024 sub_stripes = 1; 6025 else 6026 sub_stripes = map->sub_stripes; 6027 6028 factor = map->num_stripes / sub_stripes; 6029 num_stripes = min_t(u64, map->num_stripes, 6030 sub_stripes * stripe_cnt); 6031 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 6032 stripe_index *= sub_stripes; 6033 stripes_per_dev = div_u64_rem(stripe_cnt, factor, 6034 &remaining_stripes); 6035 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe); 6036 last_stripe *= sub_stripes; 6037 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK | 6038 BTRFS_BLOCK_GROUP_DUP)) { 6039 num_stripes = map->num_stripes; 6040 } else { 6041 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6042 &stripe_index); 6043 } 6044 6045 bioc = alloc_btrfs_io_context(fs_info, num_stripes, 0); 6046 if (!bioc) { 6047 ret = -ENOMEM; 6048 goto out; 6049 } 6050 6051 for (i = 0; i < num_stripes; i++) { 6052 bioc->stripes[i].physical = 6053 map->stripes[stripe_index].physical + 6054 stripe_offset + stripe_nr * map->stripe_len; 6055 bioc->stripes[i].dev = map->stripes[stripe_index].dev; 6056 6057 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 6058 BTRFS_BLOCK_GROUP_RAID10)) { 6059 bioc->stripes[i].length = stripes_per_dev * 6060 map->stripe_len; 6061 6062 if (i / sub_stripes < remaining_stripes) 6063 bioc->stripes[i].length += map->stripe_len; 6064 6065 /* 6066 * Special for the first stripe and 6067 * the last stripe: 6068 * 6069 * |-------|...|-------| 6070 * |----------| 6071 * off end_off 6072 */ 6073 if (i < sub_stripes) 6074 bioc->stripes[i].length -= stripe_offset; 6075 6076 if (stripe_index >= last_stripe && 6077 stripe_index <= (last_stripe + 6078 sub_stripes - 1)) 6079 bioc->stripes[i].length -= stripe_end_offset; 6080 6081 if (i == sub_stripes - 1) 6082 stripe_offset = 0; 6083 } else { 6084 bioc->stripes[i].length = length; 6085 } 6086 6087 stripe_index++; 6088 if (stripe_index == map->num_stripes) { 6089 stripe_index = 0; 6090 stripe_nr++; 6091 } 6092 } 6093 6094 *bioc_ret = bioc; 6095 bioc->map_type = map->type; 6096 bioc->num_stripes = num_stripes; 6097 out: 6098 free_extent_map(em); 6099 return ret; 6100 } 6101 6102 /* 6103 * In dev-replace case, for repair case (that's the only case where the mirror 6104 * is selected explicitly when calling btrfs_map_block), blocks left of the 6105 * left cursor can also be read from the target drive. 6106 * 6107 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the 6108 * array of stripes. 6109 * For READ, it also needs to be supported using the same mirror number. 6110 * 6111 * If the requested block is not left of the left cursor, EIO is returned. This 6112 * can happen because btrfs_num_copies() returns one more in the dev-replace 6113 * case. 6114 */ 6115 static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info, 6116 u64 logical, u64 length, 6117 u64 srcdev_devid, int *mirror_num, 6118 u64 *physical) 6119 { 6120 struct btrfs_io_context *bioc = NULL; 6121 int num_stripes; 6122 int index_srcdev = 0; 6123 int found = 0; 6124 u64 physical_of_found = 0; 6125 int i; 6126 int ret = 0; 6127 6128 ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, 6129 logical, &length, &bioc, 0, 0); 6130 if (ret) { 6131 ASSERT(bioc == NULL); 6132 return ret; 6133 } 6134 6135 num_stripes = bioc->num_stripes; 6136 if (*mirror_num > num_stripes) { 6137 /* 6138 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror, 6139 * that means that the requested area is not left of the left 6140 * cursor 6141 */ 6142 btrfs_put_bioc(bioc); 6143 return -EIO; 6144 } 6145 6146 /* 6147 * process the rest of the function using the mirror_num of the source 6148 * drive. Therefore look it up first. At the end, patch the device 6149 * pointer to the one of the target drive. 6150 */ 6151 for (i = 0; i < num_stripes; i++) { 6152 if (bioc->stripes[i].dev->devid != srcdev_devid) 6153 continue; 6154 6155 /* 6156 * In case of DUP, in order to keep it simple, only add the 6157 * mirror with the lowest physical address 6158 */ 6159 if (found && 6160 physical_of_found <= bioc->stripes[i].physical) 6161 continue; 6162 6163 index_srcdev = i; 6164 found = 1; 6165 physical_of_found = bioc->stripes[i].physical; 6166 } 6167 6168 btrfs_put_bioc(bioc); 6169 6170 ASSERT(found); 6171 if (!found) 6172 return -EIO; 6173 6174 *mirror_num = index_srcdev + 1; 6175 *physical = physical_of_found; 6176 return ret; 6177 } 6178 6179 static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical) 6180 { 6181 struct btrfs_block_group *cache; 6182 bool ret; 6183 6184 /* Non zoned filesystem does not use "to_copy" flag */ 6185 if (!btrfs_is_zoned(fs_info)) 6186 return false; 6187 6188 cache = btrfs_lookup_block_group(fs_info, logical); 6189 6190 spin_lock(&cache->lock); 6191 ret = cache->to_copy; 6192 spin_unlock(&cache->lock); 6193 6194 btrfs_put_block_group(cache); 6195 return ret; 6196 } 6197 6198 static void handle_ops_on_dev_replace(enum btrfs_map_op op, 6199 struct btrfs_io_context **bioc_ret, 6200 struct btrfs_dev_replace *dev_replace, 6201 u64 logical, 6202 int *num_stripes_ret, int *max_errors_ret) 6203 { 6204 struct btrfs_io_context *bioc = *bioc_ret; 6205 u64 srcdev_devid = dev_replace->srcdev->devid; 6206 int tgtdev_indexes = 0; 6207 int num_stripes = *num_stripes_ret; 6208 int max_errors = *max_errors_ret; 6209 int i; 6210 6211 if (op == BTRFS_MAP_WRITE) { 6212 int index_where_to_add; 6213 6214 /* 6215 * A block group which have "to_copy" set will eventually 6216 * copied by dev-replace process. We can avoid cloning IO here. 6217 */ 6218 if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical)) 6219 return; 6220 6221 /* 6222 * duplicate the write operations while the dev replace 6223 * procedure is running. Since the copying of the old disk to 6224 * the new disk takes place at run time while the filesystem is 6225 * mounted writable, the regular write operations to the old 6226 * disk have to be duplicated to go to the new disk as well. 6227 * 6228 * Note that device->missing is handled by the caller, and that 6229 * the write to the old disk is already set up in the stripes 6230 * array. 6231 */ 6232 index_where_to_add = num_stripes; 6233 for (i = 0; i < num_stripes; i++) { 6234 if (bioc->stripes[i].dev->devid == srcdev_devid) { 6235 /* write to new disk, too */ 6236 struct btrfs_io_stripe *new = 6237 bioc->stripes + index_where_to_add; 6238 struct btrfs_io_stripe *old = 6239 bioc->stripes + i; 6240 6241 new->physical = old->physical; 6242 new->length = old->length; 6243 new->dev = dev_replace->tgtdev; 6244 bioc->tgtdev_map[i] = index_where_to_add; 6245 index_where_to_add++; 6246 max_errors++; 6247 tgtdev_indexes++; 6248 } 6249 } 6250 num_stripes = index_where_to_add; 6251 } else if (op == BTRFS_MAP_GET_READ_MIRRORS) { 6252 int index_srcdev = 0; 6253 int found = 0; 6254 u64 physical_of_found = 0; 6255 6256 /* 6257 * During the dev-replace procedure, the target drive can also 6258 * be used to read data in case it is needed to repair a corrupt 6259 * block elsewhere. This is possible if the requested area is 6260 * left of the left cursor. In this area, the target drive is a 6261 * full copy of the source drive. 6262 */ 6263 for (i = 0; i < num_stripes; i++) { 6264 if (bioc->stripes[i].dev->devid == srcdev_devid) { 6265 /* 6266 * In case of DUP, in order to keep it simple, 6267 * only add the mirror with the lowest physical 6268 * address 6269 */ 6270 if (found && 6271 physical_of_found <= bioc->stripes[i].physical) 6272 continue; 6273 index_srcdev = i; 6274 found = 1; 6275 physical_of_found = bioc->stripes[i].physical; 6276 } 6277 } 6278 if (found) { 6279 struct btrfs_io_stripe *tgtdev_stripe = 6280 bioc->stripes + num_stripes; 6281 6282 tgtdev_stripe->physical = physical_of_found; 6283 tgtdev_stripe->length = 6284 bioc->stripes[index_srcdev].length; 6285 tgtdev_stripe->dev = dev_replace->tgtdev; 6286 bioc->tgtdev_map[index_srcdev] = num_stripes; 6287 6288 tgtdev_indexes++; 6289 num_stripes++; 6290 } 6291 } 6292 6293 *num_stripes_ret = num_stripes; 6294 *max_errors_ret = max_errors; 6295 bioc->num_tgtdevs = tgtdev_indexes; 6296 *bioc_ret = bioc; 6297 } 6298 6299 static bool need_full_stripe(enum btrfs_map_op op) 6300 { 6301 return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS); 6302 } 6303 6304 /* 6305 * Calculate the geometry of a particular (address, len) tuple. This 6306 * information is used to calculate how big a particular bio can get before it 6307 * straddles a stripe. 6308 * 6309 * @fs_info: the filesystem 6310 * @em: mapping containing the logical extent 6311 * @op: type of operation - write or read 6312 * @logical: address that we want to figure out the geometry of 6313 * @io_geom: pointer used to return values 6314 * 6315 * Returns < 0 in case a chunk for the given logical address cannot be found, 6316 * usually shouldn't happen unless @logical is corrupted, 0 otherwise. 6317 */ 6318 int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *em, 6319 enum btrfs_map_op op, u64 logical, 6320 struct btrfs_io_geometry *io_geom) 6321 { 6322 struct map_lookup *map; 6323 u64 len; 6324 u64 offset; 6325 u64 stripe_offset; 6326 u64 stripe_nr; 6327 u64 stripe_len; 6328 u64 raid56_full_stripe_start = (u64)-1; 6329 int data_stripes; 6330 6331 ASSERT(op != BTRFS_MAP_DISCARD); 6332 6333 map = em->map_lookup; 6334 /* Offset of this logical address in the chunk */ 6335 offset = logical - em->start; 6336 /* Len of a stripe in a chunk */ 6337 stripe_len = map->stripe_len; 6338 /* Stripe where this block falls in */ 6339 stripe_nr = div64_u64(offset, stripe_len); 6340 /* Offset of stripe in the chunk */ 6341 stripe_offset = stripe_nr * stripe_len; 6342 if (offset < stripe_offset) { 6343 btrfs_crit(fs_info, 6344 "stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu", 6345 stripe_offset, offset, em->start, logical, stripe_len); 6346 return -EINVAL; 6347 } 6348 6349 /* stripe_offset is the offset of this block in its stripe */ 6350 stripe_offset = offset - stripe_offset; 6351 data_stripes = nr_data_stripes(map); 6352 6353 /* Only stripe based profiles needs to check against stripe length. */ 6354 if (map->type & BTRFS_BLOCK_GROUP_STRIPE_MASK) { 6355 u64 max_len = stripe_len - stripe_offset; 6356 6357 /* 6358 * In case of raid56, we need to know the stripe aligned start 6359 */ 6360 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6361 unsigned long full_stripe_len = stripe_len * data_stripes; 6362 raid56_full_stripe_start = offset; 6363 6364 /* 6365 * Allow a write of a full stripe, but make sure we 6366 * don't allow straddling of stripes 6367 */ 6368 raid56_full_stripe_start = div64_u64(raid56_full_stripe_start, 6369 full_stripe_len); 6370 raid56_full_stripe_start *= full_stripe_len; 6371 6372 /* 6373 * For writes to RAID[56], allow a full stripeset across 6374 * all disks. For other RAID types and for RAID[56] 6375 * reads, just allow a single stripe (on a single disk). 6376 */ 6377 if (op == BTRFS_MAP_WRITE) { 6378 max_len = stripe_len * data_stripes - 6379 (offset - raid56_full_stripe_start); 6380 } 6381 } 6382 len = min_t(u64, em->len - offset, max_len); 6383 } else { 6384 len = em->len - offset; 6385 } 6386 6387 io_geom->len = len; 6388 io_geom->offset = offset; 6389 io_geom->stripe_len = stripe_len; 6390 io_geom->stripe_nr = stripe_nr; 6391 io_geom->stripe_offset = stripe_offset; 6392 io_geom->raid56_stripe_offset = raid56_full_stripe_start; 6393 6394 return 0; 6395 } 6396 6397 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, 6398 enum btrfs_map_op op, 6399 u64 logical, u64 *length, 6400 struct btrfs_io_context **bioc_ret, 6401 int mirror_num, int need_raid_map) 6402 { 6403 struct extent_map *em; 6404 struct map_lookup *map; 6405 u64 stripe_offset; 6406 u64 stripe_nr; 6407 u64 stripe_len; 6408 u32 stripe_index; 6409 int data_stripes; 6410 int i; 6411 int ret = 0; 6412 int num_stripes; 6413 int max_errors = 0; 6414 int tgtdev_indexes = 0; 6415 struct btrfs_io_context *bioc = NULL; 6416 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; 6417 int dev_replace_is_ongoing = 0; 6418 int num_alloc_stripes; 6419 int patch_the_first_stripe_for_dev_replace = 0; 6420 u64 physical_to_patch_in_first_stripe = 0; 6421 u64 raid56_full_stripe_start = (u64)-1; 6422 struct btrfs_io_geometry geom; 6423 6424 ASSERT(bioc_ret); 6425 ASSERT(op != BTRFS_MAP_DISCARD); 6426 6427 em = btrfs_get_chunk_map(fs_info, logical, *length); 6428 ASSERT(!IS_ERR(em)); 6429 6430 ret = btrfs_get_io_geometry(fs_info, em, op, logical, &geom); 6431 if (ret < 0) 6432 return ret; 6433 6434 map = em->map_lookup; 6435 6436 *length = geom.len; 6437 stripe_len = geom.stripe_len; 6438 stripe_nr = geom.stripe_nr; 6439 stripe_offset = geom.stripe_offset; 6440 raid56_full_stripe_start = geom.raid56_stripe_offset; 6441 data_stripes = nr_data_stripes(map); 6442 6443 down_read(&dev_replace->rwsem); 6444 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace); 6445 /* 6446 * Hold the semaphore for read during the whole operation, write is 6447 * requested at commit time but must wait. 6448 */ 6449 if (!dev_replace_is_ongoing) 6450 up_read(&dev_replace->rwsem); 6451 6452 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 && 6453 !need_full_stripe(op) && dev_replace->tgtdev != NULL) { 6454 ret = get_extra_mirror_from_replace(fs_info, logical, *length, 6455 dev_replace->srcdev->devid, 6456 &mirror_num, 6457 &physical_to_patch_in_first_stripe); 6458 if (ret) 6459 goto out; 6460 else 6461 patch_the_first_stripe_for_dev_replace = 1; 6462 } else if (mirror_num > map->num_stripes) { 6463 mirror_num = 0; 6464 } 6465 6466 num_stripes = 1; 6467 stripe_index = 0; 6468 if (map->type & BTRFS_BLOCK_GROUP_RAID0) { 6469 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6470 &stripe_index); 6471 if (!need_full_stripe(op)) 6472 mirror_num = 1; 6473 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) { 6474 if (need_full_stripe(op)) 6475 num_stripes = map->num_stripes; 6476 else if (mirror_num) 6477 stripe_index = mirror_num - 1; 6478 else { 6479 stripe_index = find_live_mirror(fs_info, map, 0, 6480 dev_replace_is_ongoing); 6481 mirror_num = stripe_index + 1; 6482 } 6483 6484 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { 6485 if (need_full_stripe(op)) { 6486 num_stripes = map->num_stripes; 6487 } else if (mirror_num) { 6488 stripe_index = mirror_num - 1; 6489 } else { 6490 mirror_num = 1; 6491 } 6492 6493 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { 6494 u32 factor = map->num_stripes / map->sub_stripes; 6495 6496 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 6497 stripe_index *= map->sub_stripes; 6498 6499 if (need_full_stripe(op)) 6500 num_stripes = map->sub_stripes; 6501 else if (mirror_num) 6502 stripe_index += mirror_num - 1; 6503 else { 6504 int old_stripe_index = stripe_index; 6505 stripe_index = find_live_mirror(fs_info, map, 6506 stripe_index, 6507 dev_replace_is_ongoing); 6508 mirror_num = stripe_index - old_stripe_index + 1; 6509 } 6510 6511 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6512 if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) { 6513 /* push stripe_nr back to the start of the full stripe */ 6514 stripe_nr = div64_u64(raid56_full_stripe_start, 6515 stripe_len * data_stripes); 6516 6517 /* RAID[56] write or recovery. Return all stripes */ 6518 num_stripes = map->num_stripes; 6519 max_errors = nr_parity_stripes(map); 6520 6521 *length = map->stripe_len; 6522 stripe_index = 0; 6523 stripe_offset = 0; 6524 } else { 6525 /* 6526 * Mirror #0 or #1 means the original data block. 6527 * Mirror #2 is RAID5 parity block. 6528 * Mirror #3 is RAID6 Q block. 6529 */ 6530 stripe_nr = div_u64_rem(stripe_nr, 6531 data_stripes, &stripe_index); 6532 if (mirror_num > 1) 6533 stripe_index = data_stripes + mirror_num - 2; 6534 6535 /* We distribute the parity blocks across stripes */ 6536 div_u64_rem(stripe_nr + stripe_index, map->num_stripes, 6537 &stripe_index); 6538 if (!need_full_stripe(op) && mirror_num <= 1) 6539 mirror_num = 1; 6540 } 6541 } else { 6542 /* 6543 * after this, stripe_nr is the number of stripes on this 6544 * device we have to walk to find the data, and stripe_index is 6545 * the number of our device in the stripe array 6546 */ 6547 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6548 &stripe_index); 6549 mirror_num = stripe_index + 1; 6550 } 6551 if (stripe_index >= map->num_stripes) { 6552 btrfs_crit(fs_info, 6553 "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u", 6554 stripe_index, map->num_stripes); 6555 ret = -EINVAL; 6556 goto out; 6557 } 6558 6559 num_alloc_stripes = num_stripes; 6560 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) { 6561 if (op == BTRFS_MAP_WRITE) 6562 num_alloc_stripes <<= 1; 6563 if (op == BTRFS_MAP_GET_READ_MIRRORS) 6564 num_alloc_stripes++; 6565 tgtdev_indexes = num_stripes; 6566 } 6567 6568 bioc = alloc_btrfs_io_context(fs_info, num_alloc_stripes, tgtdev_indexes); 6569 if (!bioc) { 6570 ret = -ENOMEM; 6571 goto out; 6572 } 6573 6574 for (i = 0; i < num_stripes; i++) { 6575 bioc->stripes[i].physical = map->stripes[stripe_index].physical + 6576 stripe_offset + stripe_nr * map->stripe_len; 6577 bioc->stripes[i].dev = map->stripes[stripe_index].dev; 6578 stripe_index++; 6579 } 6580 6581 /* Build raid_map */ 6582 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map && 6583 (need_full_stripe(op) || mirror_num > 1)) { 6584 u64 tmp; 6585 unsigned rot; 6586 6587 /* Work out the disk rotation on this stripe-set */ 6588 div_u64_rem(stripe_nr, num_stripes, &rot); 6589 6590 /* Fill in the logical address of each stripe */ 6591 tmp = stripe_nr * data_stripes; 6592 for (i = 0; i < data_stripes; i++) 6593 bioc->raid_map[(i + rot) % num_stripes] = 6594 em->start + (tmp + i) * map->stripe_len; 6595 6596 bioc->raid_map[(i + rot) % map->num_stripes] = RAID5_P_STRIPE; 6597 if (map->type & BTRFS_BLOCK_GROUP_RAID6) 6598 bioc->raid_map[(i + rot + 1) % num_stripes] = 6599 RAID6_Q_STRIPE; 6600 6601 sort_parity_stripes(bioc, num_stripes); 6602 } 6603 6604 if (need_full_stripe(op)) 6605 max_errors = btrfs_chunk_max_errors(map); 6606 6607 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL && 6608 need_full_stripe(op)) { 6609 handle_ops_on_dev_replace(op, &bioc, dev_replace, logical, 6610 &num_stripes, &max_errors); 6611 } 6612 6613 *bioc_ret = bioc; 6614 bioc->map_type = map->type; 6615 bioc->num_stripes = num_stripes; 6616 bioc->max_errors = max_errors; 6617 bioc->mirror_num = mirror_num; 6618 6619 /* 6620 * this is the case that REQ_READ && dev_replace_is_ongoing && 6621 * mirror_num == num_stripes + 1 && dev_replace target drive is 6622 * available as a mirror 6623 */ 6624 if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) { 6625 WARN_ON(num_stripes > 1); 6626 bioc->stripes[0].dev = dev_replace->tgtdev; 6627 bioc->stripes[0].physical = physical_to_patch_in_first_stripe; 6628 bioc->mirror_num = map->num_stripes + 1; 6629 } 6630 out: 6631 if (dev_replace_is_ongoing) { 6632 lockdep_assert_held(&dev_replace->rwsem); 6633 /* Unlock and let waiting writers proceed */ 6634 up_read(&dev_replace->rwsem); 6635 } 6636 free_extent_map(em); 6637 return ret; 6638 } 6639 6640 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 6641 u64 logical, u64 *length, 6642 struct btrfs_io_context **bioc_ret, int mirror_num) 6643 { 6644 if (op == BTRFS_MAP_DISCARD) 6645 return __btrfs_map_block_for_discard(fs_info, logical, 6646 length, bioc_ret); 6647 6648 return __btrfs_map_block(fs_info, op, logical, length, bioc_ret, 6649 mirror_num, 0); 6650 } 6651 6652 /* For Scrub/replace */ 6653 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 6654 u64 logical, u64 *length, 6655 struct btrfs_io_context **bioc_ret) 6656 { 6657 return __btrfs_map_block(fs_info, op, logical, length, bioc_ret, 0, 1); 6658 } 6659 6660 static inline void btrfs_end_bioc(struct btrfs_io_context *bioc, struct bio *bio) 6661 { 6662 bio->bi_private = bioc->private; 6663 bio->bi_end_io = bioc->end_io; 6664 bio_endio(bio); 6665 6666 btrfs_put_bioc(bioc); 6667 } 6668 6669 static void btrfs_end_bio(struct bio *bio) 6670 { 6671 struct btrfs_io_context *bioc = bio->bi_private; 6672 int is_orig_bio = 0; 6673 6674 if (bio->bi_status) { 6675 atomic_inc(&bioc->error); 6676 if (bio->bi_status == BLK_STS_IOERR || 6677 bio->bi_status == BLK_STS_TARGET) { 6678 struct btrfs_device *dev = btrfs_bio(bio)->device; 6679 6680 ASSERT(dev->bdev); 6681 if (btrfs_op(bio) == BTRFS_MAP_WRITE) 6682 btrfs_dev_stat_inc_and_print(dev, 6683 BTRFS_DEV_STAT_WRITE_ERRS); 6684 else if (!(bio->bi_opf & REQ_RAHEAD)) 6685 btrfs_dev_stat_inc_and_print(dev, 6686 BTRFS_DEV_STAT_READ_ERRS); 6687 if (bio->bi_opf & REQ_PREFLUSH) 6688 btrfs_dev_stat_inc_and_print(dev, 6689 BTRFS_DEV_STAT_FLUSH_ERRS); 6690 } 6691 } 6692 6693 if (bio == bioc->orig_bio) 6694 is_orig_bio = 1; 6695 6696 btrfs_bio_counter_dec(bioc->fs_info); 6697 6698 if (atomic_dec_and_test(&bioc->stripes_pending)) { 6699 if (!is_orig_bio) { 6700 bio_put(bio); 6701 bio = bioc->orig_bio; 6702 } 6703 6704 btrfs_bio(bio)->mirror_num = bioc->mirror_num; 6705 /* only send an error to the higher layers if it is 6706 * beyond the tolerance of the btrfs bio 6707 */ 6708 if (atomic_read(&bioc->error) > bioc->max_errors) { 6709 bio->bi_status = BLK_STS_IOERR; 6710 } else { 6711 /* 6712 * this bio is actually up to date, we didn't 6713 * go over the max number of errors 6714 */ 6715 bio->bi_status = BLK_STS_OK; 6716 } 6717 6718 btrfs_end_bioc(bioc, bio); 6719 } else if (!is_orig_bio) { 6720 bio_put(bio); 6721 } 6722 } 6723 6724 static void submit_stripe_bio(struct btrfs_io_context *bioc, struct bio *bio, 6725 u64 physical, struct btrfs_device *dev) 6726 { 6727 struct btrfs_fs_info *fs_info = bioc->fs_info; 6728 6729 bio->bi_private = bioc; 6730 btrfs_bio(bio)->device = dev; 6731 bio->bi_end_io = btrfs_end_bio; 6732 bio->bi_iter.bi_sector = physical >> 9; 6733 /* 6734 * For zone append writing, bi_sector must point the beginning of the 6735 * zone 6736 */ 6737 if (bio_op(bio) == REQ_OP_ZONE_APPEND) { 6738 if (btrfs_dev_is_sequential(dev, physical)) { 6739 u64 zone_start = round_down(physical, fs_info->zone_size); 6740 6741 bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT; 6742 } else { 6743 bio->bi_opf &= ~REQ_OP_ZONE_APPEND; 6744 bio->bi_opf |= REQ_OP_WRITE; 6745 } 6746 } 6747 btrfs_debug_in_rcu(fs_info, 6748 "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u", 6749 bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector, 6750 (unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name), 6751 dev->devid, bio->bi_iter.bi_size); 6752 bio_set_dev(bio, dev->bdev); 6753 6754 btrfs_bio_counter_inc_noblocked(fs_info); 6755 6756 btrfsic_submit_bio(bio); 6757 } 6758 6759 static void bioc_error(struct btrfs_io_context *bioc, struct bio *bio, u64 logical) 6760 { 6761 atomic_inc(&bioc->error); 6762 if (atomic_dec_and_test(&bioc->stripes_pending)) { 6763 /* Should be the original bio. */ 6764 WARN_ON(bio != bioc->orig_bio); 6765 6766 btrfs_bio(bio)->mirror_num = bioc->mirror_num; 6767 bio->bi_iter.bi_sector = logical >> 9; 6768 if (atomic_read(&bioc->error) > bioc->max_errors) 6769 bio->bi_status = BLK_STS_IOERR; 6770 else 6771 bio->bi_status = BLK_STS_OK; 6772 btrfs_end_bioc(bioc, bio); 6773 } 6774 } 6775 6776 blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, 6777 int mirror_num) 6778 { 6779 struct btrfs_device *dev; 6780 struct bio *first_bio = bio; 6781 u64 logical = bio->bi_iter.bi_sector << 9; 6782 u64 length = 0; 6783 u64 map_length; 6784 int ret; 6785 int dev_nr; 6786 int total_devs; 6787 struct btrfs_io_context *bioc = NULL; 6788 6789 length = bio->bi_iter.bi_size; 6790 map_length = length; 6791 6792 btrfs_bio_counter_inc_blocked(fs_info); 6793 ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical, 6794 &map_length, &bioc, mirror_num, 1); 6795 if (ret) { 6796 btrfs_bio_counter_dec(fs_info); 6797 return errno_to_blk_status(ret); 6798 } 6799 6800 total_devs = bioc->num_stripes; 6801 bioc->orig_bio = first_bio; 6802 bioc->private = first_bio->bi_private; 6803 bioc->end_io = first_bio->bi_end_io; 6804 atomic_set(&bioc->stripes_pending, bioc->num_stripes); 6805 6806 if ((bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) && 6807 ((btrfs_op(bio) == BTRFS_MAP_WRITE) || (mirror_num > 1))) { 6808 /* In this case, map_length has been set to the length of 6809 a single stripe; not the whole write */ 6810 if (btrfs_op(bio) == BTRFS_MAP_WRITE) { 6811 ret = raid56_parity_write(bio, bioc, map_length); 6812 } else { 6813 ret = raid56_parity_recover(bio, bioc, map_length, 6814 mirror_num, 1); 6815 } 6816 6817 btrfs_bio_counter_dec(fs_info); 6818 return errno_to_blk_status(ret); 6819 } 6820 6821 if (map_length < length) { 6822 btrfs_crit(fs_info, 6823 "mapping failed logical %llu bio len %llu len %llu", 6824 logical, length, map_length); 6825 BUG(); 6826 } 6827 6828 for (dev_nr = 0; dev_nr < total_devs; dev_nr++) { 6829 dev = bioc->stripes[dev_nr].dev; 6830 if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING, 6831 &dev->dev_state) || 6832 (btrfs_op(first_bio) == BTRFS_MAP_WRITE && 6833 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) { 6834 bioc_error(bioc, first_bio, logical); 6835 continue; 6836 } 6837 6838 if (dev_nr < total_devs - 1) 6839 bio = btrfs_bio_clone(first_bio); 6840 else 6841 bio = first_bio; 6842 6843 submit_stripe_bio(bioc, bio, bioc->stripes[dev_nr].physical, dev); 6844 } 6845 btrfs_bio_counter_dec(fs_info); 6846 return BLK_STS_OK; 6847 } 6848 6849 static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args, 6850 const struct btrfs_fs_devices *fs_devices) 6851 { 6852 if (args->fsid == NULL) 6853 return true; 6854 if (memcmp(fs_devices->metadata_uuid, args->fsid, BTRFS_FSID_SIZE) == 0) 6855 return true; 6856 return false; 6857 } 6858 6859 static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args, 6860 const struct btrfs_device *device) 6861 { 6862 ASSERT((args->devid != (u64)-1) || args->missing); 6863 6864 if ((args->devid != (u64)-1) && device->devid != args->devid) 6865 return false; 6866 if (args->uuid && memcmp(device->uuid, args->uuid, BTRFS_UUID_SIZE) != 0) 6867 return false; 6868 if (!args->missing) 6869 return true; 6870 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) && 6871 !device->bdev) 6872 return true; 6873 return false; 6874 } 6875 6876 /* 6877 * Find a device specified by @devid or @uuid in the list of @fs_devices, or 6878 * return NULL. 6879 * 6880 * If devid and uuid are both specified, the match must be exact, otherwise 6881 * only devid is used. 6882 */ 6883 struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices, 6884 const struct btrfs_dev_lookup_args *args) 6885 { 6886 struct btrfs_device *device; 6887 struct btrfs_fs_devices *seed_devs; 6888 6889 if (dev_args_match_fs_devices(args, fs_devices)) { 6890 list_for_each_entry(device, &fs_devices->devices, dev_list) { 6891 if (dev_args_match_device(args, device)) 6892 return device; 6893 } 6894 } 6895 6896 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 6897 if (!dev_args_match_fs_devices(args, seed_devs)) 6898 continue; 6899 list_for_each_entry(device, &seed_devs->devices, dev_list) { 6900 if (dev_args_match_device(args, device)) 6901 return device; 6902 } 6903 } 6904 6905 return NULL; 6906 } 6907 6908 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices, 6909 u64 devid, u8 *dev_uuid) 6910 { 6911 struct btrfs_device *device; 6912 unsigned int nofs_flag; 6913 6914 /* 6915 * We call this under the chunk_mutex, so we want to use NOFS for this 6916 * allocation, however we don't want to change btrfs_alloc_device() to 6917 * always do NOFS because we use it in a lot of other GFP_KERNEL safe 6918 * places. 6919 */ 6920 nofs_flag = memalloc_nofs_save(); 6921 device = btrfs_alloc_device(NULL, &devid, dev_uuid); 6922 memalloc_nofs_restore(nofs_flag); 6923 if (IS_ERR(device)) 6924 return device; 6925 6926 list_add(&device->dev_list, &fs_devices->devices); 6927 device->fs_devices = fs_devices; 6928 fs_devices->num_devices++; 6929 6930 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 6931 fs_devices->missing_devices++; 6932 6933 return device; 6934 } 6935 6936 /** 6937 * btrfs_alloc_device - allocate struct btrfs_device 6938 * @fs_info: used only for generating a new devid, can be NULL if 6939 * devid is provided (i.e. @devid != NULL). 6940 * @devid: a pointer to devid for this device. If NULL a new devid 6941 * is generated. 6942 * @uuid: a pointer to UUID for this device. If NULL a new UUID 6943 * is generated. 6944 * 6945 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR() 6946 * on error. Returned struct is not linked onto any lists and must be 6947 * destroyed with btrfs_free_device. 6948 */ 6949 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, 6950 const u64 *devid, 6951 const u8 *uuid) 6952 { 6953 struct btrfs_device *dev; 6954 u64 tmp; 6955 6956 if (WARN_ON(!devid && !fs_info)) 6957 return ERR_PTR(-EINVAL); 6958 6959 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 6960 if (!dev) 6961 return ERR_PTR(-ENOMEM); 6962 6963 /* 6964 * Preallocate a bio that's always going to be used for flushing device 6965 * barriers and matches the device lifespan 6966 */ 6967 dev->flush_bio = bio_kmalloc(GFP_KERNEL, 0); 6968 if (!dev->flush_bio) { 6969 kfree(dev); 6970 return ERR_PTR(-ENOMEM); 6971 } 6972 6973 INIT_LIST_HEAD(&dev->dev_list); 6974 INIT_LIST_HEAD(&dev->dev_alloc_list); 6975 INIT_LIST_HEAD(&dev->post_commit_list); 6976 6977 atomic_set(&dev->reada_in_flight, 0); 6978 atomic_set(&dev->dev_stats_ccnt, 0); 6979 btrfs_device_data_ordered_init(dev); 6980 INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM); 6981 INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM); 6982 extent_io_tree_init(fs_info, &dev->alloc_state, 6983 IO_TREE_DEVICE_ALLOC_STATE, NULL); 6984 6985 if (devid) 6986 tmp = *devid; 6987 else { 6988 int ret; 6989 6990 ret = find_next_devid(fs_info, &tmp); 6991 if (ret) { 6992 btrfs_free_device(dev); 6993 return ERR_PTR(ret); 6994 } 6995 } 6996 dev->devid = tmp; 6997 6998 if (uuid) 6999 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE); 7000 else 7001 generate_random_uuid(dev->uuid); 7002 7003 return dev; 7004 } 7005 7006 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info, 7007 u64 devid, u8 *uuid, bool error) 7008 { 7009 if (error) 7010 btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing", 7011 devid, uuid); 7012 else 7013 btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing", 7014 devid, uuid); 7015 } 7016 7017 static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes) 7018 { 7019 const int data_stripes = calc_data_stripes(type, num_stripes); 7020 7021 return div_u64(chunk_len, data_stripes); 7022 } 7023 7024 #if BITS_PER_LONG == 32 7025 /* 7026 * Due to page cache limit, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE 7027 * can't be accessed on 32bit systems. 7028 * 7029 * This function do mount time check to reject the fs if it already has 7030 * metadata chunk beyond that limit. 7031 */ 7032 static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info, 7033 u64 logical, u64 length, u64 type) 7034 { 7035 if (!(type & BTRFS_BLOCK_GROUP_METADATA)) 7036 return 0; 7037 7038 if (logical + length < MAX_LFS_FILESIZE) 7039 return 0; 7040 7041 btrfs_err_32bit_limit(fs_info); 7042 return -EOVERFLOW; 7043 } 7044 7045 /* 7046 * This is to give early warning for any metadata chunk reaching 7047 * BTRFS_32BIT_EARLY_WARN_THRESHOLD. 7048 * Although we can still access the metadata, it's not going to be possible 7049 * once the limit is reached. 7050 */ 7051 static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info, 7052 u64 logical, u64 length, u64 type) 7053 { 7054 if (!(type & BTRFS_BLOCK_GROUP_METADATA)) 7055 return; 7056 7057 if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD) 7058 return; 7059 7060 btrfs_warn_32bit_limit(fs_info); 7061 } 7062 #endif 7063 7064 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf, 7065 struct btrfs_chunk *chunk) 7066 { 7067 BTRFS_DEV_LOOKUP_ARGS(args); 7068 struct btrfs_fs_info *fs_info = leaf->fs_info; 7069 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 7070 struct map_lookup *map; 7071 struct extent_map *em; 7072 u64 logical; 7073 u64 length; 7074 u64 devid; 7075 u64 type; 7076 u8 uuid[BTRFS_UUID_SIZE]; 7077 int num_stripes; 7078 int ret; 7079 int i; 7080 7081 logical = key->offset; 7082 length = btrfs_chunk_length(leaf, chunk); 7083 type = btrfs_chunk_type(leaf, chunk); 7084 num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 7085 7086 #if BITS_PER_LONG == 32 7087 ret = check_32bit_meta_chunk(fs_info, logical, length, type); 7088 if (ret < 0) 7089 return ret; 7090 warn_32bit_meta_chunk(fs_info, logical, length, type); 7091 #endif 7092 7093 /* 7094 * Only need to verify chunk item if we're reading from sys chunk array, 7095 * as chunk item in tree block is already verified by tree-checker. 7096 */ 7097 if (leaf->start == BTRFS_SUPER_INFO_OFFSET) { 7098 ret = btrfs_check_chunk_valid(leaf, chunk, logical); 7099 if (ret) 7100 return ret; 7101 } 7102 7103 read_lock(&map_tree->lock); 7104 em = lookup_extent_mapping(map_tree, logical, 1); 7105 read_unlock(&map_tree->lock); 7106 7107 /* already mapped? */ 7108 if (em && em->start <= logical && em->start + em->len > logical) { 7109 free_extent_map(em); 7110 return 0; 7111 } else if (em) { 7112 free_extent_map(em); 7113 } 7114 7115 em = alloc_extent_map(); 7116 if (!em) 7117 return -ENOMEM; 7118 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); 7119 if (!map) { 7120 free_extent_map(em); 7121 return -ENOMEM; 7122 } 7123 7124 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 7125 em->map_lookup = map; 7126 em->start = logical; 7127 em->len = length; 7128 em->orig_start = 0; 7129 em->block_start = 0; 7130 em->block_len = em->len; 7131 7132 map->num_stripes = num_stripes; 7133 map->io_width = btrfs_chunk_io_width(leaf, chunk); 7134 map->io_align = btrfs_chunk_io_align(leaf, chunk); 7135 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk); 7136 map->type = type; 7137 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); 7138 map->verified_stripes = 0; 7139 em->orig_block_len = calc_stripe_length(type, em->len, 7140 map->num_stripes); 7141 for (i = 0; i < num_stripes; i++) { 7142 map->stripes[i].physical = 7143 btrfs_stripe_offset_nr(leaf, chunk, i); 7144 devid = btrfs_stripe_devid_nr(leaf, chunk, i); 7145 args.devid = devid; 7146 read_extent_buffer(leaf, uuid, (unsigned long) 7147 btrfs_stripe_dev_uuid_nr(chunk, i), 7148 BTRFS_UUID_SIZE); 7149 args.uuid = uuid; 7150 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args); 7151 if (!map->stripes[i].dev && 7152 !btrfs_test_opt(fs_info, DEGRADED)) { 7153 free_extent_map(em); 7154 btrfs_report_missing_device(fs_info, devid, uuid, true); 7155 return -ENOENT; 7156 } 7157 if (!map->stripes[i].dev) { 7158 map->stripes[i].dev = 7159 add_missing_dev(fs_info->fs_devices, devid, 7160 uuid); 7161 if (IS_ERR(map->stripes[i].dev)) { 7162 free_extent_map(em); 7163 btrfs_err(fs_info, 7164 "failed to init missing dev %llu: %ld", 7165 devid, PTR_ERR(map->stripes[i].dev)); 7166 return PTR_ERR(map->stripes[i].dev); 7167 } 7168 btrfs_report_missing_device(fs_info, devid, uuid, false); 7169 } 7170 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 7171 &(map->stripes[i].dev->dev_state)); 7172 7173 } 7174 7175 write_lock(&map_tree->lock); 7176 ret = add_extent_mapping(map_tree, em, 0); 7177 write_unlock(&map_tree->lock); 7178 if (ret < 0) { 7179 btrfs_err(fs_info, 7180 "failed to add chunk map, start=%llu len=%llu: %d", 7181 em->start, em->len, ret); 7182 } 7183 free_extent_map(em); 7184 7185 return ret; 7186 } 7187 7188 static void fill_device_from_item(struct extent_buffer *leaf, 7189 struct btrfs_dev_item *dev_item, 7190 struct btrfs_device *device) 7191 { 7192 unsigned long ptr; 7193 7194 device->devid = btrfs_device_id(leaf, dev_item); 7195 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item); 7196 device->total_bytes = device->disk_total_bytes; 7197 device->commit_total_bytes = device->disk_total_bytes; 7198 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item); 7199 device->commit_bytes_used = device->bytes_used; 7200 device->type = btrfs_device_type(leaf, dev_item); 7201 device->io_align = btrfs_device_io_align(leaf, dev_item); 7202 device->io_width = btrfs_device_io_width(leaf, dev_item); 7203 device->sector_size = btrfs_device_sector_size(leaf, dev_item); 7204 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID); 7205 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 7206 7207 ptr = btrfs_device_uuid(dev_item); 7208 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 7209 } 7210 7211 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info, 7212 u8 *fsid) 7213 { 7214 struct btrfs_fs_devices *fs_devices; 7215 int ret; 7216 7217 lockdep_assert_held(&uuid_mutex); 7218 ASSERT(fsid); 7219 7220 /* This will match only for multi-device seed fs */ 7221 list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list) 7222 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE)) 7223 return fs_devices; 7224 7225 7226 fs_devices = find_fsid(fsid, NULL); 7227 if (!fs_devices) { 7228 if (!btrfs_test_opt(fs_info, DEGRADED)) 7229 return ERR_PTR(-ENOENT); 7230 7231 fs_devices = alloc_fs_devices(fsid, NULL); 7232 if (IS_ERR(fs_devices)) 7233 return fs_devices; 7234 7235 fs_devices->seeding = true; 7236 fs_devices->opened = 1; 7237 return fs_devices; 7238 } 7239 7240 /* 7241 * Upon first call for a seed fs fsid, just create a private copy of the 7242 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list 7243 */ 7244 fs_devices = clone_fs_devices(fs_devices); 7245 if (IS_ERR(fs_devices)) 7246 return fs_devices; 7247 7248 ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder); 7249 if (ret) { 7250 free_fs_devices(fs_devices); 7251 return ERR_PTR(ret); 7252 } 7253 7254 if (!fs_devices->seeding) { 7255 close_fs_devices(fs_devices); 7256 free_fs_devices(fs_devices); 7257 return ERR_PTR(-EINVAL); 7258 } 7259 7260 list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list); 7261 7262 return fs_devices; 7263 } 7264 7265 static int read_one_dev(struct extent_buffer *leaf, 7266 struct btrfs_dev_item *dev_item) 7267 { 7268 BTRFS_DEV_LOOKUP_ARGS(args); 7269 struct btrfs_fs_info *fs_info = leaf->fs_info; 7270 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7271 struct btrfs_device *device; 7272 u64 devid; 7273 int ret; 7274 u8 fs_uuid[BTRFS_FSID_SIZE]; 7275 u8 dev_uuid[BTRFS_UUID_SIZE]; 7276 7277 devid = args.devid = btrfs_device_id(leaf, dev_item); 7278 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 7279 BTRFS_UUID_SIZE); 7280 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 7281 BTRFS_FSID_SIZE); 7282 args.uuid = dev_uuid; 7283 args.fsid = fs_uuid; 7284 7285 if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) { 7286 fs_devices = open_seed_devices(fs_info, fs_uuid); 7287 if (IS_ERR(fs_devices)) 7288 return PTR_ERR(fs_devices); 7289 } 7290 7291 device = btrfs_find_device(fs_info->fs_devices, &args); 7292 if (!device) { 7293 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7294 btrfs_report_missing_device(fs_info, devid, 7295 dev_uuid, true); 7296 return -ENOENT; 7297 } 7298 7299 device = add_missing_dev(fs_devices, devid, dev_uuid); 7300 if (IS_ERR(device)) { 7301 btrfs_err(fs_info, 7302 "failed to add missing dev %llu: %ld", 7303 devid, PTR_ERR(device)); 7304 return PTR_ERR(device); 7305 } 7306 btrfs_report_missing_device(fs_info, devid, dev_uuid, false); 7307 } else { 7308 if (!device->bdev) { 7309 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7310 btrfs_report_missing_device(fs_info, 7311 devid, dev_uuid, true); 7312 return -ENOENT; 7313 } 7314 btrfs_report_missing_device(fs_info, devid, 7315 dev_uuid, false); 7316 } 7317 7318 if (!device->bdev && 7319 !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 7320 /* 7321 * this happens when a device that was properly setup 7322 * in the device info lists suddenly goes bad. 7323 * device->bdev is NULL, and so we have to set 7324 * device->missing to one here 7325 */ 7326 device->fs_devices->missing_devices++; 7327 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 7328 } 7329 7330 /* Move the device to its own fs_devices */ 7331 if (device->fs_devices != fs_devices) { 7332 ASSERT(test_bit(BTRFS_DEV_STATE_MISSING, 7333 &device->dev_state)); 7334 7335 list_move(&device->dev_list, &fs_devices->devices); 7336 device->fs_devices->num_devices--; 7337 fs_devices->num_devices++; 7338 7339 device->fs_devices->missing_devices--; 7340 fs_devices->missing_devices++; 7341 7342 device->fs_devices = fs_devices; 7343 } 7344 } 7345 7346 if (device->fs_devices != fs_info->fs_devices) { 7347 BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)); 7348 if (device->generation != 7349 btrfs_device_generation(leaf, dev_item)) 7350 return -EINVAL; 7351 } 7352 7353 fill_device_from_item(leaf, dev_item, device); 7354 if (device->bdev) { 7355 u64 max_total_bytes = bdev_nr_bytes(device->bdev); 7356 7357 if (device->total_bytes > max_total_bytes) { 7358 btrfs_err(fs_info, 7359 "device total_bytes should be at most %llu but found %llu", 7360 max_total_bytes, device->total_bytes); 7361 return -EINVAL; 7362 } 7363 } 7364 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 7365 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 7366 !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 7367 device->fs_devices->total_rw_bytes += device->total_bytes; 7368 atomic64_add(device->total_bytes - device->bytes_used, 7369 &fs_info->free_chunk_space); 7370 } 7371 ret = 0; 7372 return ret; 7373 } 7374 7375 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info) 7376 { 7377 struct btrfs_root *root = fs_info->tree_root; 7378 struct btrfs_super_block *super_copy = fs_info->super_copy; 7379 struct extent_buffer *sb; 7380 struct btrfs_disk_key *disk_key; 7381 struct btrfs_chunk *chunk; 7382 u8 *array_ptr; 7383 unsigned long sb_array_offset; 7384 int ret = 0; 7385 u32 num_stripes; 7386 u32 array_size; 7387 u32 len = 0; 7388 u32 cur_offset; 7389 u64 type; 7390 struct btrfs_key key; 7391 7392 ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize); 7393 /* 7394 * This will create extent buffer of nodesize, superblock size is 7395 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will 7396 * overallocate but we can keep it as-is, only the first page is used. 7397 */ 7398 sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET, 7399 root->root_key.objectid, 0); 7400 if (IS_ERR(sb)) 7401 return PTR_ERR(sb); 7402 set_extent_buffer_uptodate(sb); 7403 /* 7404 * The sb extent buffer is artificial and just used to read the system array. 7405 * set_extent_buffer_uptodate() call does not properly mark all it's 7406 * pages up-to-date when the page is larger: extent does not cover the 7407 * whole page and consequently check_page_uptodate does not find all 7408 * the page's extents up-to-date (the hole beyond sb), 7409 * write_extent_buffer then triggers a WARN_ON. 7410 * 7411 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle, 7412 * but sb spans only this function. Add an explicit SetPageUptodate call 7413 * to silence the warning eg. on PowerPC 64. 7414 */ 7415 if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE) 7416 SetPageUptodate(sb->pages[0]); 7417 7418 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); 7419 array_size = btrfs_super_sys_array_size(super_copy); 7420 7421 array_ptr = super_copy->sys_chunk_array; 7422 sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array); 7423 cur_offset = 0; 7424 7425 while (cur_offset < array_size) { 7426 disk_key = (struct btrfs_disk_key *)array_ptr; 7427 len = sizeof(*disk_key); 7428 if (cur_offset + len > array_size) 7429 goto out_short_read; 7430 7431 btrfs_disk_key_to_cpu(&key, disk_key); 7432 7433 array_ptr += len; 7434 sb_array_offset += len; 7435 cur_offset += len; 7436 7437 if (key.type != BTRFS_CHUNK_ITEM_KEY) { 7438 btrfs_err(fs_info, 7439 "unexpected item type %u in sys_array at offset %u", 7440 (u32)key.type, cur_offset); 7441 ret = -EIO; 7442 break; 7443 } 7444 7445 chunk = (struct btrfs_chunk *)sb_array_offset; 7446 /* 7447 * At least one btrfs_chunk with one stripe must be present, 7448 * exact stripe count check comes afterwards 7449 */ 7450 len = btrfs_chunk_item_size(1); 7451 if (cur_offset + len > array_size) 7452 goto out_short_read; 7453 7454 num_stripes = btrfs_chunk_num_stripes(sb, chunk); 7455 if (!num_stripes) { 7456 btrfs_err(fs_info, 7457 "invalid number of stripes %u in sys_array at offset %u", 7458 num_stripes, cur_offset); 7459 ret = -EIO; 7460 break; 7461 } 7462 7463 type = btrfs_chunk_type(sb, chunk); 7464 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) { 7465 btrfs_err(fs_info, 7466 "invalid chunk type %llu in sys_array at offset %u", 7467 type, cur_offset); 7468 ret = -EIO; 7469 break; 7470 } 7471 7472 len = btrfs_chunk_item_size(num_stripes); 7473 if (cur_offset + len > array_size) 7474 goto out_short_read; 7475 7476 ret = read_one_chunk(&key, sb, chunk); 7477 if (ret) 7478 break; 7479 7480 array_ptr += len; 7481 sb_array_offset += len; 7482 cur_offset += len; 7483 } 7484 clear_extent_buffer_uptodate(sb); 7485 free_extent_buffer_stale(sb); 7486 return ret; 7487 7488 out_short_read: 7489 btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u", 7490 len, cur_offset); 7491 clear_extent_buffer_uptodate(sb); 7492 free_extent_buffer_stale(sb); 7493 return -EIO; 7494 } 7495 7496 /* 7497 * Check if all chunks in the fs are OK for read-write degraded mount 7498 * 7499 * If the @failing_dev is specified, it's accounted as missing. 7500 * 7501 * Return true if all chunks meet the minimal RW mount requirements. 7502 * Return false if any chunk doesn't meet the minimal RW mount requirements. 7503 */ 7504 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info, 7505 struct btrfs_device *failing_dev) 7506 { 7507 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 7508 struct extent_map *em; 7509 u64 next_start = 0; 7510 bool ret = true; 7511 7512 read_lock(&map_tree->lock); 7513 em = lookup_extent_mapping(map_tree, 0, (u64)-1); 7514 read_unlock(&map_tree->lock); 7515 /* No chunk at all? Return false anyway */ 7516 if (!em) { 7517 ret = false; 7518 goto out; 7519 } 7520 while (em) { 7521 struct map_lookup *map; 7522 int missing = 0; 7523 int max_tolerated; 7524 int i; 7525 7526 map = em->map_lookup; 7527 max_tolerated = 7528 btrfs_get_num_tolerated_disk_barrier_failures( 7529 map->type); 7530 for (i = 0; i < map->num_stripes; i++) { 7531 struct btrfs_device *dev = map->stripes[i].dev; 7532 7533 if (!dev || !dev->bdev || 7534 test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) || 7535 dev->last_flush_error) 7536 missing++; 7537 else if (failing_dev && failing_dev == dev) 7538 missing++; 7539 } 7540 if (missing > max_tolerated) { 7541 if (!failing_dev) 7542 btrfs_warn(fs_info, 7543 "chunk %llu missing %d devices, max tolerance is %d for writable mount", 7544 em->start, missing, max_tolerated); 7545 free_extent_map(em); 7546 ret = false; 7547 goto out; 7548 } 7549 next_start = extent_map_end(em); 7550 free_extent_map(em); 7551 7552 read_lock(&map_tree->lock); 7553 em = lookup_extent_mapping(map_tree, next_start, 7554 (u64)(-1) - next_start); 7555 read_unlock(&map_tree->lock); 7556 } 7557 out: 7558 return ret; 7559 } 7560 7561 static void readahead_tree_node_children(struct extent_buffer *node) 7562 { 7563 int i; 7564 const int nr_items = btrfs_header_nritems(node); 7565 7566 for (i = 0; i < nr_items; i++) 7567 btrfs_readahead_node_child(node, i); 7568 } 7569 7570 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) 7571 { 7572 struct btrfs_root *root = fs_info->chunk_root; 7573 struct btrfs_path *path; 7574 struct extent_buffer *leaf; 7575 struct btrfs_key key; 7576 struct btrfs_key found_key; 7577 int ret; 7578 int slot; 7579 u64 total_dev = 0; 7580 u64 last_ra_node = 0; 7581 7582 path = btrfs_alloc_path(); 7583 if (!path) 7584 return -ENOMEM; 7585 7586 /* 7587 * uuid_mutex is needed only if we are mounting a sprout FS 7588 * otherwise we don't need it. 7589 */ 7590 mutex_lock(&uuid_mutex); 7591 7592 /* 7593 * It is possible for mount and umount to race in such a way that 7594 * we execute this code path, but open_fs_devices failed to clear 7595 * total_rw_bytes. We certainly want it cleared before reading the 7596 * device items, so clear it here. 7597 */ 7598 fs_info->fs_devices->total_rw_bytes = 0; 7599 7600 /* 7601 * Lockdep complains about possible circular locking dependency between 7602 * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores 7603 * used for freeze procection of a fs (struct super_block.s_writers), 7604 * which we take when starting a transaction, and extent buffers of the 7605 * chunk tree if we call read_one_dev() while holding a lock on an 7606 * extent buffer of the chunk tree. Since we are mounting the filesystem 7607 * and at this point there can't be any concurrent task modifying the 7608 * chunk tree, to keep it simple, just skip locking on the chunk tree. 7609 */ 7610 ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags)); 7611 path->skip_locking = 1; 7612 7613 /* 7614 * Read all device items, and then all the chunk items. All 7615 * device items are found before any chunk item (their object id 7616 * is smaller than the lowest possible object id for a chunk 7617 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID). 7618 */ 7619 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 7620 key.offset = 0; 7621 key.type = 0; 7622 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 7623 if (ret < 0) 7624 goto error; 7625 while (1) { 7626 struct extent_buffer *node; 7627 7628 leaf = path->nodes[0]; 7629 slot = path->slots[0]; 7630 if (slot >= btrfs_header_nritems(leaf)) { 7631 ret = btrfs_next_leaf(root, path); 7632 if (ret == 0) 7633 continue; 7634 if (ret < 0) 7635 goto error; 7636 break; 7637 } 7638 node = path->nodes[1]; 7639 if (node) { 7640 if (last_ra_node != node->start) { 7641 readahead_tree_node_children(node); 7642 last_ra_node = node->start; 7643 } 7644 } 7645 btrfs_item_key_to_cpu(leaf, &found_key, slot); 7646 if (found_key.type == BTRFS_DEV_ITEM_KEY) { 7647 struct btrfs_dev_item *dev_item; 7648 dev_item = btrfs_item_ptr(leaf, slot, 7649 struct btrfs_dev_item); 7650 ret = read_one_dev(leaf, dev_item); 7651 if (ret) 7652 goto error; 7653 total_dev++; 7654 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { 7655 struct btrfs_chunk *chunk; 7656 7657 /* 7658 * We are only called at mount time, so no need to take 7659 * fs_info->chunk_mutex. Plus, to avoid lockdep warnings, 7660 * we always lock first fs_info->chunk_mutex before 7661 * acquiring any locks on the chunk tree. This is a 7662 * requirement for chunk allocation, see the comment on 7663 * top of btrfs_chunk_alloc() for details. 7664 */ 7665 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 7666 ret = read_one_chunk(&found_key, leaf, chunk); 7667 if (ret) 7668 goto error; 7669 } 7670 path->slots[0]++; 7671 } 7672 7673 /* 7674 * After loading chunk tree, we've got all device information, 7675 * do another round of validation checks. 7676 */ 7677 if (total_dev != fs_info->fs_devices->total_devices) { 7678 btrfs_err(fs_info, 7679 "super_num_devices %llu mismatch with num_devices %llu found here", 7680 btrfs_super_num_devices(fs_info->super_copy), 7681 total_dev); 7682 ret = -EINVAL; 7683 goto error; 7684 } 7685 if (btrfs_super_total_bytes(fs_info->super_copy) < 7686 fs_info->fs_devices->total_rw_bytes) { 7687 btrfs_err(fs_info, 7688 "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu", 7689 btrfs_super_total_bytes(fs_info->super_copy), 7690 fs_info->fs_devices->total_rw_bytes); 7691 ret = -EINVAL; 7692 goto error; 7693 } 7694 ret = 0; 7695 error: 7696 mutex_unlock(&uuid_mutex); 7697 7698 btrfs_free_path(path); 7699 return ret; 7700 } 7701 7702 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info) 7703 { 7704 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs; 7705 struct btrfs_device *device; 7706 7707 fs_devices->fs_info = fs_info; 7708 7709 mutex_lock(&fs_devices->device_list_mutex); 7710 list_for_each_entry(device, &fs_devices->devices, dev_list) 7711 device->fs_info = fs_info; 7712 7713 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 7714 list_for_each_entry(device, &seed_devs->devices, dev_list) 7715 device->fs_info = fs_info; 7716 7717 seed_devs->fs_info = fs_info; 7718 } 7719 mutex_unlock(&fs_devices->device_list_mutex); 7720 } 7721 7722 static u64 btrfs_dev_stats_value(const struct extent_buffer *eb, 7723 const struct btrfs_dev_stats_item *ptr, 7724 int index) 7725 { 7726 u64 val; 7727 7728 read_extent_buffer(eb, &val, 7729 offsetof(struct btrfs_dev_stats_item, values) + 7730 ((unsigned long)ptr) + (index * sizeof(u64)), 7731 sizeof(val)); 7732 return val; 7733 } 7734 7735 static void btrfs_set_dev_stats_value(struct extent_buffer *eb, 7736 struct btrfs_dev_stats_item *ptr, 7737 int index, u64 val) 7738 { 7739 write_extent_buffer(eb, &val, 7740 offsetof(struct btrfs_dev_stats_item, values) + 7741 ((unsigned long)ptr) + (index * sizeof(u64)), 7742 sizeof(val)); 7743 } 7744 7745 static int btrfs_device_init_dev_stats(struct btrfs_device *device, 7746 struct btrfs_path *path) 7747 { 7748 struct btrfs_dev_stats_item *ptr; 7749 struct extent_buffer *eb; 7750 struct btrfs_key key; 7751 int item_size; 7752 int i, ret, slot; 7753 7754 if (!device->fs_info->dev_root) 7755 return 0; 7756 7757 key.objectid = BTRFS_DEV_STATS_OBJECTID; 7758 key.type = BTRFS_PERSISTENT_ITEM_KEY; 7759 key.offset = device->devid; 7760 ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0); 7761 if (ret) { 7762 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7763 btrfs_dev_stat_set(device, i, 0); 7764 device->dev_stats_valid = 1; 7765 btrfs_release_path(path); 7766 return ret < 0 ? ret : 0; 7767 } 7768 slot = path->slots[0]; 7769 eb = path->nodes[0]; 7770 item_size = btrfs_item_size(eb, slot); 7771 7772 ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item); 7773 7774 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) { 7775 if (item_size >= (1 + i) * sizeof(__le64)) 7776 btrfs_dev_stat_set(device, i, 7777 btrfs_dev_stats_value(eb, ptr, i)); 7778 else 7779 btrfs_dev_stat_set(device, i, 0); 7780 } 7781 7782 device->dev_stats_valid = 1; 7783 btrfs_dev_stat_print_on_load(device); 7784 btrfs_release_path(path); 7785 7786 return 0; 7787 } 7788 7789 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info) 7790 { 7791 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs; 7792 struct btrfs_device *device; 7793 struct btrfs_path *path = NULL; 7794 int ret = 0; 7795 7796 path = btrfs_alloc_path(); 7797 if (!path) 7798 return -ENOMEM; 7799 7800 mutex_lock(&fs_devices->device_list_mutex); 7801 list_for_each_entry(device, &fs_devices->devices, dev_list) { 7802 ret = btrfs_device_init_dev_stats(device, path); 7803 if (ret) 7804 goto out; 7805 } 7806 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 7807 list_for_each_entry(device, &seed_devs->devices, dev_list) { 7808 ret = btrfs_device_init_dev_stats(device, path); 7809 if (ret) 7810 goto out; 7811 } 7812 } 7813 out: 7814 mutex_unlock(&fs_devices->device_list_mutex); 7815 7816 btrfs_free_path(path); 7817 return ret; 7818 } 7819 7820 static int update_dev_stat_item(struct btrfs_trans_handle *trans, 7821 struct btrfs_device *device) 7822 { 7823 struct btrfs_fs_info *fs_info = trans->fs_info; 7824 struct btrfs_root *dev_root = fs_info->dev_root; 7825 struct btrfs_path *path; 7826 struct btrfs_key key; 7827 struct extent_buffer *eb; 7828 struct btrfs_dev_stats_item *ptr; 7829 int ret; 7830 int i; 7831 7832 key.objectid = BTRFS_DEV_STATS_OBJECTID; 7833 key.type = BTRFS_PERSISTENT_ITEM_KEY; 7834 key.offset = device->devid; 7835 7836 path = btrfs_alloc_path(); 7837 if (!path) 7838 return -ENOMEM; 7839 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1); 7840 if (ret < 0) { 7841 btrfs_warn_in_rcu(fs_info, 7842 "error %d while searching for dev_stats item for device %s", 7843 ret, rcu_str_deref(device->name)); 7844 goto out; 7845 } 7846 7847 if (ret == 0 && 7848 btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) { 7849 /* need to delete old one and insert a new one */ 7850 ret = btrfs_del_item(trans, dev_root, path); 7851 if (ret != 0) { 7852 btrfs_warn_in_rcu(fs_info, 7853 "delete too small dev_stats item for device %s failed %d", 7854 rcu_str_deref(device->name), ret); 7855 goto out; 7856 } 7857 ret = 1; 7858 } 7859 7860 if (ret == 1) { 7861 /* need to insert a new item */ 7862 btrfs_release_path(path); 7863 ret = btrfs_insert_empty_item(trans, dev_root, path, 7864 &key, sizeof(*ptr)); 7865 if (ret < 0) { 7866 btrfs_warn_in_rcu(fs_info, 7867 "insert dev_stats item for device %s failed %d", 7868 rcu_str_deref(device->name), ret); 7869 goto out; 7870 } 7871 } 7872 7873 eb = path->nodes[0]; 7874 ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item); 7875 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7876 btrfs_set_dev_stats_value(eb, ptr, i, 7877 btrfs_dev_stat_read(device, i)); 7878 btrfs_mark_buffer_dirty(eb); 7879 7880 out: 7881 btrfs_free_path(path); 7882 return ret; 7883 } 7884 7885 /* 7886 * called from commit_transaction. Writes all changed device stats to disk. 7887 */ 7888 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans) 7889 { 7890 struct btrfs_fs_info *fs_info = trans->fs_info; 7891 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7892 struct btrfs_device *device; 7893 int stats_cnt; 7894 int ret = 0; 7895 7896 mutex_lock(&fs_devices->device_list_mutex); 7897 list_for_each_entry(device, &fs_devices->devices, dev_list) { 7898 stats_cnt = atomic_read(&device->dev_stats_ccnt); 7899 if (!device->dev_stats_valid || stats_cnt == 0) 7900 continue; 7901 7902 7903 /* 7904 * There is a LOAD-LOAD control dependency between the value of 7905 * dev_stats_ccnt and updating the on-disk values which requires 7906 * reading the in-memory counters. Such control dependencies 7907 * require explicit read memory barriers. 7908 * 7909 * This memory barriers pairs with smp_mb__before_atomic in 7910 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full 7911 * barrier implied by atomic_xchg in 7912 * btrfs_dev_stats_read_and_reset 7913 */ 7914 smp_rmb(); 7915 7916 ret = update_dev_stat_item(trans, device); 7917 if (!ret) 7918 atomic_sub(stats_cnt, &device->dev_stats_ccnt); 7919 } 7920 mutex_unlock(&fs_devices->device_list_mutex); 7921 7922 return ret; 7923 } 7924 7925 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index) 7926 { 7927 btrfs_dev_stat_inc(dev, index); 7928 btrfs_dev_stat_print_on_error(dev); 7929 } 7930 7931 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev) 7932 { 7933 if (!dev->dev_stats_valid) 7934 return; 7935 btrfs_err_rl_in_rcu(dev->fs_info, 7936 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u", 7937 rcu_str_deref(dev->name), 7938 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), 7939 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), 7940 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), 7941 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS), 7942 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS)); 7943 } 7944 7945 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev) 7946 { 7947 int i; 7948 7949 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7950 if (btrfs_dev_stat_read(dev, i) != 0) 7951 break; 7952 if (i == BTRFS_DEV_STAT_VALUES_MAX) 7953 return; /* all values == 0, suppress message */ 7954 7955 btrfs_info_in_rcu(dev->fs_info, 7956 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u", 7957 rcu_str_deref(dev->name), 7958 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), 7959 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), 7960 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), 7961 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS), 7962 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS)); 7963 } 7964 7965 int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info, 7966 struct btrfs_ioctl_get_dev_stats *stats) 7967 { 7968 BTRFS_DEV_LOOKUP_ARGS(args); 7969 struct btrfs_device *dev; 7970 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7971 int i; 7972 7973 mutex_lock(&fs_devices->device_list_mutex); 7974 args.devid = stats->devid; 7975 dev = btrfs_find_device(fs_info->fs_devices, &args); 7976 mutex_unlock(&fs_devices->device_list_mutex); 7977 7978 if (!dev) { 7979 btrfs_warn(fs_info, "get dev_stats failed, device not found"); 7980 return -ENODEV; 7981 } else if (!dev->dev_stats_valid) { 7982 btrfs_warn(fs_info, "get dev_stats failed, not yet valid"); 7983 return -ENODEV; 7984 } else if (stats->flags & BTRFS_DEV_STATS_RESET) { 7985 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) { 7986 if (stats->nr_items > i) 7987 stats->values[i] = 7988 btrfs_dev_stat_read_and_reset(dev, i); 7989 else 7990 btrfs_dev_stat_set(dev, i, 0); 7991 } 7992 btrfs_info(fs_info, "device stats zeroed by %s (%d)", 7993 current->comm, task_pid_nr(current)); 7994 } else { 7995 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7996 if (stats->nr_items > i) 7997 stats->values[i] = btrfs_dev_stat_read(dev, i); 7998 } 7999 if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX) 8000 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX; 8001 return 0; 8002 } 8003 8004 /* 8005 * Update the size and bytes used for each device where it changed. This is 8006 * delayed since we would otherwise get errors while writing out the 8007 * superblocks. 8008 * 8009 * Must be invoked during transaction commit. 8010 */ 8011 void btrfs_commit_device_sizes(struct btrfs_transaction *trans) 8012 { 8013 struct btrfs_device *curr, *next; 8014 8015 ASSERT(trans->state == TRANS_STATE_COMMIT_DOING); 8016 8017 if (list_empty(&trans->dev_update_list)) 8018 return; 8019 8020 /* 8021 * We don't need the device_list_mutex here. This list is owned by the 8022 * transaction and the transaction must complete before the device is 8023 * released. 8024 */ 8025 mutex_lock(&trans->fs_info->chunk_mutex); 8026 list_for_each_entry_safe(curr, next, &trans->dev_update_list, 8027 post_commit_list) { 8028 list_del_init(&curr->post_commit_list); 8029 curr->commit_total_bytes = curr->disk_total_bytes; 8030 curr->commit_bytes_used = curr->bytes_used; 8031 } 8032 mutex_unlock(&trans->fs_info->chunk_mutex); 8033 } 8034 8035 /* 8036 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10. 8037 */ 8038 int btrfs_bg_type_to_factor(u64 flags) 8039 { 8040 const int index = btrfs_bg_flags_to_raid_index(flags); 8041 8042 return btrfs_raid_array[index].ncopies; 8043 } 8044 8045 8046 8047 static int verify_one_dev_extent(struct btrfs_fs_info *fs_info, 8048 u64 chunk_offset, u64 devid, 8049 u64 physical_offset, u64 physical_len) 8050 { 8051 struct btrfs_dev_lookup_args args = { .devid = devid }; 8052 struct extent_map_tree *em_tree = &fs_info->mapping_tree; 8053 struct extent_map *em; 8054 struct map_lookup *map; 8055 struct btrfs_device *dev; 8056 u64 stripe_len; 8057 bool found = false; 8058 int ret = 0; 8059 int i; 8060 8061 read_lock(&em_tree->lock); 8062 em = lookup_extent_mapping(em_tree, chunk_offset, 1); 8063 read_unlock(&em_tree->lock); 8064 8065 if (!em) { 8066 btrfs_err(fs_info, 8067 "dev extent physical offset %llu on devid %llu doesn't have corresponding chunk", 8068 physical_offset, devid); 8069 ret = -EUCLEAN; 8070 goto out; 8071 } 8072 8073 map = em->map_lookup; 8074 stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes); 8075 if (physical_len != stripe_len) { 8076 btrfs_err(fs_info, 8077 "dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu", 8078 physical_offset, devid, em->start, physical_len, 8079 stripe_len); 8080 ret = -EUCLEAN; 8081 goto out; 8082 } 8083 8084 for (i = 0; i < map->num_stripes; i++) { 8085 if (map->stripes[i].dev->devid == devid && 8086 map->stripes[i].physical == physical_offset) { 8087 found = true; 8088 if (map->verified_stripes >= map->num_stripes) { 8089 btrfs_err(fs_info, 8090 "too many dev extents for chunk %llu found", 8091 em->start); 8092 ret = -EUCLEAN; 8093 goto out; 8094 } 8095 map->verified_stripes++; 8096 break; 8097 } 8098 } 8099 if (!found) { 8100 btrfs_err(fs_info, 8101 "dev extent physical offset %llu devid %llu has no corresponding chunk", 8102 physical_offset, devid); 8103 ret = -EUCLEAN; 8104 } 8105 8106 /* Make sure no dev extent is beyond device boundary */ 8107 dev = btrfs_find_device(fs_info->fs_devices, &args); 8108 if (!dev) { 8109 btrfs_err(fs_info, "failed to find devid %llu", devid); 8110 ret = -EUCLEAN; 8111 goto out; 8112 } 8113 8114 if (physical_offset + physical_len > dev->disk_total_bytes) { 8115 btrfs_err(fs_info, 8116 "dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu", 8117 devid, physical_offset, physical_len, 8118 dev->disk_total_bytes); 8119 ret = -EUCLEAN; 8120 goto out; 8121 } 8122 8123 if (dev->zone_info) { 8124 u64 zone_size = dev->zone_info->zone_size; 8125 8126 if (!IS_ALIGNED(physical_offset, zone_size) || 8127 !IS_ALIGNED(physical_len, zone_size)) { 8128 btrfs_err(fs_info, 8129 "zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone", 8130 devid, physical_offset, physical_len); 8131 ret = -EUCLEAN; 8132 goto out; 8133 } 8134 } 8135 8136 out: 8137 free_extent_map(em); 8138 return ret; 8139 } 8140 8141 static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info) 8142 { 8143 struct extent_map_tree *em_tree = &fs_info->mapping_tree; 8144 struct extent_map *em; 8145 struct rb_node *node; 8146 int ret = 0; 8147 8148 read_lock(&em_tree->lock); 8149 for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) { 8150 em = rb_entry(node, struct extent_map, rb_node); 8151 if (em->map_lookup->num_stripes != 8152 em->map_lookup->verified_stripes) { 8153 btrfs_err(fs_info, 8154 "chunk %llu has missing dev extent, have %d expect %d", 8155 em->start, em->map_lookup->verified_stripes, 8156 em->map_lookup->num_stripes); 8157 ret = -EUCLEAN; 8158 goto out; 8159 } 8160 } 8161 out: 8162 read_unlock(&em_tree->lock); 8163 return ret; 8164 } 8165 8166 /* 8167 * Ensure that all dev extents are mapped to correct chunk, otherwise 8168 * later chunk allocation/free would cause unexpected behavior. 8169 * 8170 * NOTE: This will iterate through the whole device tree, which should be of 8171 * the same size level as the chunk tree. This slightly increases mount time. 8172 */ 8173 int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info) 8174 { 8175 struct btrfs_path *path; 8176 struct btrfs_root *root = fs_info->dev_root; 8177 struct btrfs_key key; 8178 u64 prev_devid = 0; 8179 u64 prev_dev_ext_end = 0; 8180 int ret = 0; 8181 8182 /* 8183 * We don't have a dev_root because we mounted with ignorebadroots and 8184 * failed to load the root, so we want to skip the verification in this 8185 * case for sure. 8186 * 8187 * However if the dev root is fine, but the tree itself is corrupted 8188 * we'd still fail to mount. This verification is only to make sure 8189 * writes can happen safely, so instead just bypass this check 8190 * completely in the case of IGNOREBADROOTS. 8191 */ 8192 if (btrfs_test_opt(fs_info, IGNOREBADROOTS)) 8193 return 0; 8194 8195 key.objectid = 1; 8196 key.type = BTRFS_DEV_EXTENT_KEY; 8197 key.offset = 0; 8198 8199 path = btrfs_alloc_path(); 8200 if (!path) 8201 return -ENOMEM; 8202 8203 path->reada = READA_FORWARD; 8204 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 8205 if (ret < 0) 8206 goto out; 8207 8208 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { 8209 ret = btrfs_next_leaf(root, path); 8210 if (ret < 0) 8211 goto out; 8212 /* No dev extents at all? Not good */ 8213 if (ret > 0) { 8214 ret = -EUCLEAN; 8215 goto out; 8216 } 8217 } 8218 while (1) { 8219 struct extent_buffer *leaf = path->nodes[0]; 8220 struct btrfs_dev_extent *dext; 8221 int slot = path->slots[0]; 8222 u64 chunk_offset; 8223 u64 physical_offset; 8224 u64 physical_len; 8225 u64 devid; 8226 8227 btrfs_item_key_to_cpu(leaf, &key, slot); 8228 if (key.type != BTRFS_DEV_EXTENT_KEY) 8229 break; 8230 devid = key.objectid; 8231 physical_offset = key.offset; 8232 8233 dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent); 8234 chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext); 8235 physical_len = btrfs_dev_extent_length(leaf, dext); 8236 8237 /* Check if this dev extent overlaps with the previous one */ 8238 if (devid == prev_devid && physical_offset < prev_dev_ext_end) { 8239 btrfs_err(fs_info, 8240 "dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu", 8241 devid, physical_offset, prev_dev_ext_end); 8242 ret = -EUCLEAN; 8243 goto out; 8244 } 8245 8246 ret = verify_one_dev_extent(fs_info, chunk_offset, devid, 8247 physical_offset, physical_len); 8248 if (ret < 0) 8249 goto out; 8250 prev_devid = devid; 8251 prev_dev_ext_end = physical_offset + physical_len; 8252 8253 ret = btrfs_next_item(root, path); 8254 if (ret < 0) 8255 goto out; 8256 if (ret > 0) { 8257 ret = 0; 8258 break; 8259 } 8260 } 8261 8262 /* Ensure all chunks have corresponding dev extents */ 8263 ret = verify_chunk_dev_extent_mapping(fs_info); 8264 out: 8265 btrfs_free_path(path); 8266 return ret; 8267 } 8268 8269 /* 8270 * Check whether the given block group or device is pinned by any inode being 8271 * used as a swapfile. 8272 */ 8273 bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr) 8274 { 8275 struct btrfs_swapfile_pin *sp; 8276 struct rb_node *node; 8277 8278 spin_lock(&fs_info->swapfile_pins_lock); 8279 node = fs_info->swapfile_pins.rb_node; 8280 while (node) { 8281 sp = rb_entry(node, struct btrfs_swapfile_pin, node); 8282 if (ptr < sp->ptr) 8283 node = node->rb_left; 8284 else if (ptr > sp->ptr) 8285 node = node->rb_right; 8286 else 8287 break; 8288 } 8289 spin_unlock(&fs_info->swapfile_pins_lock); 8290 return node != NULL; 8291 } 8292 8293 static int relocating_repair_kthread(void *data) 8294 { 8295 struct btrfs_block_group *cache = (struct btrfs_block_group *)data; 8296 struct btrfs_fs_info *fs_info = cache->fs_info; 8297 u64 target; 8298 int ret = 0; 8299 8300 target = cache->start; 8301 btrfs_put_block_group(cache); 8302 8303 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) { 8304 btrfs_info(fs_info, 8305 "zoned: skip relocating block group %llu to repair: EBUSY", 8306 target); 8307 return -EBUSY; 8308 } 8309 8310 mutex_lock(&fs_info->reclaim_bgs_lock); 8311 8312 /* Ensure block group still exists */ 8313 cache = btrfs_lookup_block_group(fs_info, target); 8314 if (!cache) 8315 goto out; 8316 8317 if (!cache->relocating_repair) 8318 goto out; 8319 8320 ret = btrfs_may_alloc_data_chunk(fs_info, target); 8321 if (ret < 0) 8322 goto out; 8323 8324 btrfs_info(fs_info, 8325 "zoned: relocating block group %llu to repair IO failure", 8326 target); 8327 ret = btrfs_relocate_chunk(fs_info, target); 8328 8329 out: 8330 if (cache) 8331 btrfs_put_block_group(cache); 8332 mutex_unlock(&fs_info->reclaim_bgs_lock); 8333 btrfs_exclop_finish(fs_info); 8334 8335 return ret; 8336 } 8337 8338 int btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical) 8339 { 8340 struct btrfs_block_group *cache; 8341 8342 /* Do not attempt to repair in degraded state */ 8343 if (btrfs_test_opt(fs_info, DEGRADED)) 8344 return 0; 8345 8346 cache = btrfs_lookup_block_group(fs_info, logical); 8347 if (!cache) 8348 return 0; 8349 8350 spin_lock(&cache->lock); 8351 if (cache->relocating_repair) { 8352 spin_unlock(&cache->lock); 8353 btrfs_put_block_group(cache); 8354 return 0; 8355 } 8356 cache->relocating_repair = 1; 8357 spin_unlock(&cache->lock); 8358 8359 kthread_run(relocating_repair_kthread, cache, 8360 "btrfs-relocating-repair"); 8361 8362 return 0; 8363 } 8364