// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include "misc.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"
#include "discard.h"
#include "zoned.h"

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes	= 2,
		.dev_stripes	= 1,
		.devs_max	= 0,	/* 0 == as many as possible */
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "raid10",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 2,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "raid1",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C3] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 3,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 3,
		.ncopies	= 3,
		.nparity	= 0,
		.raid_name	= "raid1c3",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C3,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C4] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 4,
		.devs_min	= 4,
		.tolerated_failures = 3,
		.devs_increment	= 4,
		.ncopies	= 4,
		.nparity	= 0,
		.raid_name	= "raid1c4",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C4,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes	= 1,
		.dev_stripes	= 2,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "dup",
		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 0,
		.raid_name	= "raid0",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 0,
		.raid_name	= "single",
		.bg_flag	= 0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 1,
		.raid_name	= "raid5",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 2,
		.raid_name	= "raid6",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};
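
/*
 * Reading the table above: ncopies is how many complete copies of each byte
 * are kept and nparity how many stripes of a full stripe hold parity instead
 * of data. As a rough sketch (assuming equally sized devices): raid1 with
 * ncopies=2 stores everything twice, so usable space is about half of the
 * raw space, while raid6 across N devices has N - nparity = N - 2 data
 * stripes per full stripe. tolerated_failures is how many devices may be
 * lost without losing data.
 */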
/*
 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
 * can be used as index to access btrfs_raid_array[].
 */
enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags)
{
	if (flags & BTRFS_BLOCK_GROUP_RAID10)
		return BTRFS_RAID_RAID10;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
		return BTRFS_RAID_RAID1;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1C3)
		return BTRFS_RAID_RAID1C3;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1C4)
		return BTRFS_RAID_RAID1C4;
	else if (flags & BTRFS_BLOCK_GROUP_DUP)
		return BTRFS_RAID_DUP;
	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
		return BTRFS_RAID_RAID0;
	else if (flags & BTRFS_BLOCK_GROUP_RAID5)
		return BTRFS_RAID_RAID5;
	else if (flags & BTRFS_BLOCK_GROUP_RAID6)
		return BTRFS_RAID_RAID6;

	return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
}

const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}

/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

#define DESCRIBE_FLAG(flag, desc)					\
	do {								\
		if (flags & (flag)) {					\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)			\
				goto out_overflow;			\
			size_bp -= ret;					\
			bp += ret;					\
			flags &= ~(flag);				\
		}							\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide sufficiently
	 * large buffer
	 */
out_overflow:;
}

static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_io_context **bioc_ret,
			     int mirror_num, int need_raid_map);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list in general
 * but in mount context it could be used to exclude list modifications by eg.
 * scan ioctl
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, ie. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * Is not required at mount and close times, because our device list is
 * protected by the uuid_mutex at that point.
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   device_list_mutex
 *     chunk_mutex
 *   balance_mutex
 *
 *
 * Exclusive operations
 * ====================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * The status of exclusive operation is set and cleared atomically.
 * During the course of Paused state, fs_info::exclusive_operation remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * The exclusive status is cleared when the device operation is canceled or
 * completed.
 */
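
/*
 * A minimal sketch (hypothetical call site, not code from this file) of the
 * nesting order documented above; an inner lock is only taken while the outer
 * ones are already held, never the other way around:
 *
 *	mutex_lock(&uuid_mutex);
 *	mutex_lock(&fs_devices->device_list_mutex);
 *	mutex_lock(&fs_info->chunk_mutex);
 *	...
 *	mutex_unlock(&fs_info->chunk_mutex);
 *	mutex_unlock(&fs_devices->device_list_mutex);
 *	mutex_unlock(&uuid_mutex);
 */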
DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:		if not NULL, copy the UUID to fs_devices::fsid
 * @metadata_fsid:	if not NULL, copy the UUID to fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
						 const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	INIT_LIST_HEAD(&fs_devs->seed_list);
	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);

	if (metadata_fsid)
		memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
	else if (fsid)
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);

	return fs_devs;
}

void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	extent_io_tree_release(&device->alloc_state);
	bio_put(device->flush_bio);
	btrfs_destroy_dev_zone_info(device);
	kfree(device);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}

void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}

static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (metadata_fsid) {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
			    && memcmp(metadata_fsid, fs_devices->metadata_uuid,
				      BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		} else {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		}
	}
	return NULL;
}

static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
				struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by first scanning
	 * a device which didn't have its fsid/metadata_uuid changed
	 * at all and the CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(disk_super->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}
	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by a device that
	 * has an outdated pair of fsid/metadata_uuid and
	 * CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(fs_devices->metadata_uuid,
			   fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
		    memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
}
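
/*
 * Open the block device at @device_path exclusively for @holder, optionally
 * flush dirty pagecache pages, set the block size to BTRFS_BDEV_BLOCKSIZE and
 * read the primary super block. On success the caller owns *bdev (to be
 * released with blkdev_put()) and *disk_super (to be released with
 * btrfs_release_disk_super()).
 */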
static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct btrfs_super_block **disk_super)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*disk_super = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*disk_super)) {
		ret = PTR_ERR(*disk_super);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	return ret;
}
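
/*
 * Check whether @path is the current name of @device. The name is an RCU
 * string, so it has to be dereferenced under rcu_read_lock().
 */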
static bool device_path_matched(const char *path, struct btrfs_device *device)
{
	int found;

	rcu_read_lock();
	found = strcmp(rcu_str_deref(device->name), path);
	rcu_read_unlock();

	return found == 0;
}

/*
 * Search and remove all stale (devices which are not mounted) devices.
 * When both inputs are NULL, it will search and release all stale devices.
 * path:	Optional. When provided, it will release only the unmounted
 *		devices matching this path.
 * skip_dev:	Optional. Will skip this device when searching for the stale
 *		devices.
 * Return:	0 for success or if @path is NULL.
 *		-EBUSY if @path is a mounted device.
 *		-ENOENT if @path does not match any device in the list.
 */
static int btrfs_free_stale_devices(const char *path,
				    struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	if (path)
		ret = -ENOENT;

	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (path && !device->name)
				continue;
			if (path && !device_path_matched(path, device))
				continue;
			if (fs_devices->opened) {
				/* for an already deleted device return 0 */
				if (path && ret != 0)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			ret = 0;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	return ret;
}

/*
 * This is only used on mount, and we are protected from competing things
 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 * fs_devices->device_list_mutex here.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
			struct btrfs_device *device, fmode_t flags,
			void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev, &disk_super);
	if (ret)
		return ret;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_free_page;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_free_page;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_free_page;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = true;
	} else {
		if (bdev_read_only(bdev))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	q = bdev_get_queue(bdev);
	if (!blk_queue_nonrot(q))
		fs_devices->rotating = true;

	device->bdev = bdev;
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	device->mode = flags;

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	btrfs_release_disk_super(disk_super);

	return 0;

error_free_page:
	btrfs_release_disk_super(disk_super);
	blkdev_put(bdev, flags);

	return -EINVAL;
}
/*
 * Handle scanned device having its CHANGING_FSID_V2 flag set and the
 * fs_devices being created with a disk that has already completed its fsid
 * change. Such a disk can belong to an fs which has its FSID changed or to
 * one which doesn't. Handle both cases here.
 */
static struct btrfs_fs_devices *find_fsid_inprogress(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, NULL);
}

static struct btrfs_fs_devices *find_fsid_changed(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle the case where the scanned device is part of an fs that had
	 * multiple successful changes of FSID but the currently scanned device
	 * didn't observe it, meaning its fsid will be different from theirs.
	 * We need to handle two subcases:
	 * 1 - The fs still continues to have different METADATA/FSID uuids.
	 * 2 - The fs is switched back to its original FSID (METADATA/FSID
	 *     are equal).
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		/* Changed UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->fsid,
			   BTRFS_FSID_SIZE) != 0)
			return fs_devices;

		/* Unchanged UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}

	return NULL;
}
static struct btrfs_fs_devices *find_fsid_reverted_metadata(
				struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle the case where the scanned device is part of an fs whose last
	 * metadata UUID change reverted it to the original FSID. At the same
	 * time fs_devices was first created by another constituent device
	 * which didn't fully observe the operation. This results in a
	 * btrfs_fs_devices created with metadata/fsid different AND
	 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
	 * fs_devices equal to the FSID of the disk.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    fs_devices->fsid_change)
			return fs_devices;
	}

	return NULL;
}

/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
					BTRFS_SUPER_FLAG_CHANGING_FSID_V2);

	if (fsid_change_in_progress) {
		if (!has_metadata_uuid)
			fs_devices = find_fsid_inprogress(disk_super);
		else
			fs_devices = find_fsid_changed(disk_super);
	} else if (has_metadata_uuid) {
		fs_devices = find_fsid_with_metadata_uuid(disk_super);
	} else {
		fs_devices = find_fsid_reverted_metadata(disk_super);
		if (!fs_devices)
			fs_devices = find_fsid(disk_super->fsid, NULL);
	}

	if (!fs_devices) {
		if (has_metadata_uuid)
			fs_devices = alloc_fs_devices(disk_super->fsid,
						      disk_super->metadata_uuid);
		else
			fs_devices = alloc_fs_devices(disk_super->fsid, NULL);

		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		fs_devices->fsid_change = fsid_change_in_progress;

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		struct btrfs_dev_lookup_args args = {
			.devid = devid,
			.uuid = disk_super->dev_item.uuid,
		};

		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, &args);

		/*
		 * If this disk has been pulled into an fs devices created by
		 * a device which had the CHANGING_FSID_V2 flag then replace the
		 * metadata_uuid/fsid values of the fs_devices.
		 */
827 */ 828 if (fs_devices->fsid_change && 829 found_transid > fs_devices->latest_generation) { 830 memcpy(fs_devices->fsid, disk_super->fsid, 831 BTRFS_FSID_SIZE); 832 833 if (has_metadata_uuid) 834 memcpy(fs_devices->metadata_uuid, 835 disk_super->metadata_uuid, 836 BTRFS_FSID_SIZE); 837 else 838 memcpy(fs_devices->metadata_uuid, 839 disk_super->fsid, BTRFS_FSID_SIZE); 840 841 fs_devices->fsid_change = false; 842 } 843 } 844 845 if (!device) { 846 if (fs_devices->opened) { 847 mutex_unlock(&fs_devices->device_list_mutex); 848 return ERR_PTR(-EBUSY); 849 } 850 851 device = btrfs_alloc_device(NULL, &devid, 852 disk_super->dev_item.uuid); 853 if (IS_ERR(device)) { 854 mutex_unlock(&fs_devices->device_list_mutex); 855 /* we can safely leave the fs_devices entry around */ 856 return device; 857 } 858 859 name = rcu_string_strdup(path, GFP_NOFS); 860 if (!name) { 861 btrfs_free_device(device); 862 mutex_unlock(&fs_devices->device_list_mutex); 863 return ERR_PTR(-ENOMEM); 864 } 865 rcu_assign_pointer(device->name, name); 866 867 list_add_rcu(&device->dev_list, &fs_devices->devices); 868 fs_devices->num_devices++; 869 870 device->fs_devices = fs_devices; 871 *new_device_added = true; 872 873 if (disk_super->label[0]) 874 pr_info( 875 "BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n", 876 disk_super->label, devid, found_transid, path, 877 current->comm, task_pid_nr(current)); 878 else 879 pr_info( 880 "BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n", 881 disk_super->fsid, devid, found_transid, path, 882 current->comm, task_pid_nr(current)); 883 884 } else if (!device->name || strcmp(device->name->str, path)) { 885 /* 886 * When FS is already mounted. 887 * 1. If you are here and if the device->name is NULL that 888 * means this device was missing at time of FS mount. 889 * 2. If you are here and if the device->name is different 890 * from 'path' that means either 891 * a. The same device disappeared and reappeared with 892 * different name. or 893 * b. The missing-disk-which-was-replaced, has 894 * reappeared now. 895 * 896 * We must allow 1 and 2a above. But 2b would be a spurious 897 * and unintentional. 898 * 899 * Further in case of 1 and 2a above, the disk at 'path' 900 * would have missed some transaction when it was away and 901 * in case of 2a the stale bdev has to be updated as well. 902 * 2b must not be allowed at all time. 903 */ 904 905 /* 906 * For now, we do allow update to btrfs_fs_device through the 907 * btrfs dev scan cli after FS has been mounted. We're still 908 * tracking a problem where systems fail mount by subvolume id 909 * when we reject replacement on a mounted FS. 910 */ 911 if (!fs_devices->opened && found_transid < device->generation) { 912 /* 913 * That is if the FS is _not_ mounted and if you 914 * are here, that means there is more than one 915 * disk with same uuid and devid.We keep the one 916 * with larger generation number or the last-in if 917 * generation are equal. 
918 */ 919 mutex_unlock(&fs_devices->device_list_mutex); 920 return ERR_PTR(-EEXIST); 921 } 922 923 /* 924 * We are going to replace the device path for a given devid, 925 * make sure it's the same device if the device is mounted 926 */ 927 if (device->bdev) { 928 int error; 929 dev_t path_dev; 930 931 error = lookup_bdev(path, &path_dev); 932 if (error) { 933 mutex_unlock(&fs_devices->device_list_mutex); 934 return ERR_PTR(error); 935 } 936 937 if (device->bdev->bd_dev != path_dev) { 938 mutex_unlock(&fs_devices->device_list_mutex); 939 /* 940 * device->fs_info may not be reliable here, so 941 * pass in a NULL instead. This avoids a 942 * possible use-after-free when the fs_info and 943 * fs_info->sb are already torn down. 944 */ 945 btrfs_warn_in_rcu(NULL, 946 "duplicate device %s devid %llu generation %llu scanned by %s (%d)", 947 path, devid, found_transid, 948 current->comm, 949 task_pid_nr(current)); 950 return ERR_PTR(-EEXIST); 951 } 952 btrfs_info_in_rcu(device->fs_info, 953 "devid %llu device path %s changed to %s scanned by %s (%d)", 954 devid, rcu_str_deref(device->name), 955 path, current->comm, 956 task_pid_nr(current)); 957 } 958 959 name = rcu_string_strdup(path, GFP_NOFS); 960 if (!name) { 961 mutex_unlock(&fs_devices->device_list_mutex); 962 return ERR_PTR(-ENOMEM); 963 } 964 rcu_string_free(device->name); 965 rcu_assign_pointer(device->name, name); 966 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 967 fs_devices->missing_devices--; 968 clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 969 } 970 } 971 972 /* 973 * Unmount does not free the btrfs_device struct but would zero 974 * generation along with most of the other members. So just update 975 * it back. We need it to pick the disk with largest generation 976 * (as above). 977 */ 978 if (!fs_devices->opened) { 979 device->generation = found_transid; 980 fs_devices->latest_generation = max_t(u64, found_transid, 981 fs_devices->latest_generation); 982 } 983 984 fs_devices->total_devices = btrfs_super_num_devices(disk_super); 985 986 mutex_unlock(&fs_devices->device_list_mutex); 987 return device; 988 } 989 990 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig) 991 { 992 struct btrfs_fs_devices *fs_devices; 993 struct btrfs_device *device; 994 struct btrfs_device *orig_dev; 995 int ret = 0; 996 997 lockdep_assert_held(&uuid_mutex); 998 999 fs_devices = alloc_fs_devices(orig->fsid, NULL); 1000 if (IS_ERR(fs_devices)) 1001 return fs_devices; 1002 1003 fs_devices->total_devices = orig->total_devices; 1004 1005 list_for_each_entry(orig_dev, &orig->devices, dev_list) { 1006 struct rcu_string *name; 1007 1008 device = btrfs_alloc_device(NULL, &orig_dev->devid, 1009 orig_dev->uuid); 1010 if (IS_ERR(device)) { 1011 ret = PTR_ERR(device); 1012 goto error; 1013 } 1014 1015 /* 1016 * This is ok to do without rcu read locked because we hold the 1017 * uuid mutex so nothing we touch in here is going to disappear. 
1018 */ 1019 if (orig_dev->name) { 1020 name = rcu_string_strdup(orig_dev->name->str, 1021 GFP_KERNEL); 1022 if (!name) { 1023 btrfs_free_device(device); 1024 ret = -ENOMEM; 1025 goto error; 1026 } 1027 rcu_assign_pointer(device->name, name); 1028 } 1029 1030 list_add(&device->dev_list, &fs_devices->devices); 1031 device->fs_devices = fs_devices; 1032 fs_devices->num_devices++; 1033 } 1034 return fs_devices; 1035 error: 1036 free_fs_devices(fs_devices); 1037 return ERR_PTR(ret); 1038 } 1039 1040 static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, 1041 struct btrfs_device **latest_dev) 1042 { 1043 struct btrfs_device *device, *next; 1044 1045 /* This is the initialized path, it is safe to release the devices. */ 1046 list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) { 1047 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) { 1048 if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, 1049 &device->dev_state) && 1050 !test_bit(BTRFS_DEV_STATE_MISSING, 1051 &device->dev_state) && 1052 (!*latest_dev || 1053 device->generation > (*latest_dev)->generation)) { 1054 *latest_dev = device; 1055 } 1056 continue; 1057 } 1058 1059 /* 1060 * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID, 1061 * in btrfs_init_dev_replace() so just continue. 1062 */ 1063 if (device->devid == BTRFS_DEV_REPLACE_DEVID) 1064 continue; 1065 1066 if (device->bdev) { 1067 blkdev_put(device->bdev, device->mode); 1068 device->bdev = NULL; 1069 fs_devices->open_devices--; 1070 } 1071 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 1072 list_del_init(&device->dev_alloc_list); 1073 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 1074 fs_devices->rw_devices--; 1075 } 1076 list_del_init(&device->dev_list); 1077 fs_devices->num_devices--; 1078 btrfs_free_device(device); 1079 } 1080 1081 } 1082 1083 /* 1084 * After we have read the system tree and know devids belonging to this 1085 * filesystem, remove the device which does not belong there. 
1086 */ 1087 void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices) 1088 { 1089 struct btrfs_device *latest_dev = NULL; 1090 struct btrfs_fs_devices *seed_dev; 1091 1092 mutex_lock(&uuid_mutex); 1093 __btrfs_free_extra_devids(fs_devices, &latest_dev); 1094 1095 list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list) 1096 __btrfs_free_extra_devids(seed_dev, &latest_dev); 1097 1098 fs_devices->latest_dev = latest_dev; 1099 1100 mutex_unlock(&uuid_mutex); 1101 } 1102 1103 static void btrfs_close_bdev(struct btrfs_device *device) 1104 { 1105 if (!device->bdev) 1106 return; 1107 1108 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 1109 sync_blockdev(device->bdev); 1110 invalidate_bdev(device->bdev); 1111 } 1112 1113 blkdev_put(device->bdev, device->mode); 1114 } 1115 1116 static void btrfs_close_one_device(struct btrfs_device *device) 1117 { 1118 struct btrfs_fs_devices *fs_devices = device->fs_devices; 1119 1120 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 1121 device->devid != BTRFS_DEV_REPLACE_DEVID) { 1122 list_del_init(&device->dev_alloc_list); 1123 fs_devices->rw_devices--; 1124 } 1125 1126 if (device->devid == BTRFS_DEV_REPLACE_DEVID) 1127 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 1128 1129 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) 1130 fs_devices->missing_devices--; 1131 1132 btrfs_close_bdev(device); 1133 if (device->bdev) { 1134 fs_devices->open_devices--; 1135 device->bdev = NULL; 1136 } 1137 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 1138 btrfs_destroy_dev_zone_info(device); 1139 1140 device->fs_info = NULL; 1141 atomic_set(&device->dev_stats_ccnt, 0); 1142 extent_io_tree_release(&device->alloc_state); 1143 1144 /* 1145 * Reset the flush error record. We might have a transient flush error 1146 * in this mount, and if so we aborted the current transaction and set 1147 * the fs to an error state, guaranteeing no super blocks can be further 1148 * committed. However that error might be transient and if we unmount the 1149 * filesystem and mount it again, we should allow the mount to succeed 1150 * (btrfs_check_rw_degradable() should not fail) - if after mounting the 1151 * filesystem again we still get flush errors, then we will again abort 1152 * any transaction and set the error state, guaranteeing no commits of 1153 * unsafe super blocks. 
1154 */ 1155 device->last_flush_error = 0; 1156 1157 /* Verify the device is back in a pristine state */ 1158 ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state)); 1159 ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)); 1160 ASSERT(list_empty(&device->dev_alloc_list)); 1161 ASSERT(list_empty(&device->post_commit_list)); 1162 ASSERT(atomic_read(&device->reada_in_flight) == 0); 1163 } 1164 1165 static void close_fs_devices(struct btrfs_fs_devices *fs_devices) 1166 { 1167 struct btrfs_device *device, *tmp; 1168 1169 lockdep_assert_held(&uuid_mutex); 1170 1171 if (--fs_devices->opened > 0) 1172 return; 1173 1174 list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) 1175 btrfs_close_one_device(device); 1176 1177 WARN_ON(fs_devices->open_devices); 1178 WARN_ON(fs_devices->rw_devices); 1179 fs_devices->opened = 0; 1180 fs_devices->seeding = false; 1181 fs_devices->fs_info = NULL; 1182 } 1183 1184 void btrfs_close_devices(struct btrfs_fs_devices *fs_devices) 1185 { 1186 LIST_HEAD(list); 1187 struct btrfs_fs_devices *tmp; 1188 1189 mutex_lock(&uuid_mutex); 1190 close_fs_devices(fs_devices); 1191 if (!fs_devices->opened) 1192 list_splice_init(&fs_devices->seed_list, &list); 1193 1194 list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) { 1195 close_fs_devices(fs_devices); 1196 list_del(&fs_devices->seed_list); 1197 free_fs_devices(fs_devices); 1198 } 1199 mutex_unlock(&uuid_mutex); 1200 } 1201 1202 static int open_fs_devices(struct btrfs_fs_devices *fs_devices, 1203 fmode_t flags, void *holder) 1204 { 1205 struct btrfs_device *device; 1206 struct btrfs_device *latest_dev = NULL; 1207 struct btrfs_device *tmp_device; 1208 1209 flags |= FMODE_EXCL; 1210 1211 list_for_each_entry_safe(device, tmp_device, &fs_devices->devices, 1212 dev_list) { 1213 int ret; 1214 1215 ret = btrfs_open_one_device(fs_devices, device, flags, holder); 1216 if (ret == 0 && 1217 (!latest_dev || device->generation > latest_dev->generation)) { 1218 latest_dev = device; 1219 } else if (ret == -ENODATA) { 1220 fs_devices->num_devices--; 1221 list_del(&device->dev_list); 1222 btrfs_free_device(device); 1223 } 1224 } 1225 if (fs_devices->open_devices == 0) 1226 return -EINVAL; 1227 1228 fs_devices->opened = 1; 1229 fs_devices->latest_dev = latest_dev; 1230 fs_devices->total_rw_bytes = 0; 1231 fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR; 1232 fs_devices->read_policy = BTRFS_READ_POLICY_PID; 1233 1234 return 0; 1235 } 1236 1237 static int devid_cmp(void *priv, const struct list_head *a, 1238 const struct list_head *b) 1239 { 1240 const struct btrfs_device *dev1, *dev2; 1241 1242 dev1 = list_entry(a, struct btrfs_device, dev_list); 1243 dev2 = list_entry(b, struct btrfs_device, dev_list); 1244 1245 if (dev1->devid < dev2->devid) 1246 return -1; 1247 else if (dev1->devid > dev2->devid) 1248 return 1; 1249 return 0; 1250 } 1251 1252 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, 1253 fmode_t flags, void *holder) 1254 { 1255 int ret; 1256 1257 lockdep_assert_held(&uuid_mutex); 1258 /* 1259 * The device_list_mutex cannot be taken here in case opening the 1260 * underlying device takes further locks like open_mutex. 
static int devid_cmp(void *priv, const struct list_head *a,
		     const struct list_head *b)
{
	const struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);
	/*
	 * The device_list_mutex cannot be taken here in case opening the
	 * underlying device takes further locks like open_mutex.
	 *
	 * We also don't need the lock here as this is called during mount and
	 * exclusion is provided by uuid_mutex
	 */

	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}

	return ret;
}

void btrfs_release_disk_super(struct btrfs_super_block *super)
{
	struct page *page = virt_to_page(super);

	put_page(page);
}
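
/*
 * Read the super block at byte offset @bytenr of @bdev through the page
 * cache. @bytenr_orig is the logical offset that the super block must record
 * in its bytenr field (it can differ from @bytenr, e.g. on zoned block
 * devices). Returns a pointer into the page, to be released with
 * btrfs_release_disk_super().
 */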
static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
						       u64 bytenr, u64 bytenr_orig)
{
	struct btrfs_super_block *disk_super;
	struct page *page;
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
		return ERR_PTR(-EINVAL);

	/* make sure our super fits in the page */
	if (sizeof(*disk_super) > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
		return ERR_PTR(-EINVAL);

	/* pull in the page with our super */
	page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);

	if (IS_ERR(page))
		return ERR_CAST(page);

	p = page_address(page);

	/* align our pointer to the offset of the super block */
	disk_super = p + offset_in_page(bytenr);

	if (btrfs_super_bytenr(disk_super) != bytenr_orig ||
	    btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(p);
		return ERR_PTR(-EINVAL);
	}

	if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
		disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;

	return disk_super;
}

int btrfs_forget_devices(const char *path)
{
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_free_stale_devices(strlen(path) ? path : NULL, NULL);
	mutex_unlock(&uuid_mutex);

	return ret;
}

/*
 * Look for a btrfs signature on a device. This may be called out of the mount
 * path and we are not allowed to call set_blocksize during the scan. The
 * superblock is read via pagecache.
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
					   void *holder)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct block_device *bdev;
	u64 bytenr, bytenr_orig;
	int ret;

	lockdep_assert_held(&uuid_mutex);

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	flags |= FMODE_EXCL;

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	bytenr_orig = btrfs_sb_offset(0);
	ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr);
	if (ret) {
		/* Do not leak the bdev reference on error. */
		device = ERR_PTR(ret);
		goto error_bdev_put;
	}

	disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig);
	if (IS_ERR(disk_super)) {
		device = ERR_CAST(disk_super);
		goto error_bdev_put;
	}

	device = device_list_add(path, disk_super, &new_device_added);
	if (!IS_ERR(device)) {
		if (new_device_added)
			btrfs_free_stale_devices(path, device);
	}

	btrfs_release_disk_super(disk_super);

error_bdev_put:
	blkdev_put(bdev, flags);

	return device;
}

/*
 * Try to find a chunk that intersects [start, start + len] range and when one
 * such is found, record the end of it in *start
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{
	u64 physical_start, physical_end;

	lockdep_assert_held(&device->fs_info->chunk_mutex);

	if (!find_first_extent_bit(&device->alloc_state, *start,
				   &physical_start, &physical_end,
				   CHUNK_ALLOCATED, NULL)) {

		if (in_range(physical_start, *start, len) ||
		    in_range(*start, physical_start,
			     physical_end - physical_start)) {
			*start = physical_end + 1;
			return true;
		}
	}
	return false;
}
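
/*
 * Return the first offset at or after @start from which a new dev extent may
 * be allocated, depending on the chunk allocation policy of the device.
 */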
static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
{
	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		/*
		 * We don't want to overwrite the superblock on the drive nor
		 * any area used by the boot loader (grub for example), so we
		 * make sure to start at an offset of at least 1MB.
		 */
		return max_t(u64, start, SZ_1M);
	case BTRFS_CHUNK_ALLOC_ZONED:
		/*
		 * We don't care about the starting region like regular
		 * allocator, because we anyway use/reserve the first two zones
		 * for superblock logging.
		 */
		return ALIGN(start, device->zone_info->zone_size);
	default:
		BUG();
	}
}

static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
					u64 *hole_start, u64 *hole_size,
					u64 num_bytes)
{
	u64 zone_size = device->zone_info->zone_size;
	u64 pos;
	int ret;
	bool changed = false;

	ASSERT(IS_ALIGNED(*hole_start, zone_size));

	while (*hole_size > 0) {
		pos = btrfs_find_allocatable_zones(device, *hole_start,
						   *hole_start + *hole_size,
						   num_bytes);
		if (pos != *hole_start) {
			*hole_size = *hole_start + *hole_size - pos;
			*hole_start = pos;
			changed = true;
			if (*hole_size < num_bytes)
				break;
		}

		ret = btrfs_ensure_empty_zones(device, pos, num_bytes);

		/* Range is ensured to be empty */
		if (!ret)
			return changed;

		/* Given hole range was invalid (outside of device) */
		if (ret == -ERANGE) {
			*hole_start += *hole_size;
			*hole_size = 0;
			return true;
		}

		*hole_start += zone_size;
		*hole_size -= zone_size;
		changed = true;
	}

	return changed;
}

/**
 * dev_extent_hole_check - check if specified hole is suitable for allocation
 * @device:	the device which we have the hole
 * @hole_start: starting position of the hole
 * @hole_size:	the size of the hole
 * @num_bytes:	the size of the free space that we need
 *
 * This function may modify @hole_start and @hole_size to reflect the suitable
 * position for allocation. Returns true if the hole position is updated,
 * false otherwise.
 */
static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
				  u64 *hole_size, u64 num_bytes)
{
	bool changed = false;
	u64 hole_end = *hole_start + *hole_size;

	for (;;) {
		/*
		 * Check before we set max_hole_start, otherwise we could end up
		 * sending back this offset anyway.
		 */
		if (contains_pending_extent(device, hole_start, *hole_size)) {
			if (hole_end >= *hole_start)
				*hole_size = hole_end - *hole_start;
			else
				*hole_size = 0;
			changed = true;
		}

		switch (device->fs_devices->chunk_alloc_policy) {
		case BTRFS_CHUNK_ALLOC_REGULAR:
			/* No extra check */
			break;
		case BTRFS_CHUNK_ALLOC_ZONED:
			if (dev_extent_hole_check_zoned(device, hole_start,
							hole_size, num_bytes)) {
				changed = true;
				/*
				 * The changed hole can contain pending extent.
				 * Loop again to check that.
				 */
				continue;
			}
			break;
		default:
			BUG();
		}

		break;
	}

	return changed;
}
/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size of the
 *		  max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find one. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 *
 * NOTE: This function will search *commit* root of device tree, and does extra
 * check to ensure dev extents are not double allocated.
 * This makes the function safe to allocate dev extents but may not report
 * correct usable device space, as device extent freed in current transaction
 * is not reported as available.
 */
static int find_free_dev_extent_start(struct btrfs_device *device,
				      u64 num_bytes, u64 search_start, u64 *start,
				      u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	search_start = dev_extent_search_start(device, search_start);

	WARN_ON(device->zone_info &&
		!IS_ALIGNED(num_bytes, device->zone_info->zone_size));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_backwards(root, &key, path);
	if (ret < 0)
		goto out;

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;
			dev_extent_hole_check(device, &search_start, &hole_size,
					      num_bytes);

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than which we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;
		if (dev_extent_hole_check(device, &search_start, &hole_size,
					  num_bytes)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(device, num_bytes, 0, start, len);
}
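
/*
 * Find the dev extent of @device that contains or starts at @start, remove
 * the item from the device tree and return the extent length in
 * @dev_extent_len.
 */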
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start, u64 *dev_extent_len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret == 0)
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
out:
	btrfs_free_path(path);
	return ret;
}
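
/*
 * Return the logical start offset for a new chunk: the end of the last extent
 * map in the mapping tree, i.e. the address right after the currently highest
 * mapped chunk.
 */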
matched"); 1808 ret = -EUCLEAN; 1809 goto error; 1810 } 1811 1812 ret = btrfs_previous_item(fs_info->chunk_root, path, 1813 BTRFS_DEV_ITEMS_OBJECTID, 1814 BTRFS_DEV_ITEM_KEY); 1815 if (ret) { 1816 *devid_ret = 1; 1817 } else { 1818 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 1819 path->slots[0]); 1820 *devid_ret = found_key.offset + 1; 1821 } 1822 ret = 0; 1823 error: 1824 btrfs_free_path(path); 1825 return ret; 1826 } 1827 1828 /* 1829 * the device information is stored in the chunk root 1830 * the btrfs_device struct should be fully filled in 1831 */ 1832 static int btrfs_add_dev_item(struct btrfs_trans_handle *trans, 1833 struct btrfs_device *device) 1834 { 1835 int ret; 1836 struct btrfs_path *path; 1837 struct btrfs_dev_item *dev_item; 1838 struct extent_buffer *leaf; 1839 struct btrfs_key key; 1840 unsigned long ptr; 1841 1842 path = btrfs_alloc_path(); 1843 if (!path) 1844 return -ENOMEM; 1845 1846 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1847 key.type = BTRFS_DEV_ITEM_KEY; 1848 key.offset = device->devid; 1849 1850 btrfs_reserve_chunk_metadata(trans, true); 1851 ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path, 1852 &key, sizeof(*dev_item)); 1853 btrfs_trans_release_chunk_metadata(trans); 1854 if (ret) 1855 goto out; 1856 1857 leaf = path->nodes[0]; 1858 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 1859 1860 btrfs_set_device_id(leaf, dev_item, device->devid); 1861 btrfs_set_device_generation(leaf, dev_item, 0); 1862 btrfs_set_device_type(leaf, dev_item, device->type); 1863 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 1864 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 1865 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 1866 btrfs_set_device_total_bytes(leaf, dev_item, 1867 btrfs_device_get_disk_total_bytes(device)); 1868 btrfs_set_device_bytes_used(leaf, dev_item, 1869 btrfs_device_get_bytes_used(device)); 1870 btrfs_set_device_group(leaf, dev_item, 0); 1871 btrfs_set_device_seek_speed(leaf, dev_item, 0); 1872 btrfs_set_device_bandwidth(leaf, dev_item, 0); 1873 btrfs_set_device_start_offset(leaf, dev_item, 0); 1874 1875 ptr = btrfs_device_uuid(dev_item); 1876 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 1877 ptr = btrfs_device_fsid(dev_item); 1878 write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid, 1879 ptr, BTRFS_FSID_SIZE); 1880 btrfs_mark_buffer_dirty(leaf); 1881 1882 ret = 0; 1883 out: 1884 btrfs_free_path(path); 1885 return ret; 1886 } 1887 1888 /* 1889 * Function to update ctime/mtime for a given device path. 1890 * Mainly used for ctime/mtime based probe like libblkid. 1891 */ 1892 static void update_dev_time(struct block_device *bdev) 1893 { 1894 struct inode *inode = bdev->bd_inode; 1895 struct timespec64 now; 1896 1897 /* Shouldn't happen but just in case. 
*/ 1898 if (!inode) 1899 return; 1900 1901 now = current_time(inode);
1902 generic_update_time(inode, &now, S_MTIME | S_CTIME); 1903 } 1904
1905 static int btrfs_rm_dev_item(struct btrfs_device *device) 1906 {
1907 struct btrfs_root *root = device->fs_info->chunk_root; 1908 int ret;
1909 struct btrfs_path *path; 1910 struct btrfs_key key;
1911 struct btrfs_trans_handle *trans; 1912 1913 path = btrfs_alloc_path();
1914 if (!path) 1915 return -ENOMEM; 1916 1917 trans = btrfs_start_transaction(root, 0);
1918 if (IS_ERR(trans)) { 1919 btrfs_free_path(path); 1920 return PTR_ERR(trans); 1921 }
1922 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1923 key.type = BTRFS_DEV_ITEM_KEY;
1924 key.offset = device->devid; 1925 1926 btrfs_reserve_chunk_metadata(trans, false);
1927 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1928 btrfs_trans_release_chunk_metadata(trans); 1929 if (ret) {
1930 if (ret > 0) 1931 ret = -ENOENT; 1932 btrfs_abort_transaction(trans, ret);
1933 btrfs_end_transaction(trans); 1934 goto out; 1935 } 1936
1937 ret = btrfs_del_item(trans, root, path); 1938 if (ret) {
1939 btrfs_abort_transaction(trans, ret); 1940 btrfs_end_transaction(trans); 1941 } 1942
1943 out: 1944 btrfs_free_path(path); 1945 if (!ret)
1946 ret = btrfs_commit_transaction(trans); 1947 return ret; 1948 } 1949
1950 /* 1951 * Verify that @num_devices satisfies the RAID profile constraints in the whole
1952 * filesystem. It's up to the caller to adjust that number, e.g. to account for
1953 * device replace. 1954 */
1955 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
1956 u64 num_devices) 1957 { 1958 u64 all_avail; 1959 unsigned seq; 1960 int i; 1961
1962 do { 1963 seq = read_seqbegin(&fs_info->profiles_lock); 1964
1965 all_avail = fs_info->avail_data_alloc_bits |
1966 fs_info->avail_system_alloc_bits |
1967 fs_info->avail_metadata_alloc_bits;
1968 } while (read_seqretry(&fs_info->profiles_lock, seq)); 1969
1970 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
1971 if (!(all_avail & btrfs_raid_array[i].bg_flag)) 1972 continue; 1973
1974 if (num_devices < btrfs_raid_array[i].devs_min)
1975 return btrfs_raid_array[i].mindev_error; 1976 } 1977 1978 return 0; 1979 } 1980
1981 static struct btrfs_device * btrfs_find_next_active_device(
1982 struct btrfs_fs_devices *fs_devs, struct btrfs_device *device) 1983 {
1984 struct btrfs_device *next_device; 1985
1986 list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
1987 if (next_device != device &&
1988 !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
1989 && next_device->bdev) 1990 return next_device; 1991 } 1992
1993 return NULL; 1994 } 1995
1996 /* 1997 * Helper function to check if the given device is part of s_bdev / latest_dev
1998 * and replace it with the provided or the next active device. In the context
1999 * where this function is called, there should always be another device (or
2000 next_device) which is active.
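For orientation, a summary of the callers in this file: the removal paths pass
NULL and let the helper pick a replacement, e.g. btrfs_rm_device() below does

	btrfs_assign_next_active_device(device, NULL);

while the seeding path in btrfs_init_new_device() names the new device
explicitly.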
2001 */ 2002 void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
2003 struct btrfs_device *next_device) 2004 {
2005 struct btrfs_fs_info *fs_info = device->fs_info; 2006
2007 if (!next_device)
2008 next_device = btrfs_find_next_active_device(fs_info->fs_devices,
2009 device); 2010 ASSERT(next_device); 2011
2012 if (fs_info->sb->s_bdev &&
2013 (fs_info->sb->s_bdev == device->bdev))
2014 fs_info->sb->s_bdev = next_device->bdev; 2015
2016 if (fs_info->fs_devices->latest_dev->bdev == device->bdev)
2017 fs_info->fs_devices->latest_dev = next_device; 2018 } 2019
2020 /* 2021 * Return btrfs_fs_devices::num_devices excluding the device that's being
2022 * currently replaced. 2023 */
2024 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info) 2025 {
2026 u64 num_devices = fs_info->fs_devices->num_devices; 2027
2028 down_read(&fs_info->dev_replace.rwsem);
2029 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
2030 ASSERT(num_devices > 1); 2031 num_devices--; 2032 }
2033 up_read(&fs_info->dev_replace.rwsem); 2034 2035 return num_devices; 2036 } 2037
2038 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
2039 struct block_device *bdev, 2040 const char *device_path) 2041 {
2042 struct btrfs_super_block *disk_super; 2043 int copy_num; 2044
2045 if (!bdev) 2046 return; 2047
2048 for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
2049 struct page *page; 2050 int ret; 2051
2052 disk_super = btrfs_read_dev_one_super(bdev, copy_num);
2053 if (IS_ERR(disk_super)) 2054 continue; 2055
2056 if (bdev_is_zoned(bdev)) {
2057 btrfs_reset_sb_log_zones(bdev, copy_num); 2058 continue; 2059 } 2060
2061 memset(&disk_super->magic, 0, sizeof(disk_super->magic)); 2062
2063 page = virt_to_page(disk_super); 2064 set_page_dirty(page); 2065 lock_page(page);
2066 /* write_one_page() unlocks the page */
2067 ret = write_one_page(page); 2068 if (ret)
2069 btrfs_warn(fs_info,
2070 "error clearing superblock number %d (%d)",
2071 copy_num, ret); 2072 btrfs_release_disk_super(disk_super); 2073 2074 } 2075
2076 /* Notify udev that the device has changed */
2077 btrfs_kobject_uevent(bdev, KOBJ_CHANGE); 2078
2079 /* Update ctime/mtime for device path for libblkid */
2080 update_dev_time(bdev); 2081 } 2082
2083 int btrfs_rm_device(struct btrfs_fs_info *fs_info,
2084 struct btrfs_dev_lookup_args *args,
2085 struct block_device **bdev, fmode_t *mode) 2086 {
2087 struct btrfs_device *device; 2088 struct btrfs_fs_devices *cur_devices;
2089 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2090 u64 num_devices; 2091 int ret = 0; 2092
2093 /* 2094 * The device list in fs_devices is accessed without locks (neither
2095 * uuid_mutex nor device_list_mutex) as it won't change on a mounted
2096 filesystem and another device rm cannot run.
2097 */ 2098 num_devices = btrfs_num_devices(fs_info); 2099 2100 ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1); 2101 if (ret) 2102 goto out; 2103 2104 device = btrfs_find_device(fs_info->fs_devices, args); 2105 if (!device) { 2106 if (args->missing) 2107 ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND; 2108 else 2109 ret = -ENOENT; 2110 goto out; 2111 } 2112 2113 if (btrfs_pinned_by_swapfile(fs_info, device)) { 2114 btrfs_warn_in_rcu(fs_info, 2115 "cannot remove device %s (devid %llu) due to active swapfile", 2116 rcu_str_deref(device->name), device->devid); 2117 ret = -ETXTBSY; 2118 goto out; 2119 } 2120 2121 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 2122 ret = BTRFS_ERROR_DEV_TGT_REPLACE; 2123 goto out; 2124 } 2125 2126 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 2127 fs_info->fs_devices->rw_devices == 1) { 2128 ret = BTRFS_ERROR_DEV_ONLY_WRITABLE; 2129 goto out; 2130 } 2131 2132 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2133 mutex_lock(&fs_info->chunk_mutex); 2134 list_del_init(&device->dev_alloc_list); 2135 device->fs_devices->rw_devices--; 2136 mutex_unlock(&fs_info->chunk_mutex); 2137 } 2138 2139 ret = btrfs_shrink_device(device, 0); 2140 if (!ret) 2141 btrfs_reada_remove_dev(device); 2142 if (ret) 2143 goto error_undo; 2144 2145 /* 2146 * TODO: the superblock still includes this device in its num_devices 2147 * counter although write_all_supers() is not locked out. This 2148 * could give a filesystem state which requires a degraded mount. 2149 */ 2150 ret = btrfs_rm_dev_item(device); 2151 if (ret) 2152 goto error_undo; 2153 2154 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2155 btrfs_scrub_cancel_dev(device); 2156 2157 /* 2158 * the device list mutex makes sure that we don't change 2159 * the device list while someone else is writing out all 2160 * the device supers. Whoever is writing all supers, should 2161 * lock the device list mutex before getting the number of 2162 * devices in the super block (super_copy). Conversely, 2163 * whoever updates the number of devices in the super block 2164 * (super_copy) should hold the device list mutex. 2165 */ 2166 2167 /* 2168 * In normal cases the cur_devices == fs_devices. But in case 2169 * of deleting a seed device, the cur_devices should point to 2170 * its own fs_devices listed under the fs_devices->seed_list. 2171 */ 2172 cur_devices = device->fs_devices; 2173 mutex_lock(&fs_devices->device_list_mutex); 2174 list_del_rcu(&device->dev_list); 2175 2176 cur_devices->num_devices--; 2177 cur_devices->total_devices--; 2178 /* Update total_devices of the parent fs_devices if it's seed */ 2179 if (cur_devices != fs_devices) 2180 fs_devices->total_devices--; 2181 2182 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) 2183 cur_devices->missing_devices--; 2184 2185 btrfs_assign_next_active_device(device, NULL); 2186 2187 if (device->bdev) { 2188 cur_devices->open_devices--; 2189 /* remove sysfs entry */ 2190 btrfs_sysfs_remove_device(device); 2191 } 2192 2193 num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1; 2194 btrfs_set_super_num_devices(fs_info->super_copy, num_devices); 2195 mutex_unlock(&fs_devices->device_list_mutex); 2196 2197 /* 2198 * At this point, the device is zero sized and detached from the 2199 * devices list. All that's left is to zero out the old supers and 2200 * free the device. 
2201 * 2202 * We cannot call btrfs_close_bdev() here because we're holding the sb 2203 * write lock, and blkdev_put() will pull in the ->open_mutex on the 2204 * block device and it's dependencies. Instead just flush the device 2205 * and let the caller do the final blkdev_put. 2206 */ 2207 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2208 btrfs_scratch_superblocks(fs_info, device->bdev, 2209 device->name->str); 2210 if (device->bdev) { 2211 sync_blockdev(device->bdev); 2212 invalidate_bdev(device->bdev); 2213 } 2214 } 2215 2216 *bdev = device->bdev; 2217 *mode = device->mode; 2218 synchronize_rcu(); 2219 btrfs_free_device(device); 2220 2221 /* 2222 * This can happen if cur_devices is the private seed devices list. We 2223 * cannot call close_fs_devices() here because it expects the uuid_mutex 2224 * to be held, but in fact we don't need that for the private 2225 * seed_devices, we can simply decrement cur_devices->opened and then 2226 * remove it from our list and free the fs_devices. 2227 */ 2228 if (cur_devices->num_devices == 0) { 2229 list_del_init(&cur_devices->seed_list); 2230 ASSERT(cur_devices->opened == 1); 2231 cur_devices->opened--; 2232 free_fs_devices(cur_devices); 2233 } 2234 2235 out: 2236 return ret; 2237 2238 error_undo: 2239 btrfs_reada_undo_remove_dev(device); 2240 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2241 mutex_lock(&fs_info->chunk_mutex); 2242 list_add(&device->dev_alloc_list, 2243 &fs_devices->alloc_list); 2244 device->fs_devices->rw_devices++; 2245 mutex_unlock(&fs_info->chunk_mutex); 2246 } 2247 goto out; 2248 } 2249 2250 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev) 2251 { 2252 struct btrfs_fs_devices *fs_devices; 2253 2254 lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex); 2255 2256 /* 2257 * in case of fs with no seed, srcdev->fs_devices will point 2258 * to fs_devices of fs_info. However when the dev being replaced is 2259 * a seed dev it will point to the seed's local fs_devices. In short 2260 * srcdev will have its correct fs_devices in both the cases. 2261 */ 2262 fs_devices = srcdev->fs_devices; 2263 2264 list_del_rcu(&srcdev->dev_list); 2265 list_del(&srcdev->dev_alloc_list); 2266 fs_devices->num_devices--; 2267 if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state)) 2268 fs_devices->missing_devices--; 2269 2270 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) 2271 fs_devices->rw_devices--; 2272 2273 if (srcdev->bdev) 2274 fs_devices->open_devices--; 2275 } 2276 2277 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev) 2278 { 2279 struct btrfs_fs_devices *fs_devices = srcdev->fs_devices; 2280 2281 mutex_lock(&uuid_mutex); 2282 2283 btrfs_close_bdev(srcdev); 2284 synchronize_rcu(); 2285 btrfs_free_device(srcdev); 2286 2287 /* if this is no devs we rather delete the fs_devices */ 2288 if (!fs_devices->num_devices) { 2289 /* 2290 * On a mounted FS, num_devices can't be zero unless it's a 2291 * seed. In case of a seed device being replaced, the replace 2292 * target added to the sprout FS, so there will be no more 2293 * device left under the seed FS. 
2294 */ 2295 ASSERT(fs_devices->seeding); 2296 2297 list_del_init(&fs_devices->seed_list); 2298 close_fs_devices(fs_devices); 2299 free_fs_devices(fs_devices); 2300 } 2301 mutex_unlock(&uuid_mutex); 2302 } 2303 2304 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev) 2305 { 2306 struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices; 2307 2308 mutex_lock(&fs_devices->device_list_mutex); 2309 2310 btrfs_sysfs_remove_device(tgtdev); 2311 2312 if (tgtdev->bdev) 2313 fs_devices->open_devices--; 2314 2315 fs_devices->num_devices--; 2316 2317 btrfs_assign_next_active_device(tgtdev, NULL); 2318 2319 list_del_rcu(&tgtdev->dev_list); 2320 2321 mutex_unlock(&fs_devices->device_list_mutex); 2322 2323 btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev, 2324 tgtdev->name->str); 2325 2326 btrfs_close_bdev(tgtdev); 2327 synchronize_rcu(); 2328 btrfs_free_device(tgtdev); 2329 } 2330 2331 /** 2332 * Populate args from device at path 2333 * 2334 * @fs_info: the filesystem 2335 * @args: the args to populate 2336 * @path: the path to the device 2337 * 2338 * This will read the super block of the device at @path and populate @args with 2339 * the devid, fsid, and uuid. This is meant to be used for ioctls that need to 2340 * lookup a device to operate on, but need to do it before we take any locks. 2341 * This properly handles the special case of "missing" that a user may pass in, 2342 * and does some basic sanity checks. The caller must make sure that @path is 2343 * properly NUL terminated before calling in, and must call 2344 * btrfs_put_dev_args_from_path() in order to free up the temporary fsid and 2345 * uuid buffers. 2346 * 2347 * Return: 0 for success, -errno for failure 2348 */ 2349 int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info, 2350 struct btrfs_dev_lookup_args *args, 2351 const char *path) 2352 { 2353 struct btrfs_super_block *disk_super; 2354 struct block_device *bdev; 2355 int ret; 2356 2357 if (!path || !path[0]) 2358 return -EINVAL; 2359 if (!strcmp(path, "missing")) { 2360 args->missing = true; 2361 return 0; 2362 } 2363 2364 args->uuid = kzalloc(BTRFS_UUID_SIZE, GFP_KERNEL); 2365 args->fsid = kzalloc(BTRFS_FSID_SIZE, GFP_KERNEL); 2366 if (!args->uuid || !args->fsid) { 2367 btrfs_put_dev_args_from_path(args); 2368 return -ENOMEM; 2369 } 2370 2371 ret = btrfs_get_bdev_and_sb(path, FMODE_READ, fs_info->bdev_holder, 0, 2372 &bdev, &disk_super); 2373 if (ret) 2374 return ret; 2375 args->devid = btrfs_stack_device_id(&disk_super->dev_item); 2376 memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE); 2377 if (btrfs_fs_incompat(fs_info, METADATA_UUID)) 2378 memcpy(args->fsid, disk_super->metadata_uuid, BTRFS_FSID_SIZE); 2379 else 2380 memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE); 2381 btrfs_release_disk_super(disk_super); 2382 blkdev_put(bdev, FMODE_READ); 2383 return 0; 2384 } 2385 2386 /* 2387 * Only use this jointly with btrfs_get_dev_args_from_path() because we will 2388 * allocate our ->uuid and ->fsid pointers, everybody else uses local variables 2389 * that don't need to be freed. 
2390 */ 2391 void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args) 2392 { 2393 kfree(args->uuid); 2394 kfree(args->fsid); 2395 args->uuid = NULL; 2396 args->fsid = NULL; 2397 } 2398 2399 struct btrfs_device *btrfs_find_device_by_devspec( 2400 struct btrfs_fs_info *fs_info, u64 devid, 2401 const char *device_path) 2402 { 2403 BTRFS_DEV_LOOKUP_ARGS(args); 2404 struct btrfs_device *device; 2405 int ret; 2406 2407 if (devid) { 2408 args.devid = devid; 2409 device = btrfs_find_device(fs_info->fs_devices, &args); 2410 if (!device) 2411 return ERR_PTR(-ENOENT); 2412 return device; 2413 } 2414 2415 ret = btrfs_get_dev_args_from_path(fs_info, &args, device_path); 2416 if (ret) 2417 return ERR_PTR(ret); 2418 device = btrfs_find_device(fs_info->fs_devices, &args); 2419 btrfs_put_dev_args_from_path(&args); 2420 if (!device) 2421 return ERR_PTR(-ENOENT); 2422 return device; 2423 } 2424 2425 /* 2426 * does all the dirty work required for changing file system's UUID. 2427 */ 2428 static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info) 2429 { 2430 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2431 struct btrfs_fs_devices *old_devices; 2432 struct btrfs_fs_devices *seed_devices; 2433 struct btrfs_super_block *disk_super = fs_info->super_copy; 2434 struct btrfs_device *device; 2435 u64 super_flags; 2436 2437 lockdep_assert_held(&uuid_mutex); 2438 if (!fs_devices->seeding) 2439 return -EINVAL; 2440 2441 /* 2442 * Private copy of the seed devices, anchored at 2443 * fs_info->fs_devices->seed_list 2444 */ 2445 seed_devices = alloc_fs_devices(NULL, NULL); 2446 if (IS_ERR(seed_devices)) 2447 return PTR_ERR(seed_devices); 2448 2449 /* 2450 * It's necessary to retain a copy of the original seed fs_devices in 2451 * fs_uuids so that filesystems which have been seeded can successfully 2452 * reference the seed device from open_seed_devices. This also supports 2453 * multiple fs seed. 2454 */ 2455 old_devices = clone_fs_devices(fs_devices); 2456 if (IS_ERR(old_devices)) { 2457 kfree(seed_devices); 2458 return PTR_ERR(old_devices); 2459 } 2460 2461 list_add(&old_devices->fs_list, &fs_uuids); 2462 2463 memcpy(seed_devices, fs_devices, sizeof(*seed_devices)); 2464 seed_devices->opened = 1; 2465 INIT_LIST_HEAD(&seed_devices->devices); 2466 INIT_LIST_HEAD(&seed_devices->alloc_list); 2467 mutex_init(&seed_devices->device_list_mutex); 2468 2469 mutex_lock(&fs_devices->device_list_mutex); 2470 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices, 2471 synchronize_rcu); 2472 list_for_each_entry(device, &seed_devices->devices, dev_list) 2473 device->fs_devices = seed_devices; 2474 2475 fs_devices->seeding = false; 2476 fs_devices->num_devices = 0; 2477 fs_devices->open_devices = 0; 2478 fs_devices->missing_devices = 0; 2479 fs_devices->rotating = false; 2480 list_add(&seed_devices->seed_list, &fs_devices->seed_list); 2481 2482 generate_random_uuid(fs_devices->fsid); 2483 memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE); 2484 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); 2485 mutex_unlock(&fs_devices->device_list_mutex); 2486 2487 super_flags = btrfs_super_flags(disk_super) & 2488 ~BTRFS_SUPER_FLAG_SEEDING; 2489 btrfs_set_super_flags(disk_super, super_flags); 2490 2491 return 0; 2492 } 2493 2494 /* 2495 * Store the expected generation for seed devices in device items. 
2496 */ 2497 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans) 2498 { 2499 BTRFS_DEV_LOOKUP_ARGS(args); 2500 struct btrfs_fs_info *fs_info = trans->fs_info; 2501 struct btrfs_root *root = fs_info->chunk_root; 2502 struct btrfs_path *path; 2503 struct extent_buffer *leaf; 2504 struct btrfs_dev_item *dev_item; 2505 struct btrfs_device *device; 2506 struct btrfs_key key; 2507 u8 fs_uuid[BTRFS_FSID_SIZE]; 2508 u8 dev_uuid[BTRFS_UUID_SIZE]; 2509 int ret; 2510 2511 path = btrfs_alloc_path(); 2512 if (!path) 2513 return -ENOMEM; 2514 2515 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2516 key.offset = 0; 2517 key.type = BTRFS_DEV_ITEM_KEY; 2518 2519 while (1) { 2520 btrfs_reserve_chunk_metadata(trans, false); 2521 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2522 btrfs_trans_release_chunk_metadata(trans); 2523 if (ret < 0) 2524 goto error; 2525 2526 leaf = path->nodes[0]; 2527 next_slot: 2528 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 2529 ret = btrfs_next_leaf(root, path); 2530 if (ret > 0) 2531 break; 2532 if (ret < 0) 2533 goto error; 2534 leaf = path->nodes[0]; 2535 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2536 btrfs_release_path(path); 2537 continue; 2538 } 2539 2540 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2541 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID || 2542 key.type != BTRFS_DEV_ITEM_KEY) 2543 break; 2544 2545 dev_item = btrfs_item_ptr(leaf, path->slots[0], 2546 struct btrfs_dev_item); 2547 args.devid = btrfs_device_id(leaf, dev_item); 2548 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 2549 BTRFS_UUID_SIZE); 2550 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 2551 BTRFS_FSID_SIZE); 2552 args.uuid = dev_uuid; 2553 args.fsid = fs_uuid; 2554 device = btrfs_find_device(fs_info->fs_devices, &args); 2555 BUG_ON(!device); /* Logic error */ 2556 2557 if (device->fs_devices->seeding) { 2558 btrfs_set_device_generation(leaf, dev_item, 2559 device->generation); 2560 btrfs_mark_buffer_dirty(leaf); 2561 } 2562 2563 path->slots[0]++; 2564 goto next_slot; 2565 } 2566 ret = 0; 2567 error: 2568 btrfs_free_path(path); 2569 return ret; 2570 } 2571 2572 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path) 2573 { 2574 struct btrfs_root *root = fs_info->dev_root; 2575 struct request_queue *q; 2576 struct btrfs_trans_handle *trans; 2577 struct btrfs_device *device; 2578 struct block_device *bdev; 2579 struct super_block *sb = fs_info->sb; 2580 struct rcu_string *name; 2581 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2582 u64 orig_super_total_bytes; 2583 u64 orig_super_num_devices; 2584 int seeding_dev = 0; 2585 int ret = 0; 2586 bool locked = false; 2587 2588 if (sb_rdonly(sb) && !fs_devices->seeding) 2589 return -EROFS; 2590 2591 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, 2592 fs_info->bdev_holder); 2593 if (IS_ERR(bdev)) 2594 return PTR_ERR(bdev); 2595 2596 if (!btrfs_check_device_zone_type(fs_info, bdev)) { 2597 ret = -EINVAL; 2598 goto error; 2599 } 2600 2601 if (fs_devices->seeding) { 2602 seeding_dev = 1; 2603 down_write(&sb->s_umount); 2604 mutex_lock(&uuid_mutex); 2605 locked = true; 2606 } 2607 2608 sync_blockdev(bdev); 2609 2610 rcu_read_lock(); 2611 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) { 2612 if (device->bdev == bdev) { 2613 ret = -EEXIST; 2614 rcu_read_unlock(); 2615 goto error; 2616 } 2617 } 2618 rcu_read_unlock(); 2619 2620 device = btrfs_alloc_device(fs_info, NULL, NULL); 2621 if (IS_ERR(device)) { 2622 /* we can 
safely leave the fs_devices entry around */ 2623 ret = PTR_ERR(device); 2624 goto error; 2625 } 2626 2627 name = rcu_string_strdup(device_path, GFP_KERNEL); 2628 if (!name) { 2629 ret = -ENOMEM; 2630 goto error_free_device; 2631 } 2632 rcu_assign_pointer(device->name, name); 2633 2634 device->fs_info = fs_info; 2635 device->bdev = bdev; 2636 2637 ret = btrfs_get_dev_zone_info(device); 2638 if (ret) 2639 goto error_free_device; 2640 2641 trans = btrfs_start_transaction(root, 0); 2642 if (IS_ERR(trans)) { 2643 ret = PTR_ERR(trans); 2644 goto error_free_zone; 2645 } 2646 2647 q = bdev_get_queue(bdev); 2648 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 2649 device->generation = trans->transid; 2650 device->io_width = fs_info->sectorsize; 2651 device->io_align = fs_info->sectorsize; 2652 device->sector_size = fs_info->sectorsize; 2653 device->total_bytes = round_down(i_size_read(bdev->bd_inode), 2654 fs_info->sectorsize); 2655 device->disk_total_bytes = device->total_bytes; 2656 device->commit_total_bytes = device->total_bytes; 2657 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2658 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 2659 device->mode = FMODE_EXCL; 2660 device->dev_stats_valid = 1; 2661 set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE); 2662 2663 if (seeding_dev) { 2664 btrfs_clear_sb_rdonly(sb); 2665 ret = btrfs_prepare_sprout(fs_info); 2666 if (ret) { 2667 btrfs_abort_transaction(trans, ret); 2668 goto error_trans; 2669 } 2670 btrfs_assign_next_active_device(fs_info->fs_devices->latest_dev, 2671 device); 2672 } 2673 2674 device->fs_devices = fs_devices; 2675 2676 mutex_lock(&fs_devices->device_list_mutex); 2677 mutex_lock(&fs_info->chunk_mutex); 2678 list_add_rcu(&device->dev_list, &fs_devices->devices); 2679 list_add(&device->dev_alloc_list, &fs_devices->alloc_list); 2680 fs_devices->num_devices++; 2681 fs_devices->open_devices++; 2682 fs_devices->rw_devices++; 2683 fs_devices->total_devices++; 2684 fs_devices->total_rw_bytes += device->total_bytes; 2685 2686 atomic64_add(device->total_bytes, &fs_info->free_chunk_space); 2687 2688 if (!blk_queue_nonrot(q)) 2689 fs_devices->rotating = true; 2690 2691 orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy); 2692 btrfs_set_super_total_bytes(fs_info->super_copy, 2693 round_down(orig_super_total_bytes + device->total_bytes, 2694 fs_info->sectorsize)); 2695 2696 orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy); 2697 btrfs_set_super_num_devices(fs_info->super_copy, 2698 orig_super_num_devices + 1); 2699 2700 /* 2701 * we've got more storage, clear any full flags on the space 2702 * infos 2703 */ 2704 btrfs_clear_space_info_full(fs_info); 2705 2706 mutex_unlock(&fs_info->chunk_mutex); 2707 2708 /* Add sysfs device entry */ 2709 btrfs_sysfs_add_device(device); 2710 2711 mutex_unlock(&fs_devices->device_list_mutex); 2712 2713 if (seeding_dev) { 2714 mutex_lock(&fs_info->chunk_mutex); 2715 ret = init_first_rw_device(trans); 2716 mutex_unlock(&fs_info->chunk_mutex); 2717 if (ret) { 2718 btrfs_abort_transaction(trans, ret); 2719 goto error_sysfs; 2720 } 2721 } 2722 2723 ret = btrfs_add_dev_item(trans, device); 2724 if (ret) { 2725 btrfs_abort_transaction(trans, ret); 2726 goto error_sysfs; 2727 } 2728 2729 if (seeding_dev) { 2730 ret = btrfs_finish_sprout(trans); 2731 if (ret) { 2732 btrfs_abort_transaction(trans, ret); 2733 goto error_sysfs; 2734 } 2735 2736 /* 2737 * fs_devices now represents the newly sprouted filesystem and 2738 * its fsid has been changed by 
btrfs_prepare_sprout 2739 */ 2740 btrfs_sysfs_update_sprout_fsid(fs_devices); 2741 } 2742 2743 ret = btrfs_commit_transaction(trans); 2744 2745 if (seeding_dev) { 2746 mutex_unlock(&uuid_mutex); 2747 up_write(&sb->s_umount); 2748 locked = false; 2749 2750 if (ret) /* transaction commit */ 2751 return ret; 2752 2753 ret = btrfs_relocate_sys_chunks(fs_info); 2754 if (ret < 0) 2755 btrfs_handle_fs_error(fs_info, ret, 2756 "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command."); 2757 trans = btrfs_attach_transaction(root); 2758 if (IS_ERR(trans)) { 2759 if (PTR_ERR(trans) == -ENOENT) 2760 return 0; 2761 ret = PTR_ERR(trans); 2762 trans = NULL; 2763 goto error_sysfs; 2764 } 2765 ret = btrfs_commit_transaction(trans); 2766 } 2767 2768 /* 2769 * Now that we have written a new super block to this device, check all 2770 * other fs_devices list if device_path alienates any other scanned 2771 * device. 2772 * We can ignore the return value as it typically returns -EINVAL and 2773 * only succeeds if the device was an alien. 2774 */ 2775 btrfs_forget_devices(device_path); 2776 2777 /* Update ctime/mtime for blkid or udev */ 2778 update_dev_time(bdev); 2779 2780 return ret; 2781 2782 error_sysfs: 2783 btrfs_sysfs_remove_device(device); 2784 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2785 mutex_lock(&fs_info->chunk_mutex); 2786 list_del_rcu(&device->dev_list); 2787 list_del(&device->dev_alloc_list); 2788 fs_info->fs_devices->num_devices--; 2789 fs_info->fs_devices->open_devices--; 2790 fs_info->fs_devices->rw_devices--; 2791 fs_info->fs_devices->total_devices--; 2792 fs_info->fs_devices->total_rw_bytes -= device->total_bytes; 2793 atomic64_sub(device->total_bytes, &fs_info->free_chunk_space); 2794 btrfs_set_super_total_bytes(fs_info->super_copy, 2795 orig_super_total_bytes); 2796 btrfs_set_super_num_devices(fs_info->super_copy, 2797 orig_super_num_devices); 2798 mutex_unlock(&fs_info->chunk_mutex); 2799 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2800 error_trans: 2801 if (seeding_dev) 2802 btrfs_set_sb_rdonly(sb); 2803 if (trans) 2804 btrfs_end_transaction(trans); 2805 error_free_zone: 2806 btrfs_destroy_dev_zone_info(device); 2807 error_free_device: 2808 btrfs_free_device(device); 2809 error: 2810 blkdev_put(bdev, FMODE_EXCL); 2811 if (locked) { 2812 mutex_unlock(&uuid_mutex); 2813 up_write(&sb->s_umount); 2814 } 2815 return ret; 2816 } 2817 2818 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans, 2819 struct btrfs_device *device) 2820 { 2821 int ret; 2822 struct btrfs_path *path; 2823 struct btrfs_root *root = device->fs_info->chunk_root; 2824 struct btrfs_dev_item *dev_item; 2825 struct extent_buffer *leaf; 2826 struct btrfs_key key; 2827 2828 path = btrfs_alloc_path(); 2829 if (!path) 2830 return -ENOMEM; 2831 2832 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2833 key.type = BTRFS_DEV_ITEM_KEY; 2834 key.offset = device->devid; 2835 2836 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2837 if (ret < 0) 2838 goto out; 2839 2840 if (ret > 0) { 2841 ret = -ENOENT; 2842 goto out; 2843 } 2844 2845 leaf = path->nodes[0]; 2846 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 2847 2848 btrfs_set_device_id(leaf, dev_item, device->devid); 2849 btrfs_set_device_type(leaf, dev_item, device->type); 2850 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 2851 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 2852 btrfs_set_device_sector_size(leaf, dev_item, 
device->sector_size); 2853 btrfs_set_device_total_bytes(leaf, dev_item, 2854 btrfs_device_get_disk_total_bytes(device)); 2855 btrfs_set_device_bytes_used(leaf, dev_item, 2856 btrfs_device_get_bytes_used(device)); 2857 btrfs_mark_buffer_dirty(leaf); 2858 2859 out: 2860 btrfs_free_path(path); 2861 return ret; 2862 } 2863 2864 int btrfs_grow_device(struct btrfs_trans_handle *trans, 2865 struct btrfs_device *device, u64 new_size) 2866 { 2867 struct btrfs_fs_info *fs_info = device->fs_info; 2868 struct btrfs_super_block *super_copy = fs_info->super_copy; 2869 u64 old_total; 2870 u64 diff; 2871 int ret; 2872 2873 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 2874 return -EACCES; 2875 2876 new_size = round_down(new_size, fs_info->sectorsize); 2877 2878 mutex_lock(&fs_info->chunk_mutex); 2879 old_total = btrfs_super_total_bytes(super_copy); 2880 diff = round_down(new_size - device->total_bytes, fs_info->sectorsize); 2881 2882 if (new_size <= device->total_bytes || 2883 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 2884 mutex_unlock(&fs_info->chunk_mutex); 2885 return -EINVAL; 2886 } 2887 2888 btrfs_set_super_total_bytes(super_copy, 2889 round_down(old_total + diff, fs_info->sectorsize)); 2890 device->fs_devices->total_rw_bytes += diff; 2891 2892 btrfs_device_set_total_bytes(device, new_size); 2893 btrfs_device_set_disk_total_bytes(device, new_size); 2894 btrfs_clear_space_info_full(device->fs_info); 2895 if (list_empty(&device->post_commit_list)) 2896 list_add_tail(&device->post_commit_list, 2897 &trans->transaction->dev_update_list); 2898 mutex_unlock(&fs_info->chunk_mutex); 2899 2900 btrfs_reserve_chunk_metadata(trans, false); 2901 ret = btrfs_update_device(trans, device); 2902 btrfs_trans_release_chunk_metadata(trans); 2903 2904 return ret; 2905 } 2906 2907 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 2908 { 2909 struct btrfs_fs_info *fs_info = trans->fs_info; 2910 struct btrfs_root *root = fs_info->chunk_root; 2911 int ret; 2912 struct btrfs_path *path; 2913 struct btrfs_key key; 2914 2915 path = btrfs_alloc_path(); 2916 if (!path) 2917 return -ENOMEM; 2918 2919 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2920 key.offset = chunk_offset; 2921 key.type = BTRFS_CHUNK_ITEM_KEY; 2922 2923 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 2924 if (ret < 0) 2925 goto out; 2926 else if (ret > 0) { /* Logic error or corruption */ 2927 btrfs_handle_fs_error(fs_info, -ENOENT, 2928 "Failed lookup while freeing chunk."); 2929 ret = -ENOENT; 2930 goto out; 2931 } 2932 2933 ret = btrfs_del_item(trans, root, path); 2934 if (ret < 0) 2935 btrfs_handle_fs_error(fs_info, ret, 2936 "Failed to delete chunk item."); 2937 out: 2938 btrfs_free_path(path); 2939 return ret; 2940 } 2941 2942 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 2943 { 2944 struct btrfs_super_block *super_copy = fs_info->super_copy; 2945 struct btrfs_disk_key *disk_key; 2946 struct btrfs_chunk *chunk; 2947 u8 *ptr; 2948 int ret = 0; 2949 u32 num_stripes; 2950 u32 array_size; 2951 u32 len = 0; 2952 u32 cur; 2953 struct btrfs_key key; 2954 2955 lockdep_assert_held(&fs_info->chunk_mutex); 2956 array_size = btrfs_super_sys_array_size(super_copy); 2957 2958 ptr = super_copy->sys_chunk_array; 2959 cur = 0; 2960 2961 while (cur < array_size) { 2962 disk_key = (struct btrfs_disk_key *)ptr; 2963 btrfs_disk_key_to_cpu(&key, disk_key); 2964 2965 len = sizeof(*disk_key); 2966 2967 if (key.type == BTRFS_CHUNK_ITEM_KEY) { 2968 chunk = (struct 
btrfs_chunk *)(ptr + len); 2969 num_stripes = btrfs_stack_chunk_num_stripes(chunk); 2970 len += btrfs_chunk_item_size(num_stripes); 2971 } else { 2972 ret = -EIO; 2973 break; 2974 } 2975 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID && 2976 key.offset == chunk_offset) { 2977 memmove(ptr, ptr + len, array_size - (cur + len)); 2978 array_size -= len; 2979 btrfs_set_super_sys_array_size(super_copy, array_size); 2980 } else { 2981 ptr += len; 2982 cur += len; 2983 } 2984 } 2985 return ret; 2986 } 2987 2988 /* 2989 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent. 2990 * @logical: Logical block offset in bytes. 2991 * @length: Length of extent in bytes. 2992 * 2993 * Return: Chunk mapping or ERR_PTR. 2994 */ 2995 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info, 2996 u64 logical, u64 length) 2997 { 2998 struct extent_map_tree *em_tree; 2999 struct extent_map *em; 3000 3001 em_tree = &fs_info->mapping_tree; 3002 read_lock(&em_tree->lock); 3003 em = lookup_extent_mapping(em_tree, logical, length); 3004 read_unlock(&em_tree->lock); 3005 3006 if (!em) { 3007 btrfs_crit(fs_info, "unable to find logical %llu length %llu", 3008 logical, length); 3009 return ERR_PTR(-EINVAL); 3010 } 3011 3012 if (em->start > logical || em->start + em->len < logical) { 3013 btrfs_crit(fs_info, 3014 "found a bad mapping, wanted %llu-%llu, found %llu-%llu", 3015 logical, length, em->start, em->start + em->len); 3016 free_extent_map(em); 3017 return ERR_PTR(-EINVAL); 3018 } 3019 3020 /* callers are responsible for dropping em's ref. */ 3021 return em; 3022 } 3023 3024 static int remove_chunk_item(struct btrfs_trans_handle *trans, 3025 struct map_lookup *map, u64 chunk_offset) 3026 { 3027 int i; 3028 3029 /* 3030 * Removing chunk items and updating the device items in the chunks btree 3031 * requires holding the chunk_mutex. 3032 * See the comment at btrfs_chunk_alloc() for the details. 3033 */ 3034 lockdep_assert_held(&trans->fs_info->chunk_mutex); 3035 3036 for (i = 0; i < map->num_stripes; i++) { 3037 int ret; 3038 3039 ret = btrfs_update_device(trans, map->stripes[i].dev); 3040 if (ret) 3041 return ret; 3042 } 3043 3044 return btrfs_free_chunk(trans, chunk_offset); 3045 } 3046 3047 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 3048 { 3049 struct btrfs_fs_info *fs_info = trans->fs_info; 3050 struct extent_map *em; 3051 struct map_lookup *map; 3052 u64 dev_extent_len = 0; 3053 int i, ret = 0; 3054 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 3055 3056 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 3057 if (IS_ERR(em)) { 3058 /* 3059 * This is a logic error, but we don't want to just rely on the 3060 * user having built with ASSERT enabled, so if ASSERT doesn't 3061 * do anything we still error out. 3062 */ 3063 ASSERT(0); 3064 return PTR_ERR(em); 3065 } 3066 map = em->map_lookup; 3067 3068 /* 3069 * First delete the device extent items from the devices btree. 3070 * We take the device_list_mutex to avoid racing with the finishing phase 3071 * of a device replace operation. See the comment below before acquiring 3072 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex 3073 * because that can result in a deadlock when deleting the device extent 3074 * items from the devices btree - COWing an extent buffer from the btree 3075 * may result in allocating a new metadata chunk, which would attempt to 3076 * lock again fs_info->chunk_mutex. 
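Sketched out, the deadlock described above would look roughly like this
(illustrative only, not an actual backtrace):

	btrfs_free_dev_extent()
	  btrfs_search_slot()		COWs an extent buffer of the btree
	    allocating the new tree block may require a new metadata
	    chunk, and chunk allocation takes fs_info->chunk_mutex
	    again -> deadlock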
3077 */ 3078 mutex_lock(&fs_devices->device_list_mutex); 3079 for (i = 0; i < map->num_stripes; i++) { 3080 struct btrfs_device *device = map->stripes[i].dev; 3081 ret = btrfs_free_dev_extent(trans, device, 3082 map->stripes[i].physical, 3083 &dev_extent_len); 3084 if (ret) { 3085 mutex_unlock(&fs_devices->device_list_mutex); 3086 btrfs_abort_transaction(trans, ret); 3087 goto out; 3088 } 3089 3090 if (device->bytes_used > 0) { 3091 mutex_lock(&fs_info->chunk_mutex); 3092 btrfs_device_set_bytes_used(device, 3093 device->bytes_used - dev_extent_len); 3094 atomic64_add(dev_extent_len, &fs_info->free_chunk_space); 3095 btrfs_clear_space_info_full(fs_info); 3096 mutex_unlock(&fs_info->chunk_mutex); 3097 } 3098 } 3099 mutex_unlock(&fs_devices->device_list_mutex); 3100 3101 /* 3102 * We acquire fs_info->chunk_mutex for 2 reasons: 3103 * 3104 * 1) Just like with the first phase of the chunk allocation, we must 3105 * reserve system space, do all chunk btree updates and deletions, and 3106 * update the system chunk array in the superblock while holding this 3107 * mutex. This is for similar reasons as explained on the comment at 3108 * the top of btrfs_chunk_alloc(); 3109 * 3110 * 2) Prevent races with the final phase of a device replace operation 3111 * that replaces the device object associated with the map's stripes, 3112 * because the device object's id can change at any time during that 3113 * final phase of the device replace operation 3114 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 3115 * replaced device and then see it with an ID of 3116 * BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating 3117 * the device item, which does not exists on the chunk btree. 3118 * The finishing phase of device replace acquires both the 3119 * device_list_mutex and the chunk_mutex, in that order, so we are 3120 * safe by just acquiring the chunk_mutex. 3121 */ 3122 trans->removing_chunk = true; 3123 mutex_lock(&fs_info->chunk_mutex); 3124 3125 check_system_chunk(trans, map->type); 3126 3127 ret = remove_chunk_item(trans, map, chunk_offset); 3128 /* 3129 * Normally we should not get -ENOSPC since we reserved space before 3130 * through the call to check_system_chunk(). 3131 * 3132 * Despite our system space_info having enough free space, we may not 3133 * be able to allocate extents from its block groups, because all have 3134 * an incompatible profile, which will force us to allocate a new system 3135 * block group with the right profile, or right after we called 3136 * check_system_space() above, a scrub turned the only system block group 3137 * with enough free space into RO mode. 3138 * This is explained with more detail at do_chunk_alloc(). 3139 * 3140 * So if we get -ENOSPC, allocate a new system chunk and retry once. 
3141 */ 3142 if (ret == -ENOSPC) { 3143 const u64 sys_flags = btrfs_system_alloc_profile(fs_info); 3144 struct btrfs_block_group *sys_bg; 3145 3146 sys_bg = btrfs_create_chunk(trans, sys_flags); 3147 if (IS_ERR(sys_bg)) { 3148 ret = PTR_ERR(sys_bg); 3149 btrfs_abort_transaction(trans, ret); 3150 goto out; 3151 } 3152 3153 ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg); 3154 if (ret) { 3155 btrfs_abort_transaction(trans, ret); 3156 goto out; 3157 } 3158 3159 ret = remove_chunk_item(trans, map, chunk_offset); 3160 if (ret) { 3161 btrfs_abort_transaction(trans, ret); 3162 goto out; 3163 } 3164 } else if (ret) { 3165 btrfs_abort_transaction(trans, ret); 3166 goto out; 3167 } 3168 3169 trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len); 3170 3171 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 3172 ret = btrfs_del_sys_chunk(fs_info, chunk_offset); 3173 if (ret) { 3174 btrfs_abort_transaction(trans, ret); 3175 goto out; 3176 } 3177 } 3178 3179 mutex_unlock(&fs_info->chunk_mutex); 3180 trans->removing_chunk = false; 3181 3182 /* 3183 * We are done with chunk btree updates and deletions, so release the 3184 * system space we previously reserved (with check_system_chunk()). 3185 */ 3186 btrfs_trans_release_chunk_metadata(trans); 3187 3188 ret = btrfs_remove_block_group(trans, chunk_offset, em); 3189 if (ret) { 3190 btrfs_abort_transaction(trans, ret); 3191 goto out; 3192 } 3193 3194 out: 3195 if (trans->removing_chunk) { 3196 mutex_unlock(&fs_info->chunk_mutex); 3197 trans->removing_chunk = false; 3198 } 3199 /* once for us */ 3200 free_extent_map(em); 3201 return ret; 3202 } 3203 3204 int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 3205 { 3206 struct btrfs_root *root = fs_info->chunk_root; 3207 struct btrfs_trans_handle *trans; 3208 struct btrfs_block_group *block_group; 3209 u64 length; 3210 int ret; 3211 3212 /* 3213 * Prevent races with automatic removal of unused block groups. 3214 * After we relocate and before we remove the chunk with offset 3215 * chunk_offset, automatic removal of the block group can kick in, 3216 * resulting in a failure when calling btrfs_remove_chunk() below. 3217 * 3218 * Make sure to acquire this mutex before doing a tree search (dev 3219 * or chunk trees) to find chunks. Otherwise the cleaner kthread might 3220 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after 3221 * we release the path used to search the chunk/dev tree and before 3222 * the current task acquires this mutex and calls us. 3223 */ 3224 lockdep_assert_held(&fs_info->reclaim_bgs_lock); 3225 3226 /* step one, relocate all the extents inside this chunk */ 3227 btrfs_scrub_pause(fs_info); 3228 ret = btrfs_relocate_block_group(fs_info, chunk_offset); 3229 btrfs_scrub_continue(fs_info); 3230 if (ret) 3231 return ret; 3232 3233 block_group = btrfs_lookup_block_group(fs_info, chunk_offset); 3234 if (!block_group) 3235 return -ENOENT; 3236 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); 3237 length = block_group->length; 3238 btrfs_put_block_group(block_group); 3239 3240 /* 3241 * On a zoned file system, discard the whole block group, this will 3242 * trigger a REQ_OP_ZONE_RESET operation on the device zone. If 3243 * resetting the zone fails, don't treat it as a fatal problem from the 3244 * filesystem's point of view. 
3245 */ 3246 if (btrfs_is_zoned(fs_info)) { 3247 ret = btrfs_discard_extent(fs_info, chunk_offset, length, NULL); 3248 if (ret) 3249 btrfs_info(fs_info, 3250 "failed to reset zone %llu after relocation", 3251 chunk_offset); 3252 } 3253 3254 trans = btrfs_start_trans_remove_block_group(root->fs_info, 3255 chunk_offset); 3256 if (IS_ERR(trans)) { 3257 ret = PTR_ERR(trans); 3258 btrfs_handle_fs_error(root->fs_info, ret, NULL); 3259 return ret; 3260 } 3261 3262 /* 3263 * step two, delete the device extents and the 3264 * chunk tree entries 3265 */ 3266 ret = btrfs_remove_chunk(trans, chunk_offset); 3267 btrfs_end_transaction(trans); 3268 return ret; 3269 } 3270 3271 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info) 3272 { 3273 struct btrfs_root *chunk_root = fs_info->chunk_root; 3274 struct btrfs_path *path; 3275 struct extent_buffer *leaf; 3276 struct btrfs_chunk *chunk; 3277 struct btrfs_key key; 3278 struct btrfs_key found_key; 3279 u64 chunk_type; 3280 bool retried = false; 3281 int failed = 0; 3282 int ret; 3283 3284 path = btrfs_alloc_path(); 3285 if (!path) 3286 return -ENOMEM; 3287 3288 again: 3289 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3290 key.offset = (u64)-1; 3291 key.type = BTRFS_CHUNK_ITEM_KEY; 3292 3293 while (1) { 3294 mutex_lock(&fs_info->reclaim_bgs_lock); 3295 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3296 if (ret < 0) { 3297 mutex_unlock(&fs_info->reclaim_bgs_lock); 3298 goto error; 3299 } 3300 BUG_ON(ret == 0); /* Corruption */ 3301 3302 ret = btrfs_previous_item(chunk_root, path, key.objectid, 3303 key.type); 3304 if (ret) 3305 mutex_unlock(&fs_info->reclaim_bgs_lock); 3306 if (ret < 0) 3307 goto error; 3308 if (ret > 0) 3309 break; 3310 3311 leaf = path->nodes[0]; 3312 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3313 3314 chunk = btrfs_item_ptr(leaf, path->slots[0], 3315 struct btrfs_chunk); 3316 chunk_type = btrfs_chunk_type(leaf, chunk); 3317 btrfs_release_path(path); 3318 3319 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { 3320 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3321 if (ret == -ENOSPC) 3322 failed++; 3323 else 3324 BUG_ON(ret); 3325 } 3326 mutex_unlock(&fs_info->reclaim_bgs_lock); 3327 3328 if (found_key.offset == 0) 3329 break; 3330 key.offset = found_key.offset - 1; 3331 } 3332 ret = 0; 3333 if (failed && !retried) { 3334 failed = 0; 3335 retried = true; 3336 goto again; 3337 } else if (WARN_ON(failed && retried)) { 3338 ret = -ENOSPC; 3339 } 3340 error: 3341 btrfs_free_path(path); 3342 return ret; 3343 } 3344 3345 /* 3346 * return 1 : allocate a data chunk successfully, 3347 * return <0: errors during allocating a data chunk, 3348 * return 0 : no need to allocate a data chunk. 
3349 */ 3350 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info, 3351 u64 chunk_offset) 3352 { 3353 struct btrfs_block_group *cache; 3354 u64 bytes_used; 3355 u64 chunk_type; 3356 3357 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3358 ASSERT(cache); 3359 chunk_type = cache->flags; 3360 btrfs_put_block_group(cache); 3361 3362 if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA)) 3363 return 0; 3364 3365 spin_lock(&fs_info->data_sinfo->lock); 3366 bytes_used = fs_info->data_sinfo->bytes_used; 3367 spin_unlock(&fs_info->data_sinfo->lock); 3368 3369 if (!bytes_used) { 3370 struct btrfs_trans_handle *trans; 3371 int ret; 3372 3373 trans = btrfs_join_transaction(fs_info->tree_root); 3374 if (IS_ERR(trans)) 3375 return PTR_ERR(trans); 3376 3377 ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA); 3378 btrfs_end_transaction(trans); 3379 if (ret < 0) 3380 return ret; 3381 return 1; 3382 } 3383 3384 return 0; 3385 } 3386 3387 static int insert_balance_item(struct btrfs_fs_info *fs_info, 3388 struct btrfs_balance_control *bctl) 3389 { 3390 struct btrfs_root *root = fs_info->tree_root; 3391 struct btrfs_trans_handle *trans; 3392 struct btrfs_balance_item *item; 3393 struct btrfs_disk_balance_args disk_bargs; 3394 struct btrfs_path *path; 3395 struct extent_buffer *leaf; 3396 struct btrfs_key key; 3397 int ret, err; 3398 3399 path = btrfs_alloc_path(); 3400 if (!path) 3401 return -ENOMEM; 3402 3403 trans = btrfs_start_transaction(root, 0); 3404 if (IS_ERR(trans)) { 3405 btrfs_free_path(path); 3406 return PTR_ERR(trans); 3407 } 3408 3409 key.objectid = BTRFS_BALANCE_OBJECTID; 3410 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3411 key.offset = 0; 3412 3413 ret = btrfs_insert_empty_item(trans, root, path, &key, 3414 sizeof(*item)); 3415 if (ret) 3416 goto out; 3417 3418 leaf = path->nodes[0]; 3419 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 3420 3421 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item)); 3422 3423 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data); 3424 btrfs_set_balance_data(leaf, item, &disk_bargs); 3425 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta); 3426 btrfs_set_balance_meta(leaf, item, &disk_bargs); 3427 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys); 3428 btrfs_set_balance_sys(leaf, item, &disk_bargs); 3429 3430 btrfs_set_balance_flags(leaf, item, bctl->flags); 3431 3432 btrfs_mark_buffer_dirty(leaf); 3433 out: 3434 btrfs_free_path(path); 3435 err = btrfs_commit_transaction(trans); 3436 if (err && !ret) 3437 ret = err; 3438 return ret; 3439 } 3440 3441 static int del_balance_item(struct btrfs_fs_info *fs_info) 3442 { 3443 struct btrfs_root *root = fs_info->tree_root; 3444 struct btrfs_trans_handle *trans; 3445 struct btrfs_path *path; 3446 struct btrfs_key key; 3447 int ret, err; 3448 3449 path = btrfs_alloc_path(); 3450 if (!path) 3451 return -ENOMEM; 3452 3453 trans = btrfs_start_transaction_fallback_global_rsv(root, 0); 3454 if (IS_ERR(trans)) { 3455 btrfs_free_path(path); 3456 return PTR_ERR(trans); 3457 } 3458 3459 key.objectid = BTRFS_BALANCE_OBJECTID; 3460 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3461 key.offset = 0; 3462 3463 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 3464 if (ret < 0) 3465 goto out; 3466 if (ret > 0) { 3467 ret = -ENOENT; 3468 goto out; 3469 } 3470 3471 ret = btrfs_del_item(trans, root, path); 3472 out: 3473 btrfs_free_path(path); 3474 err = btrfs_commit_transaction(trans); 3475 if (err && !ret) 3476 ret = err; 3477 return ret; 3478 } 3479 3480 /* 3481 * This is a 
heuristic used to reduce the number of chunks balanced on
3482 * resume after balance was interrupted. 3483 */
3484 static void update_balance_args(struct btrfs_balance_control *bctl) 3485 {
3486 /* 3487 * Turn on soft mode for chunk types that were being converted. 3488 */
3489 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3490 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3491 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3492 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3493 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3494 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT; 3495
3496 /* 3497 * Turn on the usage filter if it is not already used. The idea is
3498 * that chunks that we have already balanced should be
3499 * reasonably full. Don't do it for chunks that are being
3500 * converted - that will keep us from relocating unconverted
3501 * (albeit full) chunks. 3502 */
3503 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3504 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3505 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3506 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3507 bctl->data.usage = 90; 3508 }
3509 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3510 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3511 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3512 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3513 bctl->sys.usage = 90; 3514 }
3515 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3516 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3517 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3518 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3519 bctl->meta.usage = 90; 3520 } 3521 } 3522
3523 /* 3524 * Clear the balance status in fs_info and delete the balance item from disk. 3525 */
3526 static void reset_balance_state(struct btrfs_fs_info *fs_info) 3527 {
3528 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3529 int ret; 3530
3531 BUG_ON(!fs_info->balance_ctl); 3532
3533 spin_lock(&fs_info->balance_lock); 3534 fs_info->balance_ctl = NULL;
3535 spin_unlock(&fs_info->balance_lock); 3536
3537 kfree(bctl); 3538 ret = del_balance_item(fs_info); 3539 if (ret)
3540 btrfs_handle_fs_error(fs_info, ret, NULL); 3541 } 3542
3543 /* 3544 * Balance filters. Return 1 if chunk should be filtered out
3545 (should not be balanced).
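Each filter below is consulted by should_balance_chunk() only when the
corresponding flag is set, e.g. for the profiles filter:

	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
	    chunk_profiles_filter(chunk_type, bargs)) {
		return 0;
	}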
3546 */ 3547 static int chunk_profiles_filter(u64 chunk_type, 3548 struct btrfs_balance_args *bargs) 3549 { 3550 chunk_type = chunk_to_extended(chunk_type) & 3551 BTRFS_EXTENDED_PROFILE_MASK; 3552 3553 if (bargs->profiles & chunk_type) 3554 return 0; 3555 3556 return 1; 3557 } 3558 3559 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, 3560 struct btrfs_balance_args *bargs) 3561 { 3562 struct btrfs_block_group *cache; 3563 u64 chunk_used; 3564 u64 user_thresh_min; 3565 u64 user_thresh_max; 3566 int ret = 1; 3567 3568 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3569 chunk_used = cache->used; 3570 3571 if (bargs->usage_min == 0) 3572 user_thresh_min = 0; 3573 else 3574 user_thresh_min = div_factor_fine(cache->length, 3575 bargs->usage_min); 3576 3577 if (bargs->usage_max == 0) 3578 user_thresh_max = 1; 3579 else if (bargs->usage_max > 100) 3580 user_thresh_max = cache->length; 3581 else 3582 user_thresh_max = div_factor_fine(cache->length, 3583 bargs->usage_max); 3584 3585 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max) 3586 ret = 0; 3587 3588 btrfs_put_block_group(cache); 3589 return ret; 3590 } 3591 3592 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, 3593 u64 chunk_offset, struct btrfs_balance_args *bargs) 3594 { 3595 struct btrfs_block_group *cache; 3596 u64 chunk_used, user_thresh; 3597 int ret = 1; 3598 3599 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3600 chunk_used = cache->used; 3601 3602 if (bargs->usage_min == 0) 3603 user_thresh = 1; 3604 else if (bargs->usage > 100) 3605 user_thresh = cache->length; 3606 else 3607 user_thresh = div_factor_fine(cache->length, bargs->usage); 3608 3609 if (chunk_used < user_thresh) 3610 ret = 0; 3611 3612 btrfs_put_block_group(cache); 3613 return ret; 3614 } 3615 3616 static int chunk_devid_filter(struct extent_buffer *leaf, 3617 struct btrfs_chunk *chunk, 3618 struct btrfs_balance_args *bargs) 3619 { 3620 struct btrfs_stripe *stripe; 3621 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3622 int i; 3623 3624 for (i = 0; i < num_stripes; i++) { 3625 stripe = btrfs_stripe_nr(chunk, i); 3626 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid) 3627 return 0; 3628 } 3629 3630 return 1; 3631 } 3632 3633 static u64 calc_data_stripes(u64 type, int num_stripes) 3634 { 3635 const int index = btrfs_bg_flags_to_raid_index(type); 3636 const int ncopies = btrfs_raid_array[index].ncopies; 3637 const int nparity = btrfs_raid_array[index].nparity; 3638 3639 return (num_stripes - nparity) / ncopies; 3640 } 3641 3642 /* [pstart, pend) */ 3643 static int chunk_drange_filter(struct extent_buffer *leaf, 3644 struct btrfs_chunk *chunk, 3645 struct btrfs_balance_args *bargs) 3646 { 3647 struct btrfs_stripe *stripe; 3648 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3649 u64 stripe_offset; 3650 u64 stripe_length; 3651 u64 type; 3652 int factor; 3653 int i; 3654 3655 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID)) 3656 return 0; 3657 3658 type = btrfs_chunk_type(leaf, chunk); 3659 factor = calc_data_stripes(type, num_stripes); 3660 3661 for (i = 0; i < num_stripes; i++) { 3662 stripe = btrfs_stripe_nr(chunk, i); 3663 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid) 3664 continue; 3665 3666 stripe_offset = btrfs_stripe_offset(leaf, stripe); 3667 stripe_length = btrfs_chunk_length(leaf, chunk); 3668 stripe_length = div_u64(stripe_length, factor); 3669 3670 if (stripe_offset < bargs->pend && 3671 stripe_offset + stripe_length > bargs->pstart) 3672 return 0; 
3673 } 3674 3675 return 1; 3676 } 3677 3678 /* [vstart, vend) */ 3679 static int chunk_vrange_filter(struct extent_buffer *leaf, 3680 struct btrfs_chunk *chunk, 3681 u64 chunk_offset, 3682 struct btrfs_balance_args *bargs) 3683 { 3684 if (chunk_offset < bargs->vend && 3685 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart) 3686 /* at least part of the chunk is inside this vrange */ 3687 return 0; 3688 3689 return 1; 3690 } 3691 3692 static int chunk_stripes_range_filter(struct extent_buffer *leaf, 3693 struct btrfs_chunk *chunk, 3694 struct btrfs_balance_args *bargs) 3695 { 3696 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3697 3698 if (bargs->stripes_min <= num_stripes 3699 && num_stripes <= bargs->stripes_max) 3700 return 0; 3701 3702 return 1; 3703 } 3704 3705 static int chunk_soft_convert_filter(u64 chunk_type, 3706 struct btrfs_balance_args *bargs) 3707 { 3708 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 3709 return 0; 3710 3711 chunk_type = chunk_to_extended(chunk_type) & 3712 BTRFS_EXTENDED_PROFILE_MASK; 3713 3714 if (bargs->target == chunk_type) 3715 return 1; 3716 3717 return 0; 3718 } 3719 3720 static int should_balance_chunk(struct extent_buffer *leaf, 3721 struct btrfs_chunk *chunk, u64 chunk_offset) 3722 { 3723 struct btrfs_fs_info *fs_info = leaf->fs_info; 3724 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3725 struct btrfs_balance_args *bargs = NULL; 3726 u64 chunk_type = btrfs_chunk_type(leaf, chunk); 3727 3728 /* type filter */ 3729 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) & 3730 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) { 3731 return 0; 3732 } 3733 3734 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3735 bargs = &bctl->data; 3736 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3737 bargs = &bctl->sys; 3738 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3739 bargs = &bctl->meta; 3740 3741 /* profiles filter */ 3742 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) && 3743 chunk_profiles_filter(chunk_type, bargs)) { 3744 return 0; 3745 } 3746 3747 /* usage filter */ 3748 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) && 3749 chunk_usage_filter(fs_info, chunk_offset, bargs)) { 3750 return 0; 3751 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3752 chunk_usage_range_filter(fs_info, chunk_offset, bargs)) { 3753 return 0; 3754 } 3755 3756 /* devid filter */ 3757 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) && 3758 chunk_devid_filter(leaf, chunk, bargs)) { 3759 return 0; 3760 } 3761 3762 /* drange filter, makes sense only with devid filter */ 3763 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) && 3764 chunk_drange_filter(leaf, chunk, bargs)) { 3765 return 0; 3766 } 3767 3768 /* vrange filter */ 3769 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) && 3770 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) { 3771 return 0; 3772 } 3773 3774 /* stripes filter */ 3775 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) && 3776 chunk_stripes_range_filter(leaf, chunk, bargs)) { 3777 return 0; 3778 } 3779 3780 /* soft profile changing mode */ 3781 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) && 3782 chunk_soft_convert_filter(chunk_type, bargs)) { 3783 return 0; 3784 } 3785 3786 /* 3787 * limited by count, must be the last filter 3788 */ 3789 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) { 3790 if (bargs->limit == 0) 3791 return 0; 3792 else 3793 bargs->limit--; 3794 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) { 3795 /* 3796 * Same logic as the 'limit' filter; the minimum cannot be 3797 * determined here 
because we do not have the global information
3798 * about the count of all chunks that satisfy the filters.
3799 */
3800 if (bargs->limit_max == 0)
3801 return 0;
3802 else
3803 bargs->limit_max--;
3804 }
3805
3806 return 1;
3807 }
3808
3809 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3810 {
3811 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3812 struct btrfs_root *chunk_root = fs_info->chunk_root;
3813 u64 chunk_type;
3814 struct btrfs_chunk *chunk;
3815 struct btrfs_path *path = NULL;
3816 struct btrfs_key key;
3817 struct btrfs_key found_key;
3818 struct extent_buffer *leaf;
3819 int slot;
3820 int ret;
3821 int enospc_errors = 0;
3822 bool counting = true;
3823 /* The single value limit and min/max limits use the same bytes in the args union, stash the originals so they can be restored later. */
3824 u64 limit_data = bctl->data.limit;
3825 u64 limit_meta = bctl->meta.limit;
3826 u64 limit_sys = bctl->sys.limit;
3827 u32 count_data = 0;
3828 u32 count_meta = 0;
3829 u32 count_sys = 0;
3830 int chunk_reserved = 0;
3831
3832 path = btrfs_alloc_path();
3833 if (!path) {
3834 ret = -ENOMEM;
3835 goto error;
3836 }
3837
3838 /* zero out stat counters */
3839 spin_lock(&fs_info->balance_lock);
3840 memset(&bctl->stat, 0, sizeof(bctl->stat));
3841 spin_unlock(&fs_info->balance_lock);
3842 again:
3843 if (!counting) {
3844 /*
3845 * The single value limit and min/max limits use the same bytes in the
3846 * args union; restore the values that the counting pass decremented.
3847 */
3848 bctl->data.limit = limit_data;
3849 bctl->meta.limit = limit_meta;
3850 bctl->sys.limit = limit_sys;
3851 }
3852 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3853 key.offset = (u64)-1;
3854 key.type = BTRFS_CHUNK_ITEM_KEY;
3855
3856 while (1) {
3857 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3858 atomic_read(&fs_info->balance_cancel_req)) {
3859 ret = -ECANCELED;
3860 goto error;
3861 }
3862
3863 mutex_lock(&fs_info->reclaim_bgs_lock);
3864 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3865 if (ret < 0) {
3866 mutex_unlock(&fs_info->reclaim_bgs_lock);
3867 goto error;
3868 }
3869
3870 /*
3871 * this shouldn't happen, it means the last relocate
3872 * failed
3873 */
3874 if (ret == 0)
3875 BUG(); /* FIXME break ? 
*/ 3876
3877 ret = btrfs_previous_item(chunk_root, path, 0,
3878 BTRFS_CHUNK_ITEM_KEY);
3879 if (ret) {
3880 mutex_unlock(&fs_info->reclaim_bgs_lock);
3881 ret = 0;
3882 break;
3883 }
3884
3885 leaf = path->nodes[0];
3886 slot = path->slots[0];
3887 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3888
3889 if (found_key.objectid != key.objectid) {
3890 mutex_unlock(&fs_info->reclaim_bgs_lock);
3891 break;
3892 }
3893
3894 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3895 chunk_type = btrfs_chunk_type(leaf, chunk);
3896
3897 if (!counting) {
3898 spin_lock(&fs_info->balance_lock);
3899 bctl->stat.considered++;
3900 spin_unlock(&fs_info->balance_lock);
3901 }
3902
3903 ret = should_balance_chunk(leaf, chunk, found_key.offset);
3904
3905 btrfs_release_path(path);
3906 if (!ret) {
3907 mutex_unlock(&fs_info->reclaim_bgs_lock);
3908 goto loop;
3909 }
3910
3911 if (counting) {
3912 mutex_unlock(&fs_info->reclaim_bgs_lock);
3913 spin_lock(&fs_info->balance_lock);
3914 bctl->stat.expected++;
3915 spin_unlock(&fs_info->balance_lock);
3916
3917 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3918 count_data++;
3919 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3920 count_sys++;
3921 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3922 count_meta++;
3923
3924 goto loop;
3925 }
3926
3927 /*
3928 * Apply limit_min filter, no need to check if the LIMITS
3929 * filter is used, limit_min is 0 by default
3930 */
3931 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3932 count_data < bctl->data.limit_min)
3933 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
3934 count_meta < bctl->meta.limit_min)
3935 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
3936 count_sys < bctl->sys.limit_min)) {
3937 mutex_unlock(&fs_info->reclaim_bgs_lock);
3938 goto loop;
3939 }
3940
3941 if (!chunk_reserved) {
3942 /*
3943 * We may be relocating the only data chunk we have,
3944 * which could potentially end up losing data's
3945 * raid profile, so let's allocate an empty one in
3946 * advance.
3947 */
3948 ret = btrfs_may_alloc_data_chunk(fs_info,
3949 found_key.offset);
3950 if (ret < 0) {
3951 mutex_unlock(&fs_info->reclaim_bgs_lock);
3952 goto error;
3953 } else if (ret == 1) {
3954 chunk_reserved = 1;
3955 }
3956 }
3957
3958 ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3959 mutex_unlock(&fs_info->reclaim_bgs_lock);
3960 if (ret == -ENOSPC) {
3961 enospc_errors++;
3962 } else if (ret == -ETXTBSY) {
3963 btrfs_info(fs_info,
3964 "skipping relocation of block group %llu due to active swapfile",
3965 found_key.offset);
3966 ret = 0;
3967 } else if (ret) {
3968 goto error;
3969 } else {
3970 spin_lock(&fs_info->balance_lock);
3971 bctl->stat.completed++;
3972 spin_unlock(&fs_info->balance_lock);
3973 }
3974 loop:
3975 if (found_key.offset == 0)
3976 break;
3977 key.offset = found_key.offset - 1;
3978 }
3979
3980 if (counting) {
3981 btrfs_release_path(path);
3982 counting = false;
3983 goto again;
3984 }
3985 error:
3986 btrfs_free_path(path);
3987 if (enospc_errors) {
3988 btrfs_info(fs_info, "%d enospc errors during balance",
3989 enospc_errors);
3990 if (!ret)
3991 ret = -ENOSPC;
3992 }
3993
3994 return ret;
3995 }
3996
3997 /**
3998 * alloc_profile_is_valid - see if a given profile is valid and reduced
3999 * @flags: profile to validate
4000 * @extended: if true @flags is treated as an extended profile
4001 */
4002 static int alloc_profile_is_valid(u64 flags, int extended)
4003 {
4004 u64 mask = (extended ? 
BTRFS_EXTENDED_PROFILE_MASK : 4005 BTRFS_BLOCK_GROUP_PROFILE_MASK); 4006 4007 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK; 4008 4009 /* 1) check that all other bits are zeroed */ 4010 if (flags & ~mask) 4011 return 0; 4012 4013 /* 2) see if profile is reduced */ 4014 if (flags == 0) 4015 return !extended; /* "0" is valid for usual profiles */ 4016 4017 return has_single_bit_set(flags); 4018 } 4019 4020 static inline int balance_need_close(struct btrfs_fs_info *fs_info) 4021 { 4022 /* cancel requested || normal exit path */ 4023 return atomic_read(&fs_info->balance_cancel_req) || 4024 (atomic_read(&fs_info->balance_pause_req) == 0 && 4025 atomic_read(&fs_info->balance_cancel_req) == 0); 4026 } 4027 4028 /* 4029 * Validate target profile against allowed profiles and return true if it's OK. 4030 * Otherwise print the error message and return false. 4031 */ 4032 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info, 4033 const struct btrfs_balance_args *bargs, 4034 u64 allowed, const char *type) 4035 { 4036 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 4037 return true; 4038 4039 if (fs_info->sectorsize < PAGE_SIZE && 4040 bargs->target & BTRFS_BLOCK_GROUP_RAID56_MASK) { 4041 btrfs_err(fs_info, 4042 "RAID56 is not yet supported for sectorsize %u with page size %lu", 4043 fs_info->sectorsize, PAGE_SIZE); 4044 return false; 4045 } 4046 /* Profile is valid and does not have bits outside of the allowed set */ 4047 if (alloc_profile_is_valid(bargs->target, 1) && 4048 (bargs->target & ~allowed) == 0) 4049 return true; 4050 4051 btrfs_err(fs_info, "balance: invalid convert %s profile %s", 4052 type, btrfs_bg_type_to_raid_name(bargs->target)); 4053 return false; 4054 } 4055 4056 /* 4057 * Fill @buf with textual description of balance filter flags @bargs, up to 4058 * @size_buf including the terminating null. The output may be trimmed if it 4059 * does not fit into the provided buffer. 
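*
* As a purely hypothetical illustration (values made up), a data filter
* with convert, soft and usage set would be rendered as:
*
*   convert=raid1,soft,usage=50
*
* Each matching flag appends "name=value," below and the trailing comma
* is stripped at out_overflow.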
4060 */ 4061 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf, 4062 u32 size_buf) 4063 { 4064 int ret; 4065 u32 size_bp = size_buf; 4066 char *bp = buf; 4067 u64 flags = bargs->flags; 4068 char tmp_buf[128] = {'\0'}; 4069 4070 if (!flags) 4071 return; 4072 4073 #define CHECK_APPEND_NOARG(a) \ 4074 do { \ 4075 ret = snprintf(bp, size_bp, (a)); \ 4076 if (ret < 0 || ret >= size_bp) \ 4077 goto out_overflow; \ 4078 size_bp -= ret; \ 4079 bp += ret; \ 4080 } while (0) 4081 4082 #define CHECK_APPEND_1ARG(a, v1) \ 4083 do { \ 4084 ret = snprintf(bp, size_bp, (a), (v1)); \ 4085 if (ret < 0 || ret >= size_bp) \ 4086 goto out_overflow; \ 4087 size_bp -= ret; \ 4088 bp += ret; \ 4089 } while (0) 4090 4091 #define CHECK_APPEND_2ARG(a, v1, v2) \ 4092 do { \ 4093 ret = snprintf(bp, size_bp, (a), (v1), (v2)); \ 4094 if (ret < 0 || ret >= size_bp) \ 4095 goto out_overflow; \ 4096 size_bp -= ret; \ 4097 bp += ret; \ 4098 } while (0) 4099 4100 if (flags & BTRFS_BALANCE_ARGS_CONVERT) 4101 CHECK_APPEND_1ARG("convert=%s,", 4102 btrfs_bg_type_to_raid_name(bargs->target)); 4103 4104 if (flags & BTRFS_BALANCE_ARGS_SOFT) 4105 CHECK_APPEND_NOARG("soft,"); 4106 4107 if (flags & BTRFS_BALANCE_ARGS_PROFILES) { 4108 btrfs_describe_block_groups(bargs->profiles, tmp_buf, 4109 sizeof(tmp_buf)); 4110 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf); 4111 } 4112 4113 if (flags & BTRFS_BALANCE_ARGS_USAGE) 4114 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage); 4115 4116 if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) 4117 CHECK_APPEND_2ARG("usage=%u..%u,", 4118 bargs->usage_min, bargs->usage_max); 4119 4120 if (flags & BTRFS_BALANCE_ARGS_DEVID) 4121 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid); 4122 4123 if (flags & BTRFS_BALANCE_ARGS_DRANGE) 4124 CHECK_APPEND_2ARG("drange=%llu..%llu,", 4125 bargs->pstart, bargs->pend); 4126 4127 if (flags & BTRFS_BALANCE_ARGS_VRANGE) 4128 CHECK_APPEND_2ARG("vrange=%llu..%llu,", 4129 bargs->vstart, bargs->vend); 4130 4131 if (flags & BTRFS_BALANCE_ARGS_LIMIT) 4132 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit); 4133 4134 if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE) 4135 CHECK_APPEND_2ARG("limit=%u..%u,", 4136 bargs->limit_min, bargs->limit_max); 4137 4138 if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) 4139 CHECK_APPEND_2ARG("stripes=%u..%u,", 4140 bargs->stripes_min, bargs->stripes_max); 4141 4142 #undef CHECK_APPEND_2ARG 4143 #undef CHECK_APPEND_1ARG 4144 #undef CHECK_APPEND_NOARG 4145 4146 out_overflow: 4147 4148 if (size_bp < size_buf) 4149 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */ 4150 else 4151 buf[0] = '\0'; 4152 } 4153 4154 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info) 4155 { 4156 u32 size_buf = 1024; 4157 char tmp_buf[192] = {'\0'}; 4158 char *buf; 4159 char *bp; 4160 u32 size_bp = size_buf; 4161 int ret; 4162 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 4163 4164 buf = kzalloc(size_buf, GFP_KERNEL); 4165 if (!buf) 4166 return; 4167 4168 bp = buf; 4169 4170 #define CHECK_APPEND_1ARG(a, v1) \ 4171 do { \ 4172 ret = snprintf(bp, size_bp, (a), (v1)); \ 4173 if (ret < 0 || ret >= size_bp) \ 4174 goto out_overflow; \ 4175 size_bp -= ret; \ 4176 bp += ret; \ 4177 } while (0) 4178 4179 if (bctl->flags & BTRFS_BALANCE_FORCE) 4180 CHECK_APPEND_1ARG("%s", "-f "); 4181 4182 if (bctl->flags & BTRFS_BALANCE_DATA) { 4183 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf)); 4184 CHECK_APPEND_1ARG("-d%s ", tmp_buf); 4185 } 4186 4187 if (bctl->flags & BTRFS_BALANCE_METADATA) { 4188 describe_balance_args(&bctl->meta, 
tmp_buf, sizeof(tmp_buf));
4189 CHECK_APPEND_1ARG("-m%s ", tmp_buf);
4190 }
4191
4192 if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
4193 describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
4194 CHECK_APPEND_1ARG("-s%s ", tmp_buf);
4195 }
4196
4197 #undef CHECK_APPEND_1ARG
4198
4199 out_overflow:
4200
4201 if (size_bp < size_buf)
4202 buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
4203 btrfs_info(fs_info, "balance: %s %s",
4204 (bctl->flags & BTRFS_BALANCE_RESUME) ?
4205 "resume" : "start", buf);
4206
4207 kfree(buf);
4208 }
4209
4210 /*
4211 * Should be called with balance mutex held
4212 */
4213 int btrfs_balance(struct btrfs_fs_info *fs_info,
4214 struct btrfs_balance_control *bctl,
4215 struct btrfs_ioctl_balance_args *bargs)
4216 {
4217 u64 meta_target, data_target;
4218 u64 allowed;
4219 int mixed = 0;
4220 int ret;
4221 u64 num_devices;
4222 unsigned seq;
4223 bool reducing_redundancy;
4224 int i;
4225
4226 if (btrfs_fs_closing(fs_info) ||
4227 atomic_read(&fs_info->balance_pause_req) ||
4228 btrfs_should_cancel_balance(fs_info)) {
4229 ret = -EINVAL;
4230 goto out;
4231 }
4232
4233 allowed = btrfs_super_incompat_flags(fs_info->super_copy);
4234 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
4235 mixed = 1;
4236
4237 /*
4238 * In case of mixed groups both data and meta should be picked,
4239 * and identical options should be given for both of them.
4240 */
4241 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
4242 if (mixed && (bctl->flags & allowed)) {
4243 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
4244 !(bctl->flags & BTRFS_BALANCE_METADATA) ||
4245 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
4246 btrfs_err(fs_info,
4247 "balance: mixed groups data and metadata options must be the same");
4248 ret = -EINVAL;
4249 goto out;
4250 }
4251 }
4252
4253 /*
4254 * rw_devices will not change at the moment, device add/delete/replace
4255 * are exclusive
4256 */
4257 num_devices = fs_info->fs_devices->rw_devices;
4258
4259 /*
4260 * SINGLE profile on-disk has no profile bit, but in-memory we have a
4261 * special bit for it, to make it easier to distinguish. Thus we need
4262 * to set it manually, or balance would refuse the profile. 
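*
* For example (illustrative only), with two writable devices the loop
* below sets BTRFS_AVAIL_ALLOC_BIT_SINGLE plus the bg_flag of every
* profile whose devs_min <= 2, so a convert target of raid1 passes
* validate_convert_profile() while raid1c3 (devs_min == 3) is rejected.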
4263 */ 4264 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE; 4265 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) 4266 if (num_devices >= btrfs_raid_array[i].devs_min) 4267 allowed |= btrfs_raid_array[i].bg_flag; 4268 4269 if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") || 4270 !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") || 4271 !validate_convert_profile(fs_info, &bctl->sys, allowed, "system")) { 4272 ret = -EINVAL; 4273 goto out; 4274 } 4275 4276 /* 4277 * Allow to reduce metadata or system integrity only if force set for 4278 * profiles with redundancy (copies, parity) 4279 */ 4280 allowed = 0; 4281 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) { 4282 if (btrfs_raid_array[i].ncopies >= 2 || 4283 btrfs_raid_array[i].tolerated_failures >= 1) 4284 allowed |= btrfs_raid_array[i].bg_flag; 4285 } 4286 do { 4287 seq = read_seqbegin(&fs_info->profiles_lock); 4288 4289 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4290 (fs_info->avail_system_alloc_bits & allowed) && 4291 !(bctl->sys.target & allowed)) || 4292 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4293 (fs_info->avail_metadata_alloc_bits & allowed) && 4294 !(bctl->meta.target & allowed))) 4295 reducing_redundancy = true; 4296 else 4297 reducing_redundancy = false; 4298 4299 /* if we're not converting, the target field is uninitialized */ 4300 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4301 bctl->meta.target : fs_info->avail_metadata_alloc_bits; 4302 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4303 bctl->data.target : fs_info->avail_data_alloc_bits; 4304 } while (read_seqretry(&fs_info->profiles_lock, seq)); 4305 4306 if (reducing_redundancy) { 4307 if (bctl->flags & BTRFS_BALANCE_FORCE) { 4308 btrfs_info(fs_info, 4309 "balance: force reducing metadata redundancy"); 4310 } else { 4311 btrfs_err(fs_info, 4312 "balance: reduces metadata redundancy, use --force if you want this"); 4313 ret = -EINVAL; 4314 goto out; 4315 } 4316 } 4317 4318 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) < 4319 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) { 4320 btrfs_warn(fs_info, 4321 "balance: metadata profile %s has lower redundancy than data profile %s", 4322 btrfs_bg_type_to_raid_name(meta_target), 4323 btrfs_bg_type_to_raid_name(data_target)); 4324 } 4325 4326 ret = insert_balance_item(fs_info, bctl); 4327 if (ret && ret != -EEXIST) 4328 goto out; 4329 4330 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) { 4331 BUG_ON(ret == -EEXIST); 4332 BUG_ON(fs_info->balance_ctl); 4333 spin_lock(&fs_info->balance_lock); 4334 fs_info->balance_ctl = bctl; 4335 spin_unlock(&fs_info->balance_lock); 4336 } else { 4337 BUG_ON(ret != -EEXIST); 4338 spin_lock(&fs_info->balance_lock); 4339 update_balance_args(bctl); 4340 spin_unlock(&fs_info->balance_lock); 4341 } 4342 4343 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4344 set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4345 describe_balance_start_or_resume(fs_info); 4346 mutex_unlock(&fs_info->balance_mutex); 4347 4348 ret = __btrfs_balance(fs_info); 4349 4350 mutex_lock(&fs_info->balance_mutex); 4351 if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) 4352 btrfs_info(fs_info, "balance: paused"); 4353 /* 4354 * Balance can be canceled by: 4355 * 4356 * - Regular cancel request 4357 * Then ret == -ECANCELED and balance_cancel_req > 0 4358 * 4359 * - Fatal signal to "btrfs" process 4360 * Either the signal caught by wait_reserve_ticket() and callers 4361 * got 
-EINTR, or caught by btrfs_should_cancel_balance() and 4362 * got -ECANCELED. 4363 * Either way, in this case balance_cancel_req = 0, and 4364 * ret == -EINTR or ret == -ECANCELED. 4365 * 4366 * So here we only check the return value to catch canceled balance. 4367 */ 4368 else if (ret == -ECANCELED || ret == -EINTR) 4369 btrfs_info(fs_info, "balance: canceled"); 4370 else 4371 btrfs_info(fs_info, "balance: ended with status: %d", ret); 4372 4373 clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4374 4375 if (bargs) { 4376 memset(bargs, 0, sizeof(*bargs)); 4377 btrfs_update_ioctl_balance_args(fs_info, bargs); 4378 } 4379 4380 if ((ret && ret != -ECANCELED && ret != -ENOSPC) || 4381 balance_need_close(fs_info)) { 4382 reset_balance_state(fs_info); 4383 btrfs_exclop_finish(fs_info); 4384 } 4385 4386 wake_up(&fs_info->balance_wait_q); 4387 4388 return ret; 4389 out: 4390 if (bctl->flags & BTRFS_BALANCE_RESUME) 4391 reset_balance_state(fs_info); 4392 else 4393 kfree(bctl); 4394 btrfs_exclop_finish(fs_info); 4395 4396 return ret; 4397 } 4398 4399 static int balance_kthread(void *data) 4400 { 4401 struct btrfs_fs_info *fs_info = data; 4402 int ret = 0; 4403 4404 mutex_lock(&fs_info->balance_mutex); 4405 if (fs_info->balance_ctl) 4406 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL); 4407 mutex_unlock(&fs_info->balance_mutex); 4408 4409 return ret; 4410 } 4411 4412 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info) 4413 { 4414 struct task_struct *tsk; 4415 4416 mutex_lock(&fs_info->balance_mutex); 4417 if (!fs_info->balance_ctl) { 4418 mutex_unlock(&fs_info->balance_mutex); 4419 return 0; 4420 } 4421 mutex_unlock(&fs_info->balance_mutex); 4422 4423 if (btrfs_test_opt(fs_info, SKIP_BALANCE)) { 4424 btrfs_info(fs_info, "balance: resume skipped"); 4425 return 0; 4426 } 4427 4428 /* 4429 * A ro->rw remount sequence should continue with the paused balance 4430 * regardless of who pauses it, system or the user as of now, so set 4431 * the resume flag. 
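*
* With BTRFS_BALANCE_RESUME set, btrfs_balance() expects
* insert_balance_item() to return -EEXIST and reuses the existing item
* through update_balance_args() instead of inserting a new one.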
4432 */ 4433 spin_lock(&fs_info->balance_lock); 4434 fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME; 4435 spin_unlock(&fs_info->balance_lock); 4436 4437 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); 4438 return PTR_ERR_OR_ZERO(tsk); 4439 } 4440 4441 int btrfs_recover_balance(struct btrfs_fs_info *fs_info) 4442 { 4443 struct btrfs_balance_control *bctl; 4444 struct btrfs_balance_item *item; 4445 struct btrfs_disk_balance_args disk_bargs; 4446 struct btrfs_path *path; 4447 struct extent_buffer *leaf; 4448 struct btrfs_key key; 4449 int ret; 4450 4451 path = btrfs_alloc_path(); 4452 if (!path) 4453 return -ENOMEM; 4454 4455 key.objectid = BTRFS_BALANCE_OBJECTID; 4456 key.type = BTRFS_TEMPORARY_ITEM_KEY; 4457 key.offset = 0; 4458 4459 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4460 if (ret < 0) 4461 goto out; 4462 if (ret > 0) { /* ret = -ENOENT; */ 4463 ret = 0; 4464 goto out; 4465 } 4466 4467 bctl = kzalloc(sizeof(*bctl), GFP_NOFS); 4468 if (!bctl) { 4469 ret = -ENOMEM; 4470 goto out; 4471 } 4472 4473 leaf = path->nodes[0]; 4474 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 4475 4476 bctl->flags = btrfs_balance_flags(leaf, item); 4477 bctl->flags |= BTRFS_BALANCE_RESUME; 4478 4479 btrfs_balance_data(leaf, item, &disk_bargs); 4480 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs); 4481 btrfs_balance_meta(leaf, item, &disk_bargs); 4482 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs); 4483 btrfs_balance_sys(leaf, item, &disk_bargs); 4484 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs); 4485 4486 /* 4487 * This should never happen, as the paused balance state is recovered 4488 * during mount without any chance of other exclusive ops to collide. 4489 * 4490 * This gives the exclusive op status to balance and keeps in paused 4491 * state until user intervention (cancel or umount). If the ownership 4492 * cannot be assigned, show a message but do not fail. The balance 4493 * is in a paused state and must have fs_info::balance_ctl properly 4494 * set up. 
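*
* (From userspace that intervention is typically "btrfs balance resume"
* or "btrfs balance cancel" on the mounted filesystem.)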
4495 */ 4496 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) 4497 btrfs_warn(fs_info, 4498 "balance: cannot set exclusive op status, resume manually"); 4499 4500 btrfs_release_path(path); 4501 4502 mutex_lock(&fs_info->balance_mutex); 4503 BUG_ON(fs_info->balance_ctl); 4504 spin_lock(&fs_info->balance_lock); 4505 fs_info->balance_ctl = bctl; 4506 spin_unlock(&fs_info->balance_lock); 4507 mutex_unlock(&fs_info->balance_mutex); 4508 out: 4509 btrfs_free_path(path); 4510 return ret; 4511 } 4512 4513 int btrfs_pause_balance(struct btrfs_fs_info *fs_info) 4514 { 4515 int ret = 0; 4516 4517 mutex_lock(&fs_info->balance_mutex); 4518 if (!fs_info->balance_ctl) { 4519 mutex_unlock(&fs_info->balance_mutex); 4520 return -ENOTCONN; 4521 } 4522 4523 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4524 atomic_inc(&fs_info->balance_pause_req); 4525 mutex_unlock(&fs_info->balance_mutex); 4526 4527 wait_event(fs_info->balance_wait_q, 4528 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4529 4530 mutex_lock(&fs_info->balance_mutex); 4531 /* we are good with balance_ctl ripped off from under us */ 4532 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4533 atomic_dec(&fs_info->balance_pause_req); 4534 } else { 4535 ret = -ENOTCONN; 4536 } 4537 4538 mutex_unlock(&fs_info->balance_mutex); 4539 return ret; 4540 } 4541 4542 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) 4543 { 4544 mutex_lock(&fs_info->balance_mutex); 4545 if (!fs_info->balance_ctl) { 4546 mutex_unlock(&fs_info->balance_mutex); 4547 return -ENOTCONN; 4548 } 4549 4550 /* 4551 * A paused balance with the item stored on disk can be resumed at 4552 * mount time if the mount is read-write. Otherwise it's still paused 4553 * and we must not allow cancelling as it deletes the item. 4554 */ 4555 if (sb_rdonly(fs_info->sb)) { 4556 mutex_unlock(&fs_info->balance_mutex); 4557 return -EROFS; 4558 } 4559 4560 atomic_inc(&fs_info->balance_cancel_req); 4561 /* 4562 * if we are running just wait and return, balance item is 4563 * deleted in btrfs_balance in this case 4564 */ 4565 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4566 mutex_unlock(&fs_info->balance_mutex); 4567 wait_event(fs_info->balance_wait_q, 4568 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4569 mutex_lock(&fs_info->balance_mutex); 4570 } else { 4571 mutex_unlock(&fs_info->balance_mutex); 4572 /* 4573 * Lock released to allow other waiters to continue, we'll 4574 * reexamine the status again. 
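*
* In particular, another cancel request or a completing balance may
* have freed fs_info->balance_ctl while the mutex was dropped, hence
* the NULL check after retaking it below.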
4575 */ 4576 mutex_lock(&fs_info->balance_mutex); 4577 4578 if (fs_info->balance_ctl) { 4579 reset_balance_state(fs_info); 4580 btrfs_exclop_finish(fs_info); 4581 btrfs_info(fs_info, "balance: canceled"); 4582 } 4583 } 4584 4585 BUG_ON(fs_info->balance_ctl || 4586 test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4587 atomic_dec(&fs_info->balance_cancel_req); 4588 mutex_unlock(&fs_info->balance_mutex); 4589 return 0; 4590 } 4591 4592 int btrfs_uuid_scan_kthread(void *data) 4593 { 4594 struct btrfs_fs_info *fs_info = data; 4595 struct btrfs_root *root = fs_info->tree_root; 4596 struct btrfs_key key; 4597 struct btrfs_path *path = NULL; 4598 int ret = 0; 4599 struct extent_buffer *eb; 4600 int slot; 4601 struct btrfs_root_item root_item; 4602 u32 item_size; 4603 struct btrfs_trans_handle *trans = NULL; 4604 bool closing = false; 4605 4606 path = btrfs_alloc_path(); 4607 if (!path) { 4608 ret = -ENOMEM; 4609 goto out; 4610 } 4611 4612 key.objectid = 0; 4613 key.type = BTRFS_ROOT_ITEM_KEY; 4614 key.offset = 0; 4615 4616 while (1) { 4617 if (btrfs_fs_closing(fs_info)) { 4618 closing = true; 4619 break; 4620 } 4621 ret = btrfs_search_forward(root, &key, path, 4622 BTRFS_OLDEST_GENERATION); 4623 if (ret) { 4624 if (ret > 0) 4625 ret = 0; 4626 break; 4627 } 4628 4629 if (key.type != BTRFS_ROOT_ITEM_KEY || 4630 (key.objectid < BTRFS_FIRST_FREE_OBJECTID && 4631 key.objectid != BTRFS_FS_TREE_OBJECTID) || 4632 key.objectid > BTRFS_LAST_FREE_OBJECTID) 4633 goto skip; 4634 4635 eb = path->nodes[0]; 4636 slot = path->slots[0]; 4637 item_size = btrfs_item_size_nr(eb, slot); 4638 if (item_size < sizeof(root_item)) 4639 goto skip; 4640 4641 read_extent_buffer(eb, &root_item, 4642 btrfs_item_ptr_offset(eb, slot), 4643 (int)sizeof(root_item)); 4644 if (btrfs_root_refs(&root_item) == 0) 4645 goto skip; 4646 4647 if (!btrfs_is_empty_uuid(root_item.uuid) || 4648 !btrfs_is_empty_uuid(root_item.received_uuid)) { 4649 if (trans) 4650 goto update_tree; 4651 4652 btrfs_release_path(path); 4653 /* 4654 * 1 - subvol uuid item 4655 * 1 - received_subvol uuid item 4656 */ 4657 trans = btrfs_start_transaction(fs_info->uuid_root, 2); 4658 if (IS_ERR(trans)) { 4659 ret = PTR_ERR(trans); 4660 break; 4661 } 4662 continue; 4663 } else { 4664 goto skip; 4665 } 4666 update_tree: 4667 btrfs_release_path(path); 4668 if (!btrfs_is_empty_uuid(root_item.uuid)) { 4669 ret = btrfs_uuid_tree_add(trans, root_item.uuid, 4670 BTRFS_UUID_KEY_SUBVOL, 4671 key.objectid); 4672 if (ret < 0) { 4673 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4674 ret); 4675 break; 4676 } 4677 } 4678 4679 if (!btrfs_is_empty_uuid(root_item.received_uuid)) { 4680 ret = btrfs_uuid_tree_add(trans, 4681 root_item.received_uuid, 4682 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4683 key.objectid); 4684 if (ret < 0) { 4685 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4686 ret); 4687 break; 4688 } 4689 } 4690 4691 skip: 4692 btrfs_release_path(path); 4693 if (trans) { 4694 ret = btrfs_end_transaction(trans); 4695 trans = NULL; 4696 if (ret) 4697 break; 4698 } 4699 4700 if (key.offset < (u64)-1) { 4701 key.offset++; 4702 } else if (key.type < BTRFS_ROOT_ITEM_KEY) { 4703 key.offset = 0; 4704 key.type = BTRFS_ROOT_ITEM_KEY; 4705 } else if (key.objectid < (u64)-1) { 4706 key.offset = 0; 4707 key.type = BTRFS_ROOT_ITEM_KEY; 4708 key.objectid++; 4709 } else { 4710 break; 4711 } 4712 cond_resched(); 4713 } 4714 4715 out: 4716 btrfs_free_path(path); 4717 if (trans && !IS_ERR(trans)) 4718 btrfs_end_transaction(trans); 4719 if (ret) 4720 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread 
failed %d", ret); 4721 else if (!closing) 4722 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); 4723 up(&fs_info->uuid_tree_rescan_sem); 4724 return 0; 4725 } 4726 4727 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info) 4728 { 4729 struct btrfs_trans_handle *trans; 4730 struct btrfs_root *tree_root = fs_info->tree_root; 4731 struct btrfs_root *uuid_root; 4732 struct task_struct *task; 4733 int ret; 4734 4735 /* 4736 * 1 - root node 4737 * 1 - root item 4738 */ 4739 trans = btrfs_start_transaction(tree_root, 2); 4740 if (IS_ERR(trans)) 4741 return PTR_ERR(trans); 4742 4743 uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID); 4744 if (IS_ERR(uuid_root)) { 4745 ret = PTR_ERR(uuid_root); 4746 btrfs_abort_transaction(trans, ret); 4747 btrfs_end_transaction(trans); 4748 return ret; 4749 } 4750 4751 fs_info->uuid_root = uuid_root; 4752 4753 ret = btrfs_commit_transaction(trans); 4754 if (ret) 4755 return ret; 4756 4757 down(&fs_info->uuid_tree_rescan_sem); 4758 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid"); 4759 if (IS_ERR(task)) { 4760 /* fs_info->update_uuid_tree_gen remains 0 in all error case */ 4761 btrfs_warn(fs_info, "failed to start uuid_scan task"); 4762 up(&fs_info->uuid_tree_rescan_sem); 4763 return PTR_ERR(task); 4764 } 4765 4766 return 0; 4767 } 4768 4769 /* 4770 * shrinking a device means finding all of the device extents past 4771 * the new size, and then following the back refs to the chunks. 4772 * The chunk relocation code actually frees the device extent 4773 */ 4774 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) 4775 { 4776 struct btrfs_fs_info *fs_info = device->fs_info; 4777 struct btrfs_root *root = fs_info->dev_root; 4778 struct btrfs_trans_handle *trans; 4779 struct btrfs_dev_extent *dev_extent = NULL; 4780 struct btrfs_path *path; 4781 u64 length; 4782 u64 chunk_offset; 4783 int ret; 4784 int slot; 4785 int failed = 0; 4786 bool retried = false; 4787 struct extent_buffer *l; 4788 struct btrfs_key key; 4789 struct btrfs_super_block *super_copy = fs_info->super_copy; 4790 u64 old_total = btrfs_super_total_bytes(super_copy); 4791 u64 old_size = btrfs_device_get_total_bytes(device); 4792 u64 diff; 4793 u64 start; 4794 4795 new_size = round_down(new_size, fs_info->sectorsize); 4796 start = new_size; 4797 diff = round_down(old_size - new_size, fs_info->sectorsize); 4798 4799 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 4800 return -EINVAL; 4801 4802 path = btrfs_alloc_path(); 4803 if (!path) 4804 return -ENOMEM; 4805 4806 path->reada = READA_BACK; 4807 4808 trans = btrfs_start_transaction(root, 0); 4809 if (IS_ERR(trans)) { 4810 btrfs_free_path(path); 4811 return PTR_ERR(trans); 4812 } 4813 4814 mutex_lock(&fs_info->chunk_mutex); 4815 4816 btrfs_device_set_total_bytes(device, new_size); 4817 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 4818 device->fs_devices->total_rw_bytes -= diff; 4819 atomic64_sub(diff, &fs_info->free_chunk_space); 4820 } 4821 4822 /* 4823 * Once the device's size has been set to the new size, ensure all 4824 * in-memory chunks are synced to disk so that the loop below sees them 4825 * and relocates them accordingly. 
4826 */
4827 if (contains_pending_extent(device, &start, diff)) {
4828 mutex_unlock(&fs_info->chunk_mutex);
4829 ret = btrfs_commit_transaction(trans);
4830 if (ret)
4831 goto done;
4832 } else {
4833 mutex_unlock(&fs_info->chunk_mutex);
4834 btrfs_end_transaction(trans);
4835 }
4836
4837 again:
4838 key.objectid = device->devid;
4839 key.offset = (u64)-1;
4840 key.type = BTRFS_DEV_EXTENT_KEY;
4841
4842 do {
4843 mutex_lock(&fs_info->reclaim_bgs_lock);
4844 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4845 if (ret < 0) {
4846 mutex_unlock(&fs_info->reclaim_bgs_lock);
4847 goto done;
4848 }
4849
4850 ret = btrfs_previous_item(root, path, 0, key.type);
4851 if (ret) {
4852 mutex_unlock(&fs_info->reclaim_bgs_lock);
4853 if (ret < 0)
4854 goto done;
4855 ret = 0;
4856 btrfs_release_path(path);
4857 break;
4858 }
4859
4860 l = path->nodes[0];
4861 slot = path->slots[0];
4862 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4863
4864 if (key.objectid != device->devid) {
4865 mutex_unlock(&fs_info->reclaim_bgs_lock);
4866 btrfs_release_path(path);
4867 break;
4868 }
4869
4870 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4871 length = btrfs_dev_extent_length(l, dev_extent);
4872
4873 if (key.offset + length <= new_size) {
4874 mutex_unlock(&fs_info->reclaim_bgs_lock);
4875 btrfs_release_path(path);
4876 break;
4877 }
4878
4879 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4880 btrfs_release_path(path);
4881
4882 /*
4883 * We may be relocating the only data chunk we have,
4884 * which could potentially end up losing data's
4885 * raid profile, so let's allocate an empty one in
4886 * advance.
4887 */
4888 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
4889 if (ret < 0) {
4890 mutex_unlock(&fs_info->reclaim_bgs_lock);
4891 goto done;
4892 }
4893
4894 ret = btrfs_relocate_chunk(fs_info, chunk_offset);
4895 mutex_unlock(&fs_info->reclaim_bgs_lock);
4896 if (ret == -ENOSPC) {
4897 failed++;
4898 } else if (ret) {
4899 if (ret == -ETXTBSY) {
4900 btrfs_warn(fs_info,
4901 "could not shrink block group %llu due to active swapfile",
4902 chunk_offset);
4903 }
4904 goto done;
4905 }
4906 } while (key.offset-- > 0);
4907
4908 if (failed && !retried) {
4909 failed = 0;
4910 retried = true;
4911 goto again;
4912 } else if (failed && retried) {
4913 ret = -ENOSPC;
4914 goto done;
4915 }
4916
4917 /* Shrinking succeeded, else we would be at "done". */
4918 trans = btrfs_start_transaction(root, 0);
4919 if (IS_ERR(trans)) {
4920 ret = PTR_ERR(trans);
4921 goto done;
4922 }
4923
4924 mutex_lock(&fs_info->chunk_mutex);
4925 /* Clear all state bits beyond the shrunk device size */
4926 clear_extent_bits(&device->alloc_state, new_size, (u64)-1,
4927 CHUNK_STATE_MASK);
4928
4929 btrfs_device_set_disk_total_bytes(device, new_size);
4930 if (list_empty(&device->post_commit_list))
4931 list_add_tail(&device->post_commit_list,
4932 &trans->transaction->dev_update_list);
4933
4934 WARN_ON(diff > old_total);
4935 btrfs_set_super_total_bytes(super_copy,
4936 round_down(old_total - diff, fs_info->sectorsize));
4937 mutex_unlock(&fs_info->chunk_mutex);
4938
4939 btrfs_reserve_chunk_metadata(trans, false);
4940 /* Now btrfs_update_device() will change the on-disk size. 
*/ 4941 ret = btrfs_update_device(trans, device); 4942 btrfs_trans_release_chunk_metadata(trans); 4943 if (ret < 0) { 4944 btrfs_abort_transaction(trans, ret); 4945 btrfs_end_transaction(trans); 4946 } else { 4947 ret = btrfs_commit_transaction(trans); 4948 } 4949 done: 4950 btrfs_free_path(path); 4951 if (ret) { 4952 mutex_lock(&fs_info->chunk_mutex); 4953 btrfs_device_set_total_bytes(device, old_size); 4954 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 4955 device->fs_devices->total_rw_bytes += diff; 4956 atomic64_add(diff, &fs_info->free_chunk_space); 4957 mutex_unlock(&fs_info->chunk_mutex); 4958 } 4959 return ret; 4960 } 4961 4962 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info, 4963 struct btrfs_key *key, 4964 struct btrfs_chunk *chunk, int item_size) 4965 { 4966 struct btrfs_super_block *super_copy = fs_info->super_copy; 4967 struct btrfs_disk_key disk_key; 4968 u32 array_size; 4969 u8 *ptr; 4970 4971 lockdep_assert_held(&fs_info->chunk_mutex); 4972 4973 array_size = btrfs_super_sys_array_size(super_copy); 4974 if (array_size + item_size + sizeof(disk_key) 4975 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) 4976 return -EFBIG; 4977 4978 ptr = super_copy->sys_chunk_array + array_size; 4979 btrfs_cpu_key_to_disk(&disk_key, key); 4980 memcpy(ptr, &disk_key, sizeof(disk_key)); 4981 ptr += sizeof(disk_key); 4982 memcpy(ptr, chunk, item_size); 4983 item_size += sizeof(disk_key); 4984 btrfs_set_super_sys_array_size(super_copy, array_size + item_size); 4985 4986 return 0; 4987 } 4988 4989 /* 4990 * sort the devices in descending order by max_avail, total_avail 4991 */ 4992 static int btrfs_cmp_device_info(const void *a, const void *b) 4993 { 4994 const struct btrfs_device_info *di_a = a; 4995 const struct btrfs_device_info *di_b = b; 4996 4997 if (di_a->max_avail > di_b->max_avail) 4998 return -1; 4999 if (di_a->max_avail < di_b->max_avail) 5000 return 1; 5001 if (di_a->total_avail > di_b->total_avail) 5002 return -1; 5003 if (di_a->total_avail < di_b->total_avail) 5004 return 1; 5005 return 0; 5006 } 5007 5008 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type) 5009 { 5010 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK)) 5011 return; 5012 5013 btrfs_set_fs_incompat(info, RAID56); 5014 } 5015 5016 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type) 5017 { 5018 if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4))) 5019 return; 5020 5021 btrfs_set_fs_incompat(info, RAID1C34); 5022 } 5023 5024 /* 5025 * Structure used internally for btrfs_create_chunk() function. 5026 * Wraps needed parameters. 
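*
* As an illustrative example, a raid10 data chunk starts from
* btrfs_raid_array with sub_stripes=2, dev_stripes=1, ncopies=2 and
* nparity=0; init_alloc_chunk_ctl_policy_regular() then caps
* max_stripe_size at 1G and max_chunk_size at BTRFS_MAX_DATA_CHUNK_SIZE.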
5027 */ 5028 struct alloc_chunk_ctl { 5029 u64 start; 5030 u64 type; 5031 /* Total number of stripes to allocate */ 5032 int num_stripes; 5033 /* sub_stripes info for map */ 5034 int sub_stripes; 5035 /* Stripes per device */ 5036 int dev_stripes; 5037 /* Maximum number of devices to use */ 5038 int devs_max; 5039 /* Minimum number of devices to use */ 5040 int devs_min; 5041 /* ndevs has to be a multiple of this */ 5042 int devs_increment; 5043 /* Number of copies */ 5044 int ncopies; 5045 /* Number of stripes worth of bytes to store parity information */ 5046 int nparity; 5047 u64 max_stripe_size; 5048 u64 max_chunk_size; 5049 u64 dev_extent_min; 5050 u64 stripe_size; 5051 u64 chunk_size; 5052 int ndevs; 5053 }; 5054 5055 static void init_alloc_chunk_ctl_policy_regular( 5056 struct btrfs_fs_devices *fs_devices, 5057 struct alloc_chunk_ctl *ctl) 5058 { 5059 u64 type = ctl->type; 5060 5061 if (type & BTRFS_BLOCK_GROUP_DATA) { 5062 ctl->max_stripe_size = SZ_1G; 5063 ctl->max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE; 5064 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 5065 /* For larger filesystems, use larger metadata chunks */ 5066 if (fs_devices->total_rw_bytes > 50ULL * SZ_1G) 5067 ctl->max_stripe_size = SZ_1G; 5068 else 5069 ctl->max_stripe_size = SZ_256M; 5070 ctl->max_chunk_size = ctl->max_stripe_size; 5071 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 5072 ctl->max_stripe_size = SZ_32M; 5073 ctl->max_chunk_size = 2 * ctl->max_stripe_size; 5074 ctl->devs_max = min_t(int, ctl->devs_max, 5075 BTRFS_MAX_DEVS_SYS_CHUNK); 5076 } else { 5077 BUG(); 5078 } 5079 5080 /* We don't want a chunk larger than 10% of writable space */ 5081 ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1), 5082 ctl->max_chunk_size); 5083 ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes; 5084 } 5085 5086 static void init_alloc_chunk_ctl_policy_zoned( 5087 struct btrfs_fs_devices *fs_devices, 5088 struct alloc_chunk_ctl *ctl) 5089 { 5090 u64 zone_size = fs_devices->fs_info->zone_size; 5091 u64 limit; 5092 int min_num_stripes = ctl->devs_min * ctl->dev_stripes; 5093 int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies; 5094 u64 min_chunk_size = min_data_stripes * zone_size; 5095 u64 type = ctl->type; 5096 5097 ctl->max_stripe_size = zone_size; 5098 if (type & BTRFS_BLOCK_GROUP_DATA) { 5099 ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE, 5100 zone_size); 5101 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 5102 ctl->max_chunk_size = ctl->max_stripe_size; 5103 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 5104 ctl->max_chunk_size = 2 * ctl->max_stripe_size; 5105 ctl->devs_max = min_t(int, ctl->devs_max, 5106 BTRFS_MAX_DEVS_SYS_CHUNK); 5107 } else { 5108 BUG(); 5109 } 5110 5111 /* We don't want a chunk larger than 10% of writable space */ 5112 limit = max(round_down(div_factor(fs_devices->total_rw_bytes, 1), 5113 zone_size), 5114 min_chunk_size); 5115 ctl->max_chunk_size = min(limit, ctl->max_chunk_size); 5116 ctl->dev_extent_min = zone_size * ctl->dev_stripes; 5117 } 5118 5119 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices, 5120 struct alloc_chunk_ctl *ctl) 5121 { 5122 int index = btrfs_bg_flags_to_raid_index(ctl->type); 5123 5124 ctl->sub_stripes = btrfs_raid_array[index].sub_stripes; 5125 ctl->dev_stripes = btrfs_raid_array[index].dev_stripes; 5126 ctl->devs_max = btrfs_raid_array[index].devs_max; 5127 if (!ctl->devs_max) 5128 ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info); 5129 ctl->devs_min = btrfs_raid_array[index].devs_min; 5130 
ctl->devs_increment = btrfs_raid_array[index].devs_increment; 5131 ctl->ncopies = btrfs_raid_array[index].ncopies; 5132 ctl->nparity = btrfs_raid_array[index].nparity; 5133 ctl->ndevs = 0; 5134 5135 switch (fs_devices->chunk_alloc_policy) { 5136 case BTRFS_CHUNK_ALLOC_REGULAR: 5137 init_alloc_chunk_ctl_policy_regular(fs_devices, ctl); 5138 break; 5139 case BTRFS_CHUNK_ALLOC_ZONED: 5140 init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl); 5141 break; 5142 default: 5143 BUG(); 5144 } 5145 } 5146 5147 static int gather_device_info(struct btrfs_fs_devices *fs_devices, 5148 struct alloc_chunk_ctl *ctl, 5149 struct btrfs_device_info *devices_info) 5150 { 5151 struct btrfs_fs_info *info = fs_devices->fs_info; 5152 struct btrfs_device *device; 5153 u64 total_avail; 5154 u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes; 5155 int ret; 5156 int ndevs = 0; 5157 u64 max_avail; 5158 u64 dev_offset; 5159 5160 /* 5161 * in the first pass through the devices list, we gather information 5162 * about the available holes on each device. 5163 */ 5164 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { 5165 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 5166 WARN(1, KERN_ERR 5167 "BTRFS: read-only device in alloc_list\n"); 5168 continue; 5169 } 5170 5171 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 5172 &device->dev_state) || 5173 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 5174 continue; 5175 5176 if (device->total_bytes > device->bytes_used) 5177 total_avail = device->total_bytes - device->bytes_used; 5178 else 5179 total_avail = 0; 5180 5181 /* If there is no space on this device, skip it. */ 5182 if (total_avail < ctl->dev_extent_min) 5183 continue; 5184 5185 ret = find_free_dev_extent(device, dev_extent_want, &dev_offset, 5186 &max_avail); 5187 if (ret && ret != -ENOSPC) 5188 return ret; 5189 5190 if (ret == 0) 5191 max_avail = dev_extent_want; 5192 5193 if (max_avail < ctl->dev_extent_min) { 5194 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5195 btrfs_debug(info, 5196 "%s: devid %llu has no free space, have=%llu want=%llu", 5197 __func__, device->devid, max_avail, 5198 ctl->dev_extent_min); 5199 continue; 5200 } 5201 5202 if (ndevs == fs_devices->rw_devices) { 5203 WARN(1, "%s: found more than %llu devices\n", 5204 __func__, fs_devices->rw_devices); 5205 break; 5206 } 5207 devices_info[ndevs].dev_offset = dev_offset; 5208 devices_info[ndevs].max_avail = max_avail; 5209 devices_info[ndevs].total_avail = total_avail; 5210 devices_info[ndevs].dev = device; 5211 ++ndevs; 5212 } 5213 ctl->ndevs = ndevs; 5214 5215 /* 5216 * now sort the devices by hole size / available space 5217 */ 5218 sort(devices_info, ndevs, sizeof(struct btrfs_device_info), 5219 btrfs_cmp_device_info, NULL); 5220 5221 return 0; 5222 } 5223 5224 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl, 5225 struct btrfs_device_info *devices_info) 5226 { 5227 /* Number of stripes that count for block group size */ 5228 int data_stripes; 5229 5230 /* 5231 * The primary goal is to maximize the number of stripes, so use as 5232 * many devices as possible, even if the stripes are not maximum sized. 5233 * 5234 * The DUP profile stores more than one stripe per device, the 5235 * max_avail is the total size so we have to adjust. 
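*
* E.g. with DUP (dev_stripes == 2) and 2G of max_avail on the least
* roomy selected device, the division below yields a 1G stripe_size,
* since both stripes of a pair live on the same device.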
5236 */
5237 ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail,
5238 ctl->dev_stripes);
5239 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5240
5241 /* This will have to be fixed for RAID1 and RAID10 over more drives */
5242 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5243
5244 /*
5245 * Use the number of data stripes to figure out how big this chunk is
5246 * really going to be in terms of logical address space, and compare
5247 * that answer with the max chunk size. If it's higher, we try to
5248 * reduce stripe_size.
5249 */
5250 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5251 /*
5252 * Reduce stripe_size, round it up to a 16MB boundary again and
5253 * then use it, unless it ends up being even bigger than the
5254 * previous value we had already.
5255 */
5256 ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size,
5257 data_stripes), SZ_16M),
5258 ctl->stripe_size);
5259 }
5260
5261 /* Align to BTRFS_STRIPE_LEN */
5262 ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
5263 ctl->chunk_size = ctl->stripe_size * data_stripes;
5264
5265 return 0;
5266 }
5267
5268 static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl,
5269 struct btrfs_device_info *devices_info)
5270 {
5271 u64 zone_size = devices_info[0].dev->zone_info->zone_size;
5272 /* Number of stripes that count for block group size */
5273 int data_stripes;
5274
5275 /*
5276 * It should hold because:
5277 * dev_extent_min == dev_extent_want == zone_size * dev_stripes
5278 */
5279 ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min);
5280
5281 ctl->stripe_size = zone_size;
5282 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5283 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5284
5285 /* stripe_size is fixed in a zoned filesystem. Reduce ndevs instead. */
5286 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5287 ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies,
5288 ctl->stripe_size) + ctl->nparity,
5289 ctl->dev_stripes);
5290 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5291 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5292 ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size);
5293 }
5294
5295 ctl->chunk_size = ctl->stripe_size * data_stripes;
5296
5297 return 0;
5298 }
5299
5300 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
5301 struct alloc_chunk_ctl *ctl,
5302 struct btrfs_device_info *devices_info)
5303 {
5304 struct btrfs_fs_info *info = fs_devices->fs_info;
5305
5306 /*
5307 * Round down to number of usable stripes, devs_increment can be any
5308 * number so we can't use round_down() that requires power of 2, while
5309 * rounddown is safe. 
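*
* E.g. raid10 has devs_increment == 2, so 5 usable devices would be
* rounded down to 4 here before the devs_min check below.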
5310 */ 5311 ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment); 5312 5313 if (ctl->ndevs < ctl->devs_min) { 5314 if (btrfs_test_opt(info, ENOSPC_DEBUG)) { 5315 btrfs_debug(info, 5316 "%s: not enough devices with free space: have=%d minimum required=%d", 5317 __func__, ctl->ndevs, ctl->devs_min); 5318 } 5319 return -ENOSPC; 5320 } 5321 5322 ctl->ndevs = min(ctl->ndevs, ctl->devs_max); 5323 5324 switch (fs_devices->chunk_alloc_policy) { 5325 case BTRFS_CHUNK_ALLOC_REGULAR: 5326 return decide_stripe_size_regular(ctl, devices_info); 5327 case BTRFS_CHUNK_ALLOC_ZONED: 5328 return decide_stripe_size_zoned(ctl, devices_info); 5329 default: 5330 BUG(); 5331 } 5332 } 5333 5334 static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans, 5335 struct alloc_chunk_ctl *ctl, 5336 struct btrfs_device_info *devices_info) 5337 { 5338 struct btrfs_fs_info *info = trans->fs_info; 5339 struct map_lookup *map = NULL; 5340 struct extent_map_tree *em_tree; 5341 struct btrfs_block_group *block_group; 5342 struct extent_map *em; 5343 u64 start = ctl->start; 5344 u64 type = ctl->type; 5345 int ret; 5346 int i; 5347 int j; 5348 5349 map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS); 5350 if (!map) 5351 return ERR_PTR(-ENOMEM); 5352 map->num_stripes = ctl->num_stripes; 5353 5354 for (i = 0; i < ctl->ndevs; ++i) { 5355 for (j = 0; j < ctl->dev_stripes; ++j) { 5356 int s = i * ctl->dev_stripes + j; 5357 map->stripes[s].dev = devices_info[i].dev; 5358 map->stripes[s].physical = devices_info[i].dev_offset + 5359 j * ctl->stripe_size; 5360 } 5361 } 5362 map->stripe_len = BTRFS_STRIPE_LEN; 5363 map->io_align = BTRFS_STRIPE_LEN; 5364 map->io_width = BTRFS_STRIPE_LEN; 5365 map->type = type; 5366 map->sub_stripes = ctl->sub_stripes; 5367 5368 trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size); 5369 5370 em = alloc_extent_map(); 5371 if (!em) { 5372 kfree(map); 5373 return ERR_PTR(-ENOMEM); 5374 } 5375 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 5376 em->map_lookup = map; 5377 em->start = start; 5378 em->len = ctl->chunk_size; 5379 em->block_start = 0; 5380 em->block_len = em->len; 5381 em->orig_block_len = ctl->stripe_size; 5382 5383 em_tree = &info->mapping_tree; 5384 write_lock(&em_tree->lock); 5385 ret = add_extent_mapping(em_tree, em, 0); 5386 if (ret) { 5387 write_unlock(&em_tree->lock); 5388 free_extent_map(em); 5389 return ERR_PTR(ret); 5390 } 5391 write_unlock(&em_tree->lock); 5392 5393 block_group = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size); 5394 if (IS_ERR(block_group)) 5395 goto error_del_extent; 5396 5397 for (i = 0; i < map->num_stripes; i++) { 5398 struct btrfs_device *dev = map->stripes[i].dev; 5399 5400 btrfs_device_set_bytes_used(dev, 5401 dev->bytes_used + ctl->stripe_size); 5402 if (list_empty(&dev->post_commit_list)) 5403 list_add_tail(&dev->post_commit_list, 5404 &trans->transaction->dev_update_list); 5405 } 5406 5407 atomic64_sub(ctl->stripe_size * map->num_stripes, 5408 &info->free_chunk_space); 5409 5410 free_extent_map(em); 5411 check_raid56_incompat_flag(info, type); 5412 check_raid1c34_incompat_flag(info, type); 5413 5414 return block_group; 5415 5416 error_del_extent: 5417 write_lock(&em_tree->lock); 5418 remove_extent_mapping(em_tree, em); 5419 write_unlock(&em_tree->lock); 5420 5421 /* One for our allocation */ 5422 free_extent_map(em); 5423 /* One for the tree reference */ 5424 free_extent_map(em); 5425 5426 return block_group; 5427 } 5428 5429 struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans, 5430 u64 
type)
5431 {
5432 struct btrfs_fs_info *info = trans->fs_info;
5433 struct btrfs_fs_devices *fs_devices = info->fs_devices;
5434 struct btrfs_device_info *devices_info = NULL;
5435 struct alloc_chunk_ctl ctl;
5436 struct btrfs_block_group *block_group;
5437 int ret;
5438
5439 lockdep_assert_held(&info->chunk_mutex);
5440
5441 if (!alloc_profile_is_valid(type, 0)) {
5442 ASSERT(0);
5443 return ERR_PTR(-EINVAL);
5444 }
5445
5446 if (list_empty(&fs_devices->alloc_list)) {
5447 if (btrfs_test_opt(info, ENOSPC_DEBUG))
5448 btrfs_debug(info, "%s: no writable device", __func__);
5449 return ERR_PTR(-ENOSPC);
5450 }
5451
5452 if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
5453 btrfs_err(info, "invalid chunk type 0x%llx requested", type);
5454 ASSERT(0);
5455 return ERR_PTR(-EINVAL);
5456 }
5457
5458 ctl.start = find_next_chunk(info);
5459 ctl.type = type;
5460 init_alloc_chunk_ctl(fs_devices, &ctl);
5461
5462 devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
5463 GFP_NOFS);
5464 if (!devices_info)
5465 return ERR_PTR(-ENOMEM);
5466
5467 ret = gather_device_info(fs_devices, &ctl, devices_info);
5468 if (ret < 0) {
5469 block_group = ERR_PTR(ret);
5470 goto out;
5471 }
5472
5473 ret = decide_stripe_size(fs_devices, &ctl, devices_info);
5474 if (ret < 0) {
5475 block_group = ERR_PTR(ret);
5476 goto out;
5477 }
5478
5479 block_group = create_chunk(trans, &ctl, devices_info);
5480
5481 out:
5482 kfree(devices_info);
5483 return block_group;
5484 }
5485
5486 /*
5487 * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to
5488 * phase 1 of chunk allocation. It belongs to phase 2 only when allocating system
5489 * chunks.
5490 *
5491 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
5492 * phases.
5493 */
5494 int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
5495 struct btrfs_block_group *bg)
5496 {
5497 struct btrfs_fs_info *fs_info = trans->fs_info;
5498 struct btrfs_root *extent_root = fs_info->extent_root;
5499 struct btrfs_root *chunk_root = fs_info->chunk_root;
5500 struct btrfs_key key;
5501 struct btrfs_chunk *chunk;
5502 struct btrfs_stripe *stripe;
5503 struct extent_map *em;
5504 struct map_lookup *map;
5505 size_t item_size;
5506 int i;
5507 int ret;
5508
5509 /*
5510 * We take the chunk_mutex for 2 reasons:
5511 *
5512 * 1) Updates and insertions in the chunk btree must be done while holding
5513 * the chunk_mutex, as well as updating the system chunk array in the
5514 * superblock. See the comment on top of btrfs_chunk_alloc() for the
5515 * details;
5516 *
5517 * 2) To prevent races with the final phase of a device replace operation
5518 * that replaces the device object associated with the map's stripes,
5519 * because the device object's id can change at any time during that
5520 * final phase of the device replace operation
5521 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
5522 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
5523 * which would cause a failure when updating the device item, which does
5524 * not exist, or persisting a stripe of the chunk item with such ID.
5525 * Here we can't use the device_list_mutex because our caller already
5526 * has locked the chunk_mutex, and the final phase of device replace
5527 * acquires both mutexes - first the device_list_mutex and then the
5528 * chunk_mutex. Using any of those two mutexes protects us from a
5529 * concurrent device replace. 
5530 */ 5531 lockdep_assert_held(&fs_info->chunk_mutex); 5532 5533 em = btrfs_get_chunk_map(fs_info, bg->start, bg->length); 5534 if (IS_ERR(em)) { 5535 ret = PTR_ERR(em); 5536 btrfs_abort_transaction(trans, ret); 5537 return ret; 5538 } 5539 5540 map = em->map_lookup; 5541 item_size = btrfs_chunk_item_size(map->num_stripes); 5542 5543 chunk = kzalloc(item_size, GFP_NOFS); 5544 if (!chunk) { 5545 ret = -ENOMEM; 5546 btrfs_abort_transaction(trans, ret); 5547 goto out; 5548 } 5549 5550 for (i = 0; i < map->num_stripes; i++) { 5551 struct btrfs_device *device = map->stripes[i].dev; 5552 5553 ret = btrfs_update_device(trans, device); 5554 if (ret) 5555 goto out; 5556 } 5557 5558 stripe = &chunk->stripe; 5559 for (i = 0; i < map->num_stripes; i++) { 5560 struct btrfs_device *device = map->stripes[i].dev; 5561 const u64 dev_offset = map->stripes[i].physical; 5562 5563 btrfs_set_stack_stripe_devid(stripe, device->devid); 5564 btrfs_set_stack_stripe_offset(stripe, dev_offset); 5565 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE); 5566 stripe++; 5567 } 5568 5569 btrfs_set_stack_chunk_length(chunk, bg->length); 5570 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid); 5571 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len); 5572 btrfs_set_stack_chunk_type(chunk, map->type); 5573 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes); 5574 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len); 5575 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len); 5576 btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize); 5577 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes); 5578 5579 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 5580 key.type = BTRFS_CHUNK_ITEM_KEY; 5581 key.offset = bg->start; 5582 5583 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); 5584 if (ret) 5585 goto out; 5586 5587 bg->chunk_item_inserted = 1; 5588 5589 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 5590 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size); 5591 if (ret) 5592 goto out; 5593 } 5594 5595 out: 5596 kfree(chunk); 5597 free_extent_map(em); 5598 return ret; 5599 } 5600 5601 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans) 5602 { 5603 struct btrfs_fs_info *fs_info = trans->fs_info; 5604 u64 alloc_profile; 5605 struct btrfs_block_group *meta_bg; 5606 struct btrfs_block_group *sys_bg; 5607 5608 /* 5609 * When adding a new device for sprouting, the seed device is read-only 5610 * so we must first allocate a metadata and a system chunk. But before 5611 * adding the block group items to the extent, device and chunk btrees, 5612 * we must first: 5613 * 5614 * 1) Create both chunks without doing any changes to the btrees, as 5615 * otherwise we would get -ENOSPC since the block groups from the 5616 * seed device are read-only; 5617 * 5618 * 2) Add the device item for the new sprout device - finishing the setup 5619 * of a new block group requires updating the device item in the chunk 5620 * btree, so it must exist when we attempt to do it. The previous step 5621 * ensures this does not fail with -ENOSPC. 5622 * 5623 * After that we can add the block group items to their btrees: 5624 * update existing device item in the chunk btree, add a new block group 5625 * item to the extent btree, add a new chunk item to the chunk btree and 5626 * finally add the new device extent items to the devices btree. 
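*
* The two btrfs_create_chunk() calls below only reserve the chunks in
* memory; their items reach the btrees later, presumably through the
* pending block group machinery (btrfs_create_pending_block_groups()).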
5627 */ 5628 5629 alloc_profile = btrfs_metadata_alloc_profile(fs_info); 5630 meta_bg = btrfs_create_chunk(trans, alloc_profile); 5631 if (IS_ERR(meta_bg)) 5632 return PTR_ERR(meta_bg); 5633 5634 alloc_profile = btrfs_system_alloc_profile(fs_info); 5635 sys_bg = btrfs_create_chunk(trans, alloc_profile); 5636 if (IS_ERR(sys_bg)) 5637 return PTR_ERR(sys_bg); 5638 5639 return 0; 5640 } 5641 5642 static inline int btrfs_chunk_max_errors(struct map_lookup *map) 5643 { 5644 const int index = btrfs_bg_flags_to_raid_index(map->type); 5645 5646 return btrfs_raid_array[index].tolerated_failures; 5647 } 5648 5649 bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset) 5650 { 5651 struct extent_map *em; 5652 struct map_lookup *map; 5653 int miss_ndevs = 0; 5654 int i; 5655 bool ret = true; 5656 5657 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 5658 if (IS_ERR(em)) 5659 return false; 5660 5661 map = em->map_lookup; 5662 for (i = 0; i < map->num_stripes; i++) { 5663 if (test_bit(BTRFS_DEV_STATE_MISSING, 5664 &map->stripes[i].dev->dev_state)) { 5665 miss_ndevs++; 5666 continue; 5667 } 5668 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, 5669 &map->stripes[i].dev->dev_state)) { 5670 ret = false; 5671 goto end; 5672 } 5673 } 5674 5675 /* 5676 * If the number of missing devices is larger than max errors, we can 5677 * not write the data into that chunk successfully. 5678 */ 5679 if (miss_ndevs > btrfs_chunk_max_errors(map)) 5680 ret = false; 5681 end: 5682 free_extent_map(em); 5683 return ret; 5684 } 5685 5686 void btrfs_mapping_tree_free(struct extent_map_tree *tree) 5687 { 5688 struct extent_map *em; 5689 5690 while (1) { 5691 write_lock(&tree->lock); 5692 em = lookup_extent_mapping(tree, 0, (u64)-1); 5693 if (em) 5694 remove_extent_mapping(tree, em); 5695 write_unlock(&tree->lock); 5696 if (!em) 5697 break; 5698 /* once for us */ 5699 free_extent_map(em); 5700 /* once for the tree */ 5701 free_extent_map(em); 5702 } 5703 } 5704 5705 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5706 { 5707 struct extent_map *em; 5708 struct map_lookup *map; 5709 int ret; 5710 5711 em = btrfs_get_chunk_map(fs_info, logical, len); 5712 if (IS_ERR(em)) 5713 /* 5714 * We could return errors for these cases, but that could get 5715 * ugly and we'd probably do the same thing which is just not do 5716 * anything else and exit, so return 1 so the callers don't try 5717 * to use other copies. 5718 */ 5719 return 1; 5720 5721 map = em->map_lookup; 5722 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK)) 5723 ret = map->num_stripes; 5724 else if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5725 ret = map->sub_stripes; 5726 else if (map->type & BTRFS_BLOCK_GROUP_RAID5) 5727 ret = 2; 5728 else if (map->type & BTRFS_BLOCK_GROUP_RAID6) 5729 /* 5730 * There could be two corrupted data stripes, we need 5731 * to loop retry in order to rebuild the correct data. 5732 * 5733 * Fail a stripe at a time on every retry except the 5734 * stripe under reconstruction. 
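*
* E.g. a 6-stripe raid6 chunk reports 6 "copies": each retry can mask
* one more stripe and rebuild from the rest, as long as no more than
* two stripes are bad in total.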
5735 */ 5736 ret = map->num_stripes; 5737 else 5738 ret = 1; 5739 free_extent_map(em); 5740 5741 down_read(&fs_info->dev_replace.rwsem); 5742 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) && 5743 fs_info->dev_replace.tgtdev) 5744 ret++; 5745 up_read(&fs_info->dev_replace.rwsem); 5746 5747 return ret; 5748 } 5749 5750 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info, 5751 u64 logical) 5752 { 5753 struct extent_map *em; 5754 struct map_lookup *map; 5755 unsigned long len = fs_info->sectorsize; 5756 5757 em = btrfs_get_chunk_map(fs_info, logical, len); 5758 5759 if (!WARN_ON(IS_ERR(em))) { 5760 map = em->map_lookup; 5761 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5762 len = map->stripe_len * nr_data_stripes(map); 5763 free_extent_map(em); 5764 } 5765 return len; 5766 } 5767 5768 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5769 { 5770 struct extent_map *em; 5771 struct map_lookup *map; 5772 int ret = 0; 5773 5774 em = btrfs_get_chunk_map(fs_info, logical, len); 5775 5776 if (!WARN_ON(IS_ERR(em))) { 5777 map = em->map_lookup; 5778 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5779 ret = 1; 5780 free_extent_map(em); 5781 } 5782 return ret; 5783 } 5784 5785 static int find_live_mirror(struct btrfs_fs_info *fs_info, 5786 struct map_lookup *map, int first, 5787 int dev_replace_is_ongoing) 5788 { 5789 int i; 5790 int num_stripes; 5791 int preferred_mirror; 5792 int tolerance; 5793 struct btrfs_device *srcdev; 5794 5795 ASSERT((map->type & 5796 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10))); 5797 5798 if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5799 num_stripes = map->sub_stripes; 5800 else 5801 num_stripes = map->num_stripes; 5802 5803 switch (fs_info->fs_devices->read_policy) { 5804 default: 5805 /* Shouldn't happen, just warn and use pid instead of failing */ 5806 btrfs_warn_rl(fs_info, 5807 "unknown read_policy type %u, reset to pid", 5808 fs_info->fs_devices->read_policy); 5809 fs_info->fs_devices->read_policy = BTRFS_READ_POLICY_PID; 5810 fallthrough; 5811 case BTRFS_READ_POLICY_PID: 5812 preferred_mirror = first + (current->pid % num_stripes); 5813 break; 5814 } 5815 5816 if (dev_replace_is_ongoing && 5817 fs_info->dev_replace.cont_reading_from_srcdev_mode == 5818 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID) 5819 srcdev = fs_info->dev_replace.srcdev; 5820 else 5821 srcdev = NULL; 5822 5823 /* 5824 * Try to avoid the drive that is the source drive for a 5825 * dev-replace procedure, and only choose it if no other non-missing 5826 * mirror is available. 5827 */ 5828 for (tolerance = 0; tolerance < 2; tolerance++) { 5829 if (map->stripes[preferred_mirror].dev->bdev && 5830 (tolerance || map->stripes[preferred_mirror].dev != srcdev)) 5831 return preferred_mirror; 5832 for (i = first; i < first + num_stripes; i++) { 5833 if (map->stripes[i].dev->bdev && 5834 (tolerance || map->stripes[i].dev != srcdev)) 5835 return i; 5836 } 5837 } 5838 5839 /* We couldn't find one that doesn't fail.
Just return something 5840 * and the I/O error handling code will clean up eventually. 5841 */ 5842 return preferred_mirror; 5843 } 5844 5845 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */ 5846 static void sort_parity_stripes(struct btrfs_io_context *bioc, int num_stripes) 5847 { 5848 int i; 5849 int again = 1; 5850 5851 while (again) { 5852 again = 0; 5853 for (i = 0; i < num_stripes - 1; i++) { 5854 /* Swap if parity is on a smaller index */ 5855 if (bioc->raid_map[i] > bioc->raid_map[i + 1]) { 5856 swap(bioc->stripes[i], bioc->stripes[i + 1]); 5857 swap(bioc->raid_map[i], bioc->raid_map[i + 1]); 5858 again = 1; 5859 } 5860 } 5861 } 5862 } 5863 5864 static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info, 5865 int total_stripes, 5866 int real_stripes) 5867 { 5868 struct btrfs_io_context *bioc = kzalloc( 5869 /* The size of btrfs_io_context */ 5870 sizeof(struct btrfs_io_context) + 5871 /* Plus the variable array for the stripes */ 5872 sizeof(struct btrfs_io_stripe) * (total_stripes) + 5873 /* Plus the variable array for the tgt dev */ 5874 sizeof(int) * (real_stripes) + 5875 /* 5876 * Plus the raid_map, which includes both the tgt dev 5877 * and the stripes. 5878 */ 5879 sizeof(u64) * (total_stripes), 5880 GFP_NOFS|__GFP_NOFAIL); 5881 5882 atomic_set(&bioc->error, 0); 5883 refcount_set(&bioc->refs, 1); 5884 5885 bioc->fs_info = fs_info; 5886 bioc->tgtdev_map = (int *)(bioc->stripes + total_stripes); 5887 bioc->raid_map = (u64 *)(bioc->tgtdev_map + real_stripes); 5888 5889 return bioc; 5890 } 5891 5892 void btrfs_get_bioc(struct btrfs_io_context *bioc) 5893 { 5894 WARN_ON(!refcount_read(&bioc->refs)); 5895 refcount_inc(&bioc->refs); 5896 } 5897 5898 void btrfs_put_bioc(struct btrfs_io_context *bioc) 5899 { 5900 if (!bioc) 5901 return; 5902 if (refcount_dec_and_test(&bioc->refs)) 5903 kfree(bioc); 5904 } 5905 5906 /* Can REQ_OP_DISCARD be sent with other REQ ops like REQ_OP_WRITE? */ 5907 /* 5908 * Note that discard won't be sent to the target device of a device 5909 * replace. 5910 */ 5911 static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info, 5912 u64 logical, u64 *length_ret, 5913 struct btrfs_io_context **bioc_ret) 5914 { 5915 struct extent_map *em; 5916 struct map_lookup *map; 5917 struct btrfs_io_context *bioc; 5918 u64 length = *length_ret; 5919 u64 offset; 5920 u64 stripe_nr; 5921 u64 stripe_nr_end; 5922 u64 stripe_end_offset; 5923 u64 stripe_cnt; 5924 u64 stripe_len; 5925 u64 stripe_offset; 5926 u64 num_stripes; 5927 u32 stripe_index; 5928 u32 factor = 0; 5929 u32 sub_stripes = 0; 5930 u64 stripes_per_dev = 0; 5931 u32 remaining_stripes = 0; 5932 u32 last_stripe = 0; 5933 int ret = 0; 5934 int i; 5935 5936 /* Discard always returns a bioc.
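 * The caller must therefore always pass a valid bioc_ret pointer, which
 * the ASSERT() below enforces.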
*/ 5937 ASSERT(bioc_ret); 5938 5939 em = btrfs_get_chunk_map(fs_info, logical, length); 5940 if (IS_ERR(em)) 5941 return PTR_ERR(em); 5942 5943 map = em->map_lookup; 5944 /* we don't discard raid56 yet */ 5945 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 5946 ret = -EOPNOTSUPP; 5947 goto out; 5948 } 5949 5950 offset = logical - em->start; 5951 length = min_t(u64, em->start + em->len - logical, length); 5952 *length_ret = length; 5953 5954 stripe_len = map->stripe_len; 5955 /* 5956 * stripe_nr counts the total number of stripes we have to stride 5957 * to get to this block 5958 */ 5959 stripe_nr = div64_u64(offset, stripe_len); 5960 5961 /* stripe_offset is the offset of this block in its stripe */ 5962 stripe_offset = offset - stripe_nr * stripe_len; 5963 5964 stripe_nr_end = round_up(offset + length, map->stripe_len); 5965 stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len); 5966 stripe_cnt = stripe_nr_end - stripe_nr; 5967 stripe_end_offset = stripe_nr_end * map->stripe_len - 5968 (offset + length); 5969 /* 5970 * after this, stripe_nr is the number of stripes on this 5971 * device we have to walk to find the data, and stripe_index is 5972 * the number of our device in the stripe array 5973 */ 5974 num_stripes = 1; 5975 stripe_index = 0; 5976 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 5977 BTRFS_BLOCK_GROUP_RAID10)) { 5978 if (map->type & BTRFS_BLOCK_GROUP_RAID0) 5979 sub_stripes = 1; 5980 else 5981 sub_stripes = map->sub_stripes; 5982 5983 factor = map->num_stripes / sub_stripes; 5984 num_stripes = min_t(u64, map->num_stripes, 5985 sub_stripes * stripe_cnt); 5986 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 5987 stripe_index *= sub_stripes; 5988 stripes_per_dev = div_u64_rem(stripe_cnt, factor, 5989 &remaining_stripes); 5990 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe); 5991 last_stripe *= sub_stripes; 5992 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK | 5993 BTRFS_BLOCK_GROUP_DUP)) { 5994 num_stripes = map->num_stripes; 5995 } else { 5996 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 5997 &stripe_index); 5998 } 5999 6000 bioc = alloc_btrfs_io_context(fs_info, num_stripes, 0); 6001 if (!bioc) { 6002 ret = -ENOMEM; 6003 goto out; 6004 } 6005 6006 for (i = 0; i < num_stripes; i++) { 6007 bioc->stripes[i].physical = 6008 map->stripes[stripe_index].physical + 6009 stripe_offset + stripe_nr * map->stripe_len; 6010 bioc->stripes[i].dev = map->stripes[stripe_index].dev; 6011 6012 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 6013 BTRFS_BLOCK_GROUP_RAID10)) { 6014 bioc->stripes[i].length = stripes_per_dev * 6015 map->stripe_len; 6016 6017 if (i / sub_stripes < remaining_stripes) 6018 bioc->stripes[i].length += map->stripe_len; 6019 6020 /* 6021 * Special for the first stripe and 6022 * the last stripe: 6023 * 6024 * |-------|...|-------| 6025 * |----------| 6026 * off end_off 6027 */ 6028 if (i < sub_stripes) 6029 bioc->stripes[i].length -= stripe_offset; 6030 6031 if (stripe_index >= last_stripe && 6032 stripe_index <= (last_stripe + 6033 sub_stripes - 1)) 6034 bioc->stripes[i].length -= stripe_end_offset; 6035 6036 if (i == sub_stripes - 1) 6037 stripe_offset = 0; 6038 } else { 6039 bioc->stripes[i].length = length; 6040 } 6041 6042 stripe_index++; 6043 if (stripe_index == map->num_stripes) { 6044 stripe_index = 0; 6045 stripe_nr++; 6046 } 6047 } 6048 6049 *bioc_ret = bioc; 6050 bioc->map_type = map->type; 6051 bioc->num_stripes = num_stripes; 6052 out: 6053 free_extent_map(em); 6054 return ret; 6055 } 6056 6057 /* 6058 * In dev-replace case, for 
repair case (that's the only case where the mirror 6059 * is selected explicitly when calling btrfs_map_block), blocks left of the 6060 * left cursor can also be read from the target drive. 6061 * 6062 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the 6063 * array of stripes. 6064 * For READ, it also needs to be supported using the same mirror number. 6065 * 6066 * If the requested block is not left of the left cursor, EIO is returned. This 6067 * can happen because btrfs_num_copies() returns one more in the dev-replace 6068 * case. 6069 */ 6070 static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info, 6071 u64 logical, u64 length, 6072 u64 srcdev_devid, int *mirror_num, 6073 u64 *physical) 6074 { 6075 struct btrfs_io_context *bioc = NULL; 6076 int num_stripes; 6077 int index_srcdev = 0; 6078 int found = 0; 6079 u64 physical_of_found = 0; 6080 int i; 6081 int ret = 0; 6082 6083 ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, 6084 logical, &length, &bioc, 0, 0); 6085 if (ret) { 6086 ASSERT(bioc == NULL); 6087 return ret; 6088 } 6089 6090 num_stripes = bioc->num_stripes; 6091 if (*mirror_num > num_stripes) { 6092 /* 6093 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror, 6094 * which means that the requested area is not left of the left 6095 * cursor. 6096 */ 6097 btrfs_put_bioc(bioc); 6098 return -EIO; 6099 } 6100 6101 /* 6102 * Process the rest of the function using the mirror_num of the source 6103 * drive. Therefore look it up first. At the end, patch the device 6104 * pointer to the one of the target drive. 6105 */ 6106 for (i = 0; i < num_stripes; i++) { 6107 if (bioc->stripes[i].dev->devid != srcdev_devid) 6108 continue; 6109 6110 /* 6111 * In case of DUP, in order to keep it simple, only add the 6112 * mirror with the lowest physical address 6113 */ 6114 if (found && 6115 physical_of_found <= bioc->stripes[i].physical) 6116 continue; 6117 6118 index_srcdev = i; 6119 found = 1; 6120 physical_of_found = bioc->stripes[i].physical; 6121 } 6122 6123 btrfs_put_bioc(bioc); 6124 6125 ASSERT(found); 6126 if (!found) 6127 return -EIO; 6128 6129 *mirror_num = index_srcdev + 1; 6130 *physical = physical_of_found; 6131 return ret; 6132 } 6133 6134 static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical) 6135 { 6136 struct btrfs_block_group *cache; 6137 bool ret; 6138 6139 /* A non-zoned filesystem does not use the "to_copy" flag */ 6140 if (!btrfs_is_zoned(fs_info)) 6141 return false; 6142 6143 cache = btrfs_lookup_block_group(fs_info, logical); 6144 6145 spin_lock(&cache->lock); 6146 ret = cache->to_copy; 6147 spin_unlock(&cache->lock); 6148 6149 btrfs_put_block_group(cache); 6150 return ret; 6151 } 6152 6153 static void handle_ops_on_dev_replace(enum btrfs_map_op op, 6154 struct btrfs_io_context **bioc_ret, 6155 struct btrfs_dev_replace *dev_replace, 6156 u64 logical, 6157 int *num_stripes_ret, int *max_errors_ret) 6158 { 6159 struct btrfs_io_context *bioc = *bioc_ret; 6160 u64 srcdev_devid = dev_replace->srcdev->devid; 6161 int tgtdev_indexes = 0; 6162 int num_stripes = *num_stripes_ret; 6163 int max_errors = *max_errors_ret; 6164 int i; 6165 6166 if (op == BTRFS_MAP_WRITE) { 6167 int index_where_to_add; 6168 6169 /* 6170 * A block group which has "to_copy" set will eventually be 6171 * copied by the dev-replace process. We can avoid cloning IO here.
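 * (The dev-replace worker will copy such a block group wholesale, so
 * duplicating the write to the target device would be redundant work.)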
6172 */ 6173 if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical)) 6174 return; 6175 6176 /* 6177 * duplicate the write operations while the dev replace 6178 * procedure is running. Since the copying of the old disk to 6179 * the new disk takes place at run time while the filesystem is 6180 * mounted writable, the regular write operations to the old 6181 * disk have to be duplicated to go to the new disk as well. 6182 * 6183 * Note that device->missing is handled by the caller, and that 6184 * the write to the old disk is already set up in the stripes 6185 * array. 6186 */ 6187 index_where_to_add = num_stripes; 6188 for (i = 0; i < num_stripes; i++) { 6189 if (bioc->stripes[i].dev->devid == srcdev_devid) { 6190 /* write to new disk, too */ 6191 struct btrfs_io_stripe *new = 6192 bioc->stripes + index_where_to_add; 6193 struct btrfs_io_stripe *old = 6194 bioc->stripes + i; 6195 6196 new->physical = old->physical; 6197 new->length = old->length; 6198 new->dev = dev_replace->tgtdev; 6199 bioc->tgtdev_map[i] = index_where_to_add; 6200 index_where_to_add++; 6201 max_errors++; 6202 tgtdev_indexes++; 6203 } 6204 } 6205 num_stripes = index_where_to_add; 6206 } else if (op == BTRFS_MAP_GET_READ_MIRRORS) { 6207 int index_srcdev = 0; 6208 int found = 0; 6209 u64 physical_of_found = 0; 6210 6211 /* 6212 * During the dev-replace procedure, the target drive can also 6213 * be used to read data in case it is needed to repair a corrupt 6214 * block elsewhere. This is possible if the requested area is 6215 * left of the left cursor. In this area, the target drive is a 6216 * full copy of the source drive. 6217 */ 6218 for (i = 0; i < num_stripes; i++) { 6219 if (bioc->stripes[i].dev->devid == srcdev_devid) { 6220 /* 6221 * In case of DUP, in order to keep it simple, 6222 * only add the mirror with the lowest physical 6223 * address 6224 */ 6225 if (found && 6226 physical_of_found <= bioc->stripes[i].physical) 6227 continue; 6228 index_srcdev = i; 6229 found = 1; 6230 physical_of_found = bioc->stripes[i].physical; 6231 } 6232 } 6233 if (found) { 6234 struct btrfs_io_stripe *tgtdev_stripe = 6235 bioc->stripes + num_stripes; 6236 6237 tgtdev_stripe->physical = physical_of_found; 6238 tgtdev_stripe->length = 6239 bioc->stripes[index_srcdev].length; 6240 tgtdev_stripe->dev = dev_replace->tgtdev; 6241 bioc->tgtdev_map[index_srcdev] = num_stripes; 6242 6243 tgtdev_indexes++; 6244 num_stripes++; 6245 } 6246 } 6247 6248 *num_stripes_ret = num_stripes; 6249 *max_errors_ret = max_errors; 6250 bioc->num_tgtdevs = tgtdev_indexes; 6251 *bioc_ret = bioc; 6252 } 6253 6254 static bool need_full_stripe(enum btrfs_map_op op) 6255 { 6256 return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS); 6257 } 6258 6259 /* 6260 * Calculate the geometry of a particular (address, len) tuple. This 6261 * information is used to calculate how big a particular bio can get before it 6262 * straddles a stripe. 6263 * 6264 * @fs_info: the filesystem 6265 * @em: mapping containing the logical extent 6266 * @op: type of operation - write or read 6267 * @logical: address that we want to figure out the geometry of 6268 * @io_geom: pointer used to return values 6269 * 6270 * Returns < 0 in case a chunk for the given logical address cannot be found, 6271 * usually shouldn't happen unless @logical is corrupted, 0 otherwise. 
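 *
 * Example (numbers picked for illustration): with stripe_len 64K and
 * @logical 80K into a RAID0 chunk, stripe_nr = 1 and stripe_offset = 16K,
 * so io_geom->len is capped at 48K, the remainder of that stripe.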
6272 */ 6273 int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *em, 6274 enum btrfs_map_op op, u64 logical, 6275 struct btrfs_io_geometry *io_geom) 6276 { 6277 struct map_lookup *map; 6278 u64 len; 6279 u64 offset; 6280 u64 stripe_offset; 6281 u64 stripe_nr; 6282 u64 stripe_len; 6283 u64 raid56_full_stripe_start = (u64)-1; 6284 int data_stripes; 6285 6286 ASSERT(op != BTRFS_MAP_DISCARD); 6287 6288 map = em->map_lookup; 6289 /* Offset of this logical address in the chunk */ 6290 offset = logical - em->start; 6291 /* Len of a stripe in a chunk */ 6292 stripe_len = map->stripe_len; 6293 /* Stripe where this block falls in */ 6294 stripe_nr = div64_u64(offset, stripe_len); 6295 /* Offset of stripe in the chunk */ 6296 stripe_offset = stripe_nr * stripe_len; 6297 if (offset < stripe_offset) { 6298 btrfs_crit(fs_info, 6299 "stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu", 6300 stripe_offset, offset, em->start, logical, stripe_len); 6301 return -EINVAL; 6302 } 6303 6304 /* stripe_offset is the offset of this block in its stripe */ 6305 stripe_offset = offset - stripe_offset; 6306 data_stripes = nr_data_stripes(map); 6307 6308 if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { 6309 u64 max_len = stripe_len - stripe_offset; 6310 6311 /* 6312 * In case of raid56, we need to know the stripe aligned start 6313 */ 6314 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6315 unsigned long full_stripe_len = stripe_len * data_stripes; 6316 raid56_full_stripe_start = offset; 6317 6318 /* 6319 * Allow a write of a full stripe, but make sure we 6320 * don't allow straddling of stripes 6321 */ 6322 raid56_full_stripe_start = div64_u64(raid56_full_stripe_start, 6323 full_stripe_len); 6324 raid56_full_stripe_start *= full_stripe_len; 6325 6326 /* 6327 * For writes to RAID[56], allow a full stripeset across 6328 * all disks. For other RAID types and for RAID[56] 6329 * reads, just allow a single stripe (on a single disk). 
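 * E.g. (illustrative numbers) on RAID5 with 4 devices, 3 data stripes
 * and a 64K stripe_len, a write aligned to the full stripe start may
 * extend up to 3 * 64K = 192K.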
6330 */ 6331 if (op == BTRFS_MAP_WRITE) { 6332 max_len = stripe_len * data_stripes - 6333 (offset - raid56_full_stripe_start); 6334 } 6335 } 6336 len = min_t(u64, em->len - offset, max_len); 6337 } else { 6338 len = em->len - offset; 6339 } 6340 6341 io_geom->len = len; 6342 io_geom->offset = offset; 6343 io_geom->stripe_len = stripe_len; 6344 io_geom->stripe_nr = stripe_nr; 6345 io_geom->stripe_offset = stripe_offset; 6346 io_geom->raid56_stripe_offset = raid56_full_stripe_start; 6347 6348 return 0; 6349 } 6350 6351 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, 6352 enum btrfs_map_op op, 6353 u64 logical, u64 *length, 6354 struct btrfs_io_context **bioc_ret, 6355 int mirror_num, int need_raid_map) 6356 { 6357 struct extent_map *em; 6358 struct map_lookup *map; 6359 u64 stripe_offset; 6360 u64 stripe_nr; 6361 u64 stripe_len; 6362 u32 stripe_index; 6363 int data_stripes; 6364 int i; 6365 int ret = 0; 6366 int num_stripes; 6367 int max_errors = 0; 6368 int tgtdev_indexes = 0; 6369 struct btrfs_io_context *bioc = NULL; 6370 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; 6371 int dev_replace_is_ongoing = 0; 6372 int num_alloc_stripes; 6373 int patch_the_first_stripe_for_dev_replace = 0; 6374 u64 physical_to_patch_in_first_stripe = 0; 6375 u64 raid56_full_stripe_start = (u64)-1; 6376 struct btrfs_io_geometry geom; 6377 6378 ASSERT(bioc_ret); 6379 ASSERT(op != BTRFS_MAP_DISCARD); 6380 6381 em = btrfs_get_chunk_map(fs_info, logical, *length); 6382 ASSERT(!IS_ERR(em)); 6383 6384 ret = btrfs_get_io_geometry(fs_info, em, op, logical, &geom); 6385 if (ret < 0) 6386 return ret; 6387 6388 map = em->map_lookup; 6389 6390 *length = geom.len; 6391 stripe_len = geom.stripe_len; 6392 stripe_nr = geom.stripe_nr; 6393 stripe_offset = geom.stripe_offset; 6394 raid56_full_stripe_start = geom.raid56_stripe_offset; 6395 data_stripes = nr_data_stripes(map); 6396 6397 down_read(&dev_replace->rwsem); 6398 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace); 6399 /* 6400 * Hold the semaphore for read during the whole operation, write is 6401 * requested at commit time but must wait. 
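 * If no replace is running we drop it again right below; otherwise it
 * stays held until the stripes have been set up and is released at the
 * "out" label.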
6402 */ 6403 if (!dev_replace_is_ongoing) 6404 up_read(&dev_replace->rwsem); 6405 6406 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 && 6407 !need_full_stripe(op) && dev_replace->tgtdev != NULL) { 6408 ret = get_extra_mirror_from_replace(fs_info, logical, *length, 6409 dev_replace->srcdev->devid, 6410 &mirror_num, 6411 &physical_to_patch_in_first_stripe); 6412 if (ret) 6413 goto out; 6414 else 6415 patch_the_first_stripe_for_dev_replace = 1; 6416 } else if (mirror_num > map->num_stripes) { 6417 mirror_num = 0; 6418 } 6419 6420 num_stripes = 1; 6421 stripe_index = 0; 6422 if (map->type & BTRFS_BLOCK_GROUP_RAID0) { 6423 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6424 &stripe_index); 6425 if (!need_full_stripe(op)) 6426 mirror_num = 1; 6427 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) { 6428 if (need_full_stripe(op)) 6429 num_stripes = map->num_stripes; 6430 else if (mirror_num) 6431 stripe_index = mirror_num - 1; 6432 else { 6433 stripe_index = find_live_mirror(fs_info, map, 0, 6434 dev_replace_is_ongoing); 6435 mirror_num = stripe_index + 1; 6436 } 6437 6438 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { 6439 if (need_full_stripe(op)) { 6440 num_stripes = map->num_stripes; 6441 } else if (mirror_num) { 6442 stripe_index = mirror_num - 1; 6443 } else { 6444 mirror_num = 1; 6445 } 6446 6447 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { 6448 u32 factor = map->num_stripes / map->sub_stripes; 6449 6450 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 6451 stripe_index *= map->sub_stripes; 6452 6453 if (need_full_stripe(op)) 6454 num_stripes = map->sub_stripes; 6455 else if (mirror_num) 6456 stripe_index += mirror_num - 1; 6457 else { 6458 int old_stripe_index = stripe_index; 6459 stripe_index = find_live_mirror(fs_info, map, 6460 stripe_index, 6461 dev_replace_is_ongoing); 6462 mirror_num = stripe_index - old_stripe_index + 1; 6463 } 6464 6465 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6466 if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) { 6467 /* push stripe_nr back to the start of the full stripe */ 6468 stripe_nr = div64_u64(raid56_full_stripe_start, 6469 stripe_len * data_stripes); 6470 6471 /* RAID[56] write or recovery. Return all stripes */ 6472 num_stripes = map->num_stripes; 6473 max_errors = nr_parity_stripes(map); 6474 6475 *length = map->stripe_len; 6476 stripe_index = 0; 6477 stripe_offset = 0; 6478 } else { 6479 /* 6480 * Mirror #0 or #1 means the original data block. 6481 * Mirror #2 is RAID5 parity block. 6482 * Mirror #3 is RAID6 Q block. 
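 * Hence for mirror_num > 1 the code below picks a parity stripe:
 * stripe_index = data_stripes + mirror_num - 2.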
6483 */ 6484 stripe_nr = div_u64_rem(stripe_nr, 6485 data_stripes, &stripe_index); 6486 if (mirror_num > 1) 6487 stripe_index = data_stripes + mirror_num - 2; 6488 6489 /* We distribute the parity blocks across stripes */ 6490 div_u64_rem(stripe_nr + stripe_index, map->num_stripes, 6491 &stripe_index); 6492 if (!need_full_stripe(op) && mirror_num <= 1) 6493 mirror_num = 1; 6494 } 6495 } else { 6496 /* 6497 * after this, stripe_nr is the number of stripes on this 6498 * device we have to walk to find the data, and stripe_index is 6499 * the number of our device in the stripe array 6500 */ 6501 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6502 &stripe_index); 6503 mirror_num = stripe_index + 1; 6504 } 6505 if (stripe_index >= map->num_stripes) { 6506 btrfs_crit(fs_info, 6507 "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u", 6508 stripe_index, map->num_stripes); 6509 ret = -EINVAL; 6510 goto out; 6511 } 6512 6513 num_alloc_stripes = num_stripes; 6514 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) { 6515 if (op == BTRFS_MAP_WRITE) 6516 num_alloc_stripes <<= 1; 6517 if (op == BTRFS_MAP_GET_READ_MIRRORS) 6518 num_alloc_stripes++; 6519 tgtdev_indexes = num_stripes; 6520 } 6521 6522 bioc = alloc_btrfs_io_context(fs_info, num_alloc_stripes, tgtdev_indexes); 6523 if (!bioc) { 6524 ret = -ENOMEM; 6525 goto out; 6526 } 6527 6528 for (i = 0; i < num_stripes; i++) { 6529 bioc->stripes[i].physical = map->stripes[stripe_index].physical + 6530 stripe_offset + stripe_nr * map->stripe_len; 6531 bioc->stripes[i].dev = map->stripes[stripe_index].dev; 6532 stripe_index++; 6533 } 6534 6535 /* Build raid_map */ 6536 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map && 6537 (need_full_stripe(op) || mirror_num > 1)) { 6538 u64 tmp; 6539 unsigned rot; 6540 6541 /* Work out the disk rotation on this stripe-set */ 6542 div_u64_rem(stripe_nr, num_stripes, &rot); 6543 6544 /* Fill in the logical address of each stripe */ 6545 tmp = stripe_nr * data_stripes; 6546 for (i = 0; i < data_stripes; i++) 6547 bioc->raid_map[(i + rot) % num_stripes] = 6548 em->start + (tmp + i) * map->stripe_len; 6549 6550 bioc->raid_map[(i + rot) % map->num_stripes] = RAID5_P_STRIPE; 6551 if (map->type & BTRFS_BLOCK_GROUP_RAID6) 6552 bioc->raid_map[(i + rot + 1) % num_stripes] = 6553 RAID6_Q_STRIPE; 6554 6555 sort_parity_stripes(bioc, num_stripes); 6556 } 6557 6558 if (need_full_stripe(op)) 6559 max_errors = btrfs_chunk_max_errors(map); 6560 6561 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL && 6562 need_full_stripe(op)) { 6563 handle_ops_on_dev_replace(op, &bioc, dev_replace, logical, 6564 &num_stripes, &max_errors); 6565 } 6566 6567 *bioc_ret = bioc; 6568 bioc->map_type = map->type; 6569 bioc->num_stripes = num_stripes; 6570 bioc->max_errors = max_errors; 6571 bioc->mirror_num = mirror_num; 6572 6573 /* 6574 * this is the case that REQ_READ && dev_replace_is_ongoing && 6575 * mirror_num == num_stripes + 1 && dev_replace target drive is 6576 * available as a mirror 6577 */ 6578 if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) { 6579 WARN_ON(num_stripes > 1); 6580 bioc->stripes[0].dev = dev_replace->tgtdev; 6581 bioc->stripes[0].physical = physical_to_patch_in_first_stripe; 6582 bioc->mirror_num = map->num_stripes + 1; 6583 } 6584 out: 6585 if (dev_replace_is_ongoing) { 6586 lockdep_assert_held(&dev_replace->rwsem); 6587 /* Unlock and let waiting writers proceed */ 6588 up_read(&dev_replace->rwsem); 6589 } 6590 free_extent_map(em); 6591 return ret; 6592 } 
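/*
 * Example usage of btrfs_map_block() (an illustrative sketch only: local
 * names are arbitrary and error handling is trimmed):
 *
 *	struct btrfs_io_context *bioc = NULL;
 *	u64 map_len = fs_info->sectorsize;
 *	int i;
 *
 *	if (btrfs_map_block(fs_info, BTRFS_MAP_READ, logical, &map_len,
 *			    &bioc, 0) == 0) {
 *		for (i = 0; i < bioc->num_stripes; i++) {
 *			// bioc->stripes[i].dev and .physical describe one copy
 *		}
 *		btrfs_put_bioc(bioc);
 *	}
 */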
6593 6594 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 6595 u64 logical, u64 *length, 6596 struct btrfs_io_context **bioc_ret, int mirror_num) 6597 { 6598 if (op == BTRFS_MAP_DISCARD) 6599 return __btrfs_map_block_for_discard(fs_info, logical, 6600 length, bioc_ret); 6601 6602 return __btrfs_map_block(fs_info, op, logical, length, bioc_ret, 6603 mirror_num, 0); 6604 } 6605 6606 /* For Scrub/replace */ 6607 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 6608 u64 logical, u64 *length, 6609 struct btrfs_io_context **bioc_ret) 6610 { 6611 return __btrfs_map_block(fs_info, op, logical, length, bioc_ret, 0, 1); 6612 } 6613 6614 static inline void btrfs_end_bioc(struct btrfs_io_context *bioc, struct bio *bio) 6615 { 6616 bio->bi_private = bioc->private; 6617 bio->bi_end_io = bioc->end_io; 6618 bio_endio(bio); 6619 6620 btrfs_put_bioc(bioc); 6621 } 6622 6623 static void btrfs_end_bio(struct bio *bio) 6624 { 6625 struct btrfs_io_context *bioc = bio->bi_private; 6626 int is_orig_bio = 0; 6627 6628 if (bio->bi_status) { 6629 atomic_inc(&bioc->error); 6630 if (bio->bi_status == BLK_STS_IOERR || 6631 bio->bi_status == BLK_STS_TARGET) { 6632 struct btrfs_device *dev = btrfs_bio(bio)->device; 6633 6634 ASSERT(dev->bdev); 6635 if (btrfs_op(bio) == BTRFS_MAP_WRITE) 6636 btrfs_dev_stat_inc_and_print(dev, 6637 BTRFS_DEV_STAT_WRITE_ERRS); 6638 else if (!(bio->bi_opf & REQ_RAHEAD)) 6639 btrfs_dev_stat_inc_and_print(dev, 6640 BTRFS_DEV_STAT_READ_ERRS); 6641 if (bio->bi_opf & REQ_PREFLUSH) 6642 btrfs_dev_stat_inc_and_print(dev, 6643 BTRFS_DEV_STAT_FLUSH_ERRS); 6644 } 6645 } 6646 6647 if (bio == bioc->orig_bio) 6648 is_orig_bio = 1; 6649 6650 btrfs_bio_counter_dec(bioc->fs_info); 6651 6652 if (atomic_dec_and_test(&bioc->stripes_pending)) { 6653 if (!is_orig_bio) { 6654 bio_put(bio); 6655 bio = bioc->orig_bio; 6656 } 6657 6658 btrfs_bio(bio)->mirror_num = bioc->mirror_num; 6659 /* only send an error to the higher layers if it is 6660 * beyond the tolerance of the btrfs bio 6661 */ 6662 if (atomic_read(&bioc->error) > bioc->max_errors) { 6663 bio->bi_status = BLK_STS_IOERR; 6664 } else { 6665 /* 6666 * this bio is actually up to date, we didn't 6667 * go over the max number of errors 6668 */ 6669 bio->bi_status = BLK_STS_OK; 6670 } 6671 6672 btrfs_end_bioc(bioc, bio); 6673 } else if (!is_orig_bio) { 6674 bio_put(bio); 6675 } 6676 } 6677 6678 static void submit_stripe_bio(struct btrfs_io_context *bioc, struct bio *bio, 6679 u64 physical, struct btrfs_device *dev) 6680 { 6681 struct btrfs_fs_info *fs_info = bioc->fs_info; 6682 6683 bio->bi_private = bioc; 6684 btrfs_bio(bio)->device = dev; 6685 bio->bi_end_io = btrfs_end_bio; 6686 bio->bi_iter.bi_sector = physical >> 9; 6687 /* 6688 * For zone append writing, bi_sector must point the beginning of the 6689 * zone 6690 */ 6691 if (bio_op(bio) == REQ_OP_ZONE_APPEND) { 6692 if (btrfs_dev_is_sequential(dev, physical)) { 6693 u64 zone_start = round_down(physical, fs_info->zone_size); 6694 6695 bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT; 6696 } else { 6697 bio->bi_opf &= ~REQ_OP_ZONE_APPEND; 6698 bio->bi_opf |= REQ_OP_WRITE; 6699 } 6700 } 6701 btrfs_debug_in_rcu(fs_info, 6702 "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u", 6703 bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector, 6704 (unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name), 6705 dev->devid, bio->bi_iter.bi_size); 6706 bio_set_dev(bio, dev->bdev); 6707 6708 btrfs_bio_counter_inc_noblocked(fs_info); 6709 6710 
btrfsic_submit_bio(bio); 6711 } 6712 6713 static void bioc_error(struct btrfs_io_context *bioc, struct bio *bio, u64 logical) 6714 { 6715 atomic_inc(&bioc->error); 6716 if (atomic_dec_and_test(&bioc->stripes_pending)) { 6717 /* Should be the original bio. */ 6718 WARN_ON(bio != bioc->orig_bio); 6719 6720 btrfs_bio(bio)->mirror_num = bioc->mirror_num; 6721 bio->bi_iter.bi_sector = logical >> 9; 6722 if (atomic_read(&bioc->error) > bioc->max_errors) 6723 bio->bi_status = BLK_STS_IOERR; 6724 else 6725 bio->bi_status = BLK_STS_OK; 6726 btrfs_end_bioc(bioc, bio); 6727 } 6728 } 6729 6730 blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, 6731 int mirror_num) 6732 { 6733 struct btrfs_device *dev; 6734 struct bio *first_bio = bio; 6735 u64 logical = bio->bi_iter.bi_sector << 9; 6736 u64 length = 0; 6737 u64 map_length; 6738 int ret; 6739 int dev_nr; 6740 int total_devs; 6741 struct btrfs_io_context *bioc = NULL; 6742 6743 length = bio->bi_iter.bi_size; 6744 map_length = length; 6745 6746 btrfs_bio_counter_inc_blocked(fs_info); 6747 ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical, 6748 &map_length, &bioc, mirror_num, 1); 6749 if (ret) { 6750 btrfs_bio_counter_dec(fs_info); 6751 return errno_to_blk_status(ret); 6752 } 6753 6754 total_devs = bioc->num_stripes; 6755 bioc->orig_bio = first_bio; 6756 bioc->private = first_bio->bi_private; 6757 bioc->end_io = first_bio->bi_end_io; 6758 atomic_set(&bioc->stripes_pending, bioc->num_stripes); 6759 6760 if ((bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) && 6761 ((btrfs_op(bio) == BTRFS_MAP_WRITE) || (mirror_num > 1))) { 6762 /* In this case, map_length has been set to the length of 6763 a single stripe; not the whole write */ 6764 if (btrfs_op(bio) == BTRFS_MAP_WRITE) { 6765 ret = raid56_parity_write(bio, bioc, map_length); 6766 } else { 6767 ret = raid56_parity_recover(bio, bioc, map_length, 6768 mirror_num, 1); 6769 } 6770 6771 btrfs_bio_counter_dec(fs_info); 6772 return errno_to_blk_status(ret); 6773 } 6774 6775 if (map_length < length) { 6776 btrfs_crit(fs_info, 6777 "mapping failed logical %llu bio len %llu len %llu", 6778 logical, length, map_length); 6779 BUG(); 6780 } 6781 6782 for (dev_nr = 0; dev_nr < total_devs; dev_nr++) { 6783 dev = bioc->stripes[dev_nr].dev; 6784 if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING, 6785 &dev->dev_state) || 6786 (btrfs_op(first_bio) == BTRFS_MAP_WRITE && 6787 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) { 6788 bioc_error(bioc, first_bio, logical); 6789 continue; 6790 } 6791 6792 if (dev_nr < total_devs - 1) 6793 bio = btrfs_bio_clone(first_bio); 6794 else 6795 bio = first_bio; 6796 6797 submit_stripe_bio(bioc, bio, bioc->stripes[dev_nr].physical, dev); 6798 } 6799 btrfs_bio_counter_dec(fs_info); 6800 return BLK_STS_OK; 6801 } 6802 6803 static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args, 6804 const struct btrfs_fs_devices *fs_devices) 6805 { 6806 if (args->fsid == NULL) 6807 return true; 6808 if (memcmp(fs_devices->metadata_uuid, args->fsid, BTRFS_FSID_SIZE) == 0) 6809 return true; 6810 return false; 6811 } 6812 6813 static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args, 6814 const struct btrfs_device *device) 6815 { 6816 ASSERT((args->devid != (u64)-1) || args->missing); 6817 6818 if ((args->devid != (u64)-1) && device->devid != args->devid) 6819 return false; 6820 if (args->uuid && memcmp(device->uuid, args->uuid, BTRFS_UUID_SIZE) != 0) 6821 return false; 6822 if (!args->missing) 6823 return true; 6824 if 
(test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) && 6825 !device->bdev) 6826 return true; 6827 return false; 6828 } 6829 6830 /* 6831 * Find a device specified by @devid or @uuid in the list of @fs_devices, or 6832 * return NULL. 6833 * 6834 * If devid and uuid are both specified, the match must be exact, otherwise 6835 * only devid is used. 6836 */ 6837 struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices, 6838 const struct btrfs_dev_lookup_args *args) 6839 { 6840 struct btrfs_device *device; 6841 struct btrfs_fs_devices *seed_devs; 6842 6843 if (dev_args_match_fs_devices(args, fs_devices)) { 6844 list_for_each_entry(device, &fs_devices->devices, dev_list) { 6845 if (dev_args_match_device(args, device)) 6846 return device; 6847 } 6848 } 6849 6850 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 6851 if (!dev_args_match_fs_devices(args, seed_devs)) 6852 continue; 6853 list_for_each_entry(device, &seed_devs->devices, dev_list) { 6854 if (dev_args_match_device(args, device)) 6855 return device; 6856 } 6857 } 6858 6859 return NULL; 6860 } 6861 6862 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices, 6863 u64 devid, u8 *dev_uuid) 6864 { 6865 struct btrfs_device *device; 6866 unsigned int nofs_flag; 6867 6868 /* 6869 * We call this under the chunk_mutex, so we want to use NOFS for this 6870 * allocation, however we don't want to change btrfs_alloc_device() to 6871 * always do NOFS because we use it in a lot of other GFP_KERNEL safe 6872 * places. 6873 */ 6874 nofs_flag = memalloc_nofs_save(); 6875 device = btrfs_alloc_device(NULL, &devid, dev_uuid); 6876 memalloc_nofs_restore(nofs_flag); 6877 if (IS_ERR(device)) 6878 return device; 6879 6880 list_add(&device->dev_list, &fs_devices->devices); 6881 device->fs_devices = fs_devices; 6882 fs_devices->num_devices++; 6883 6884 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 6885 fs_devices->missing_devices++; 6886 6887 return device; 6888 } 6889 6890 /** 6891 * btrfs_alloc_device - allocate struct btrfs_device 6892 * @fs_info: used only for generating a new devid, can be NULL if 6893 * devid is provided (i.e. @devid != NULL). 6894 * @devid: a pointer to devid for this device. If NULL a new devid 6895 * is generated. 6896 * @uuid: a pointer to UUID for this device. If NULL a new UUID 6897 * is generated. 6898 * 6899 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR() 6900 * on error. Returned struct is not linked onto any lists and must be 6901 * destroyed with btrfs_free_device. 
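 *
 * Callers reading device items back from disk pass the devid and uuid
 * found in the item; passing a NULL @devid is only valid with a non-NULL
 * @fs_info, which is then used to generate the next free devid.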
6902 */ 6903 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, 6904 const u64 *devid, 6905 const u8 *uuid) 6906 { 6907 struct btrfs_device *dev; 6908 u64 tmp; 6909 6910 if (WARN_ON(!devid && !fs_info)) 6911 return ERR_PTR(-EINVAL); 6912 6913 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 6914 if (!dev) 6915 return ERR_PTR(-ENOMEM); 6916 6917 /* 6918 * Preallocate a bio that's always going to be used for flushing device 6919 * barriers and matches the device lifespan 6920 */ 6921 dev->flush_bio = bio_kmalloc(GFP_KERNEL, 0); 6922 if (!dev->flush_bio) { 6923 kfree(dev); 6924 return ERR_PTR(-ENOMEM); 6925 } 6926 6927 INIT_LIST_HEAD(&dev->dev_list); 6928 INIT_LIST_HEAD(&dev->dev_alloc_list); 6929 INIT_LIST_HEAD(&dev->post_commit_list); 6930 6931 atomic_set(&dev->reada_in_flight, 0); 6932 atomic_set(&dev->dev_stats_ccnt, 0); 6933 btrfs_device_data_ordered_init(dev); 6934 INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM); 6935 INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM); 6936 extent_io_tree_init(fs_info, &dev->alloc_state, 6937 IO_TREE_DEVICE_ALLOC_STATE, NULL); 6938 6939 if (devid) 6940 tmp = *devid; 6941 else { 6942 int ret; 6943 6944 ret = find_next_devid(fs_info, &tmp); 6945 if (ret) { 6946 btrfs_free_device(dev); 6947 return ERR_PTR(ret); 6948 } 6949 } 6950 dev->devid = tmp; 6951 6952 if (uuid) 6953 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE); 6954 else 6955 generate_random_uuid(dev->uuid); 6956 6957 return dev; 6958 } 6959 6960 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info, 6961 u64 devid, u8 *uuid, bool error) 6962 { 6963 if (error) 6964 btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing", 6965 devid, uuid); 6966 else 6967 btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing", 6968 devid, uuid); 6969 } 6970 6971 static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes) 6972 { 6973 const int data_stripes = calc_data_stripes(type, num_stripes); 6974 6975 return div_u64(chunk_len, data_stripes); 6976 } 6977 6978 #if BITS_PER_LONG == 32 6979 /* 6980 * Due to the page cache limit, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE 6981 * can't be accessed on 32bit systems. 6982 * 6983 * This function does a mount time check to reject the fs if it already 6984 * has a metadata chunk beyond that limit. 6985 */ 6986 static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info, 6987 u64 logical, u64 length, u64 type) 6988 { 6989 if (!(type & BTRFS_BLOCK_GROUP_METADATA)) 6990 return 0; 6991 6992 if (logical + length < MAX_LFS_FILESIZE) 6993 return 0; 6994 6995 btrfs_err_32bit_limit(fs_info); 6996 return -EOVERFLOW; 6997 } 6998 6999 /* 7000 * This is to give an early warning for any metadata chunk reaching 7001 * BTRFS_32BIT_EARLY_WARN_THRESHOLD. 7002 * Although we can still access the metadata, it's not going to be possible 7003 * once the limit is reached.
7004 */ 7005 static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info, 7006 u64 logical, u64 length, u64 type) 7007 { 7008 if (!(type & BTRFS_BLOCK_GROUP_METADATA)) 7009 return; 7010 7011 if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD) 7012 return; 7013 7014 btrfs_warn_32bit_limit(fs_info); 7015 } 7016 #endif 7017 7018 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf, 7019 struct btrfs_chunk *chunk) 7020 { 7021 BTRFS_DEV_LOOKUP_ARGS(args); 7022 struct btrfs_fs_info *fs_info = leaf->fs_info; 7023 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 7024 struct map_lookup *map; 7025 struct extent_map *em; 7026 u64 logical; 7027 u64 length; 7028 u64 devid; 7029 u64 type; 7030 u8 uuid[BTRFS_UUID_SIZE]; 7031 int num_stripes; 7032 int ret; 7033 int i; 7034 7035 logical = key->offset; 7036 length = btrfs_chunk_length(leaf, chunk); 7037 type = btrfs_chunk_type(leaf, chunk); 7038 num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 7039 7040 #if BITS_PER_LONG == 32 7041 ret = check_32bit_meta_chunk(fs_info, logical, length, type); 7042 if (ret < 0) 7043 return ret; 7044 warn_32bit_meta_chunk(fs_info, logical, length, type); 7045 #endif 7046 7047 /* 7048 * Only need to verify chunk item if we're reading from sys chunk array, 7049 * as chunk item in tree block is already verified by tree-checker. 7050 */ 7051 if (leaf->start == BTRFS_SUPER_INFO_OFFSET) { 7052 ret = btrfs_check_chunk_valid(leaf, chunk, logical); 7053 if (ret) 7054 return ret; 7055 } 7056 7057 read_lock(&map_tree->lock); 7058 em = lookup_extent_mapping(map_tree, logical, 1); 7059 read_unlock(&map_tree->lock); 7060 7061 /* already mapped? */ 7062 if (em && em->start <= logical && em->start + em->len > logical) { 7063 free_extent_map(em); 7064 return 0; 7065 } else if (em) { 7066 free_extent_map(em); 7067 } 7068 7069 em = alloc_extent_map(); 7070 if (!em) 7071 return -ENOMEM; 7072 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); 7073 if (!map) { 7074 free_extent_map(em); 7075 return -ENOMEM; 7076 } 7077 7078 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 7079 em->map_lookup = map; 7080 em->start = logical; 7081 em->len = length; 7082 em->orig_start = 0; 7083 em->block_start = 0; 7084 em->block_len = em->len; 7085 7086 map->num_stripes = num_stripes; 7087 map->io_width = btrfs_chunk_io_width(leaf, chunk); 7088 map->io_align = btrfs_chunk_io_align(leaf, chunk); 7089 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk); 7090 map->type = type; 7091 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); 7092 map->verified_stripes = 0; 7093 em->orig_block_len = calc_stripe_length(type, em->len, 7094 map->num_stripes); 7095 for (i = 0; i < num_stripes; i++) { 7096 map->stripes[i].physical = 7097 btrfs_stripe_offset_nr(leaf, chunk, i); 7098 devid = btrfs_stripe_devid_nr(leaf, chunk, i); 7099 args.devid = devid; 7100 read_extent_buffer(leaf, uuid, (unsigned long) 7101 btrfs_stripe_dev_uuid_nr(chunk, i), 7102 BTRFS_UUID_SIZE); 7103 args.uuid = uuid; 7104 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args); 7105 if (!map->stripes[i].dev && 7106 !btrfs_test_opt(fs_info, DEGRADED)) { 7107 free_extent_map(em); 7108 btrfs_report_missing_device(fs_info, devid, uuid, true); 7109 return -ENOENT; 7110 } 7111 if (!map->stripes[i].dev) { 7112 map->stripes[i].dev = 7113 add_missing_dev(fs_info->fs_devices, devid, 7114 uuid); 7115 if (IS_ERR(map->stripes[i].dev)) { 7116 free_extent_map(em); 7117 btrfs_err(fs_info, 7118 "failed to init missing dev %llu: %ld", 7119 devid, 
PTR_ERR(map->stripes[i].dev)); 7120 return PTR_ERR(map->stripes[i].dev); 7121 } 7122 btrfs_report_missing_device(fs_info, devid, uuid, false); 7123 } 7124 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 7125 &(map->stripes[i].dev->dev_state)); 7126 7127 } 7128 7129 write_lock(&map_tree->lock); 7130 ret = add_extent_mapping(map_tree, em, 0); 7131 write_unlock(&map_tree->lock); 7132 if (ret < 0) { 7133 btrfs_err(fs_info, 7134 "failed to add chunk map, start=%llu len=%llu: %d", 7135 em->start, em->len, ret); 7136 } 7137 free_extent_map(em); 7138 7139 return ret; 7140 } 7141 7142 static void fill_device_from_item(struct extent_buffer *leaf, 7143 struct btrfs_dev_item *dev_item, 7144 struct btrfs_device *device) 7145 { 7146 unsigned long ptr; 7147 7148 device->devid = btrfs_device_id(leaf, dev_item); 7149 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item); 7150 device->total_bytes = device->disk_total_bytes; 7151 device->commit_total_bytes = device->disk_total_bytes; 7152 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item); 7153 device->commit_bytes_used = device->bytes_used; 7154 device->type = btrfs_device_type(leaf, dev_item); 7155 device->io_align = btrfs_device_io_align(leaf, dev_item); 7156 device->io_width = btrfs_device_io_width(leaf, dev_item); 7157 device->sector_size = btrfs_device_sector_size(leaf, dev_item); 7158 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID); 7159 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 7160 7161 ptr = btrfs_device_uuid(dev_item); 7162 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 7163 } 7164 7165 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info, 7166 u8 *fsid) 7167 { 7168 struct btrfs_fs_devices *fs_devices; 7169 int ret; 7170 7171 lockdep_assert_held(&uuid_mutex); 7172 ASSERT(fsid); 7173 7174 /* This will match only for multi-device seed fs */ 7175 list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list) 7176 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE)) 7177 return fs_devices; 7178 7179 7180 fs_devices = find_fsid(fsid, NULL); 7181 if (!fs_devices) { 7182 if (!btrfs_test_opt(fs_info, DEGRADED)) 7183 return ERR_PTR(-ENOENT); 7184 7185 fs_devices = alloc_fs_devices(fsid, NULL); 7186 if (IS_ERR(fs_devices)) 7187 return fs_devices; 7188 7189 fs_devices->seeding = true; 7190 fs_devices->opened = 1; 7191 return fs_devices; 7192 } 7193 7194 /* 7195 * Upon first call for a seed fs fsid, just create a private copy of the 7196 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list 7197 */ 7198 fs_devices = clone_fs_devices(fs_devices); 7199 if (IS_ERR(fs_devices)) 7200 return fs_devices; 7201 7202 ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder); 7203 if (ret) { 7204 free_fs_devices(fs_devices); 7205 return ERR_PTR(ret); 7206 } 7207 7208 if (!fs_devices->seeding) { 7209 close_fs_devices(fs_devices); 7210 free_fs_devices(fs_devices); 7211 return ERR_PTR(-EINVAL); 7212 } 7213 7214 list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list); 7215 7216 return fs_devices; 7217 } 7218 7219 static int read_one_dev(struct extent_buffer *leaf, 7220 struct btrfs_dev_item *dev_item) 7221 { 7222 BTRFS_DEV_LOOKUP_ARGS(args); 7223 struct btrfs_fs_info *fs_info = leaf->fs_info; 7224 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7225 struct btrfs_device *device; 7226 u64 devid; 7227 int ret; 7228 u8 fs_uuid[BTRFS_FSID_SIZE]; 7229 u8 dev_uuid[BTRFS_UUID_SIZE]; 7230 7231 devid = args.devid = btrfs_device_id(leaf, 
dev_item); 7232 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 7233 BTRFS_UUID_SIZE); 7234 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 7235 BTRFS_FSID_SIZE); 7236 args.uuid = dev_uuid; 7237 args.fsid = fs_uuid; 7238 7239 if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) { 7240 fs_devices = open_seed_devices(fs_info, fs_uuid); 7241 if (IS_ERR(fs_devices)) 7242 return PTR_ERR(fs_devices); 7243 } 7244 7245 device = btrfs_find_device(fs_info->fs_devices, &args); 7246 if (!device) { 7247 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7248 btrfs_report_missing_device(fs_info, devid, 7249 dev_uuid, true); 7250 return -ENOENT; 7251 } 7252 7253 device = add_missing_dev(fs_devices, devid, dev_uuid); 7254 if (IS_ERR(device)) { 7255 btrfs_err(fs_info, 7256 "failed to add missing dev %llu: %ld", 7257 devid, PTR_ERR(device)); 7258 return PTR_ERR(device); 7259 } 7260 btrfs_report_missing_device(fs_info, devid, dev_uuid, false); 7261 } else { 7262 if (!device->bdev) { 7263 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7264 btrfs_report_missing_device(fs_info, 7265 devid, dev_uuid, true); 7266 return -ENOENT; 7267 } 7268 btrfs_report_missing_device(fs_info, devid, 7269 dev_uuid, false); 7270 } 7271 7272 if (!device->bdev && 7273 !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 7274 /* 7275 * this happens when a device that was properly setup 7276 * in the device info lists suddenly goes bad. 7277 * device->bdev is NULL, and so we have to set 7278 * device->missing to one here 7279 */ 7280 device->fs_devices->missing_devices++; 7281 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 7282 } 7283 7284 /* Move the device to its own fs_devices */ 7285 if (device->fs_devices != fs_devices) { 7286 ASSERT(test_bit(BTRFS_DEV_STATE_MISSING, 7287 &device->dev_state)); 7288 7289 list_move(&device->dev_list, &fs_devices->devices); 7290 device->fs_devices->num_devices--; 7291 fs_devices->num_devices++; 7292 7293 device->fs_devices->missing_devices--; 7294 fs_devices->missing_devices++; 7295 7296 device->fs_devices = fs_devices; 7297 } 7298 } 7299 7300 if (device->fs_devices != fs_info->fs_devices) { 7301 BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)); 7302 if (device->generation != 7303 btrfs_device_generation(leaf, dev_item)) 7304 return -EINVAL; 7305 } 7306 7307 fill_device_from_item(leaf, dev_item, device); 7308 if (device->bdev) { 7309 u64 max_total_bytes = i_size_read(device->bdev->bd_inode); 7310 7311 if (device->total_bytes > max_total_bytes) { 7312 btrfs_err(fs_info, 7313 "device total_bytes should be at most %llu but found %llu", 7314 max_total_bytes, device->total_bytes); 7315 return -EINVAL; 7316 } 7317 } 7318 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 7319 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 7320 !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 7321 device->fs_devices->total_rw_bytes += device->total_bytes; 7322 atomic64_add(device->total_bytes - device->bytes_used, 7323 &fs_info->free_chunk_space); 7324 } 7325 ret = 0; 7326 return ret; 7327 } 7328 7329 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info) 7330 { 7331 struct btrfs_root *root = fs_info->tree_root; 7332 struct btrfs_super_block *super_copy = fs_info->super_copy; 7333 struct extent_buffer *sb; 7334 struct btrfs_disk_key *disk_key; 7335 struct btrfs_chunk *chunk; 7336 u8 *array_ptr; 7337 unsigned long sb_array_offset; 7338 int ret = 0; 7339 u32 num_stripes; 7340 u32 array_size; 7341 u32 len = 0; 7342 u32 cur_offset; 
7343 u64 type; 7344 struct btrfs_key key; 7345 7346 ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize); 7347 /* 7348 * This will create an extent buffer of nodesize; the superblock size is 7349 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will 7350 * overallocate, but we can keep it as-is since only the first page is used. 7351 */ 7352 sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET, 7353 root->root_key.objectid, 0); 7354 if (IS_ERR(sb)) 7355 return PTR_ERR(sb); 7356 set_extent_buffer_uptodate(sb); 7357 /* 7358 * The sb extent buffer is artificial and just used to read the system array. 7359 * The set_extent_buffer_uptodate() call does not properly mark all its 7360 * pages up-to-date when the page is larger: the extent does not cover the 7361 * whole page and consequently check_page_uptodate does not find all 7362 * the page's extents up-to-date (the hole beyond sb), and 7363 * write_extent_buffer then triggers a WARN_ON. 7364 * 7365 * Regular short extents go through the mark_extent_buffer_dirty/writeback cycle, 7366 * but sb spans only this function. Add an explicit SetPageUptodate call 7367 * to silence the warning, e.g. on PowerPC 64. 7368 */ 7369 if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE) 7370 SetPageUptodate(sb->pages[0]); 7371 7372 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); 7373 array_size = btrfs_super_sys_array_size(super_copy); 7374 7375 array_ptr = super_copy->sys_chunk_array; 7376 sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array); 7377 cur_offset = 0; 7378 7379 while (cur_offset < array_size) { 7380 disk_key = (struct btrfs_disk_key *)array_ptr; 7381 len = sizeof(*disk_key); 7382 if (cur_offset + len > array_size) 7383 goto out_short_read; 7384 7385 btrfs_disk_key_to_cpu(&key, disk_key); 7386 7387 array_ptr += len; 7388 sb_array_offset += len; 7389 cur_offset += len; 7390 7391 if (key.type != BTRFS_CHUNK_ITEM_KEY) { 7392 btrfs_err(fs_info, 7393 "unexpected item type %u in sys_array at offset %u", 7394 (u32)key.type, cur_offset); 7395 ret = -EIO; 7396 break; 7397 } 7398 7399 chunk = (struct btrfs_chunk *)sb_array_offset; 7400 /* 7401 * At least one btrfs_chunk with one stripe must be present; 7402 * the exact stripe count check comes afterwards. 7403 */ 7404 len = btrfs_chunk_item_size(1); 7405 if (cur_offset + len > array_size) 7406 goto out_short_read; 7407 7408 num_stripes = btrfs_chunk_num_stripes(sb, chunk); 7409 if (!num_stripes) { 7410 btrfs_err(fs_info, 7411 "invalid number of stripes %u in sys_array at offset %u", 7412 num_stripes, cur_offset); 7413 ret = -EIO; 7414 break; 7415 } 7416 7417 type = btrfs_chunk_type(sb, chunk); 7418 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) { 7419 btrfs_err(fs_info, 7420 "invalid chunk type %llu in sys_array at offset %u", 7421 type, cur_offset); 7422 ret = -EIO; 7423 break; 7424 } 7425 7426 len = btrfs_chunk_item_size(num_stripes); 7427 if (cur_offset + len > array_size) 7428 goto out_short_read; 7429 7430 ret = read_one_chunk(&key, sb, chunk); 7431 if (ret) 7432 break; 7433 7434 array_ptr += len; 7435 sb_array_offset += len; 7436 cur_offset += len; 7437 } 7438 clear_extent_buffer_uptodate(sb); 7439 free_extent_buffer_stale(sb); 7440 return ret; 7441 7442 out_short_read: 7443 btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u", 7444 len, cur_offset); 7445 clear_extent_buffer_uptodate(sb); 7446 free_extent_buffer_stale(sb); 7447 return -EIO; 7448 } 7449 7450 /* 7451 * Check if all chunks in the fs are OK for a read-write degraded mount 7452 * 7453 * If the @failing_dev is specified,
it's accounted as missing. 7454 * 7455 * Return true if all chunks meet the minimal RW mount requirements. 7456 * Return false if any chunk doesn't meet the minimal RW mount requirements. 7457 */ 7458 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info, 7459 struct btrfs_device *failing_dev) 7460 { 7461 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 7462 struct extent_map *em; 7463 u64 next_start = 0; 7464 bool ret = true; 7465 7466 read_lock(&map_tree->lock); 7467 em = lookup_extent_mapping(map_tree, 0, (u64)-1); 7468 read_unlock(&map_tree->lock); 7469 /* No chunk at all? Return false anyway */ 7470 if (!em) { 7471 ret = false; 7472 goto out; 7473 } 7474 while (em) { 7475 struct map_lookup *map; 7476 int missing = 0; 7477 int max_tolerated; 7478 int i; 7479 7480 map = em->map_lookup; 7481 max_tolerated = 7482 btrfs_get_num_tolerated_disk_barrier_failures( 7483 map->type); 7484 for (i = 0; i < map->num_stripes; i++) { 7485 struct btrfs_device *dev = map->stripes[i].dev; 7486 7487 if (!dev || !dev->bdev || 7488 test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) || 7489 dev->last_flush_error) 7490 missing++; 7491 else if (failing_dev && failing_dev == dev) 7492 missing++; 7493 } 7494 if (missing > max_tolerated) { 7495 if (!failing_dev) 7496 btrfs_warn(fs_info, 7497 "chunk %llu missing %d devices, max tolerance is %d for writable mount", 7498 em->start, missing, max_tolerated); 7499 free_extent_map(em); 7500 ret = false; 7501 goto out; 7502 } 7503 next_start = extent_map_end(em); 7504 free_extent_map(em); 7505 7506 read_lock(&map_tree->lock); 7507 em = lookup_extent_mapping(map_tree, next_start, 7508 (u64)(-1) - next_start); 7509 read_unlock(&map_tree->lock); 7510 } 7511 out: 7512 return ret; 7513 } 7514 7515 static void readahead_tree_node_children(struct extent_buffer *node) 7516 { 7517 int i; 7518 const int nr_items = btrfs_header_nritems(node); 7519 7520 for (i = 0; i < nr_items; i++) 7521 btrfs_readahead_node_child(node, i); 7522 } 7523 7524 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) 7525 { 7526 struct btrfs_root *root = fs_info->chunk_root; 7527 struct btrfs_path *path; 7528 struct extent_buffer *leaf; 7529 struct btrfs_key key; 7530 struct btrfs_key found_key; 7531 int ret; 7532 int slot; 7533 u64 total_dev = 0; 7534 u64 last_ra_node = 0; 7535 7536 path = btrfs_alloc_path(); 7537 if (!path) 7538 return -ENOMEM; 7539 7540 /* 7541 * uuid_mutex is needed only if we are mounting a sprout FS 7542 * otherwise we don't need it. 7543 */ 7544 mutex_lock(&uuid_mutex); 7545 7546 /* 7547 * It is possible for mount and umount to race in such a way that 7548 * we execute this code path, but open_fs_devices failed to clear 7549 * total_rw_bytes. We certainly want it cleared before reading the 7550 * device items, so clear it here. 7551 */ 7552 fs_info->fs_devices->total_rw_bytes = 0; 7553 7554 /* 7555 * Read all device items, and then all the chunk items. All 7556 * device items are found before any chunk item (their object id 7557 * is smaller than the lowest possible object id for a chunk 7558 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID). 
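 * Hence the single forward search below, starting at
 * BTRFS_DEV_ITEMS_OBJECTID, visits every device item before the first
 * chunk item.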
7559 */ 7560 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 7561 key.offset = 0; 7562 key.type = 0; 7563 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 7564 if (ret < 0) 7565 goto error; 7566 while (1) { 7567 struct extent_buffer *node; 7568 7569 leaf = path->nodes[0]; 7570 slot = path->slots[0]; 7571 if (slot >= btrfs_header_nritems(leaf)) { 7572 ret = btrfs_next_leaf(root, path); 7573 if (ret == 0) 7574 continue; 7575 if (ret < 0) 7576 goto error; 7577 break; 7578 } 7579 /* 7580 * The nodes on level 1 are not locked but we don't need to do 7581 * that during mount time as nothing else can access the tree 7582 */ 7583 node = path->nodes[1]; 7584 if (node) { 7585 if (last_ra_node != node->start) { 7586 readahead_tree_node_children(node); 7587 last_ra_node = node->start; 7588 } 7589 } 7590 btrfs_item_key_to_cpu(leaf, &found_key, slot); 7591 if (found_key.type == BTRFS_DEV_ITEM_KEY) { 7592 struct btrfs_dev_item *dev_item; 7593 dev_item = btrfs_item_ptr(leaf, slot, 7594 struct btrfs_dev_item); 7595 ret = read_one_dev(leaf, dev_item); 7596 if (ret) 7597 goto error; 7598 total_dev++; 7599 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { 7600 struct btrfs_chunk *chunk; 7601 7602 /* 7603 * We are only called at mount time, so no need to take 7604 * fs_info->chunk_mutex. Plus, to avoid lockdep warnings, 7605 * we always lock first fs_info->chunk_mutex before 7606 * acquiring any locks on the chunk tree. This is a 7607 * requirement for chunk allocation, see the comment on 7608 * top of btrfs_chunk_alloc() for details. 7609 */ 7610 ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags)); 7611 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 7612 ret = read_one_chunk(&found_key, leaf, chunk); 7613 if (ret) 7614 goto error; 7615 } 7616 path->slots[0]++; 7617 } 7618 7619 /* 7620 * After loading chunk tree, we've got all device information, 7621 * do another round of validation checks. 
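 * i.e. the device count and total size recorded in the super block must
 * be consistent with the device items we just read.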
void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	struct btrfs_device *device;

	fs_devices->fs_info = fs_info;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list)
		device->fs_info = fs_info;

	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		list_for_each_entry(device, &seed_devs->devices, dev_list)
			device->fs_info = fs_info;

		seed_devs->fs_info = fs_info;
	}
	mutex_unlock(&fs_devices->device_list_mutex);
}

static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
				 const struct btrfs_dev_stats_item *ptr,
				 int index)
{
	u64 val;

	read_extent_buffer(eb, &val,
			   offsetof(struct btrfs_dev_stats_item, values) +
			    ((unsigned long)ptr) + (index * sizeof(u64)),
			   sizeof(val));
	return val;
}

static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
				      struct btrfs_dev_stats_item *ptr,
				      int index, u64 val)
{
	write_extent_buffer(eb, &val,
			    offsetof(struct btrfs_dev_stats_item, values) +
			     ((unsigned long)ptr) + (index * sizeof(u64)),
			    sizeof(val));
}

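/*
 * Load the persisted error counters of one device from its dev stats
 * item. A missing item is not an error (e.g. a device that never had any
 * stats written): the counters are zeroed instead. An item shorter than
 * BTRFS_DEV_STAT_VALUES_MAX values (presumably written by an older
 * kernel) is padded with zeroes.
 */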
static int btrfs_device_init_dev_stats(struct btrfs_device *device,
				       struct btrfs_path *path)
{
	struct btrfs_dev_stats_item *ptr;
	struct extent_buffer *eb;
	struct btrfs_key key;
	int item_size;
	int i, ret, slot;

	if (!device->fs_info->dev_root)
		return 0;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;
	ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0);
	if (ret) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			btrfs_dev_stat_set(device, i, 0);
		device->dev_stats_valid = 1;
		btrfs_release_path(path);
		return ret < 0 ? ret : 0;
	}
	slot = path->slots[0];
	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, slot);

	ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item);

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
		if (item_size >= (1 + i) * sizeof(__le64))
			btrfs_dev_stat_set(device, i,
					   btrfs_dev_stats_value(eb, ptr, i));
		else
			btrfs_dev_stat_set(device, i, 0);
	}

	device->dev_stats_valid = 1;
	btrfs_dev_stat_print_on_load(device);
	btrfs_release_path(path);

	return 0;
}

int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		ret = btrfs_device_init_dev_stats(device, path);
		if (ret)
			goto out;
	}
	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		list_for_each_entry(device, &seed_devs->devices, dev_list) {
			ret = btrfs_device_init_dev_stats(device, path);
			if (ret)
				goto out;
		}
	}
out:
	mutex_unlock(&fs_devices->device_list_mutex);

	btrfs_free_path(path);
	return ret;
}

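/*
 * Write the in-memory error counters of one device into its dev stats
 * item, creating the item if it doesn't exist yet. An existing item too
 * small to hold all BTRFS_DEV_STAT_VALUES_MAX counters is deleted and
 * re-inserted at full size.
 */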
static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn_in_rcu(fs_info,
			"error %d while searching for dev_stats item for device %s",
			ret, rcu_str_deref(device->name));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* need to delete old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn_in_rcu(fs_info,
				"delete too small dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn_in_rcu(fs_info,
				"insert dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Called from commit_transaction. Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		stats_cnt = atomic_read(&device->dev_stats_ccnt);
		if (!device->dev_stats_valid || stats_cnt == 0)
			continue;

		/*
		 * There is a LOAD-LOAD control dependency between the value of
		 * dev_stats_ccnt and updating the on-disk values, which
		 * requires reading the in-memory counters. Such control
		 * dependencies require explicit read memory barriers.
		 *
		 * This memory barrier pairs with smp_mb__before_atomic in
		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
		 * barrier implied by atomic_xchg in
		 * btrfs_dev_stats_read_and_reset.
		 */
		smp_rmb();

		ret = update_dev_stat_item(trans, device);
		if (!ret)
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);
	btrfs_dev_stat_print_on_error(dev);
}

static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
	if (!dev->dev_stats_valid)
		return;
	btrfs_err_rl_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			   rcu_str_deref(dev->name),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

	btrfs_info_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
		   rcu_str_deref(dev->name),
		   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
		   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
		   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
		   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
		   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

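/*
 * Copy up to stats->nr_items counters of the device given by
 * stats->devid into the ioctl structure. If BTRFS_DEV_STATS_RESET is
 * set, each counter is read and reset in one atomic step, and counters
 * that don't fit into nr_items are still zeroed.
 */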
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	BTRFS_DEV_LOOKUP_ARGS(args);
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	args.devid = stats->devid;
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_set(dev, i, 0);
		}
		btrfs_info(fs_info, "device stats zeroed by %s (%d)",
			   current->comm, task_pid_nr(current));
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}

/*
 * Update the size and bytes used for each device where it changed. This is
 * delayed since we would otherwise get errors while writing out the
 * superblocks.
 *
 * Must be invoked during transaction commit.
 */
void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
{
	struct btrfs_device *curr, *next;

	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);

	if (list_empty(&trans->dev_update_list))
		return;

	/*
	 * We don't need the device_list_mutex here. This list is owned by the
	 * transaction and the transaction must complete before the device is
	 * released.
	 */
	mutex_lock(&trans->fs_info->chunk_mutex);
	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
				 post_commit_list) {
		list_del_init(&curr->post_commit_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
		curr->commit_bytes_used = curr->bytes_used;
	}
	mutex_unlock(&trans->fs_info->chunk_mutex);
}

/*
 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
 */
int btrfs_bg_type_to_factor(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	return btrfs_raid_array[index].ncopies;
}

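/*
 * Cross check one dev extent against the chunk mapping it points at:
 * the chunk must exist, the extent length must match the per-device
 * stripe length, exactly one of the chunk's stripes must reference this
 * (devid, physical) position, and the extent must lie within the device
 * (and be zone aligned on zoned devices).
 */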
static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
				 u64 chunk_offset, u64 devid,
				 u64 physical_offset, u64 physical_len)
{
	struct btrfs_dev_lookup_args args = { .devid = devid };
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *dev;
	u64 stripe_len;
	bool found = false;
	int ret = 0;
	int i;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
		goto out;
	}

	map = em->map_lookup;
	stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
	if (physical_len != stripe_len) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
			  physical_offset, devid, em->start, physical_len,
			  stripe_len);
		ret = -EUCLEAN;
		goto out;
	}

	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->devid == devid &&
		    map->stripes[i].physical == physical_offset) {
			found = true;
			if (map->verified_stripes >= map->num_stripes) {
				btrfs_err(fs_info,
					"too many dev extents for chunk %llu found",
					  em->start);
				ret = -EUCLEAN;
				goto out;
			}
			map->verified_stripes++;
			break;
		}
	}
	if (!found) {
		btrfs_err(fs_info,
	"dev extent physical offset %llu devid %llu has no corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
	}

	/* Make sure no dev extent is beyond device boundary */
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	if (!dev) {
		btrfs_err(fs_info, "failed to find devid %llu", devid);
		ret = -EUCLEAN;
		goto out;
	}

	if (physical_offset + physical_len > dev->disk_total_bytes) {
		btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
			  devid, physical_offset, physical_len,
			  dev->disk_total_bytes);
		ret = -EUCLEAN;
		goto out;
	}

	if (dev->zone_info) {
		u64 zone_size = dev->zone_info->zone_size;

		if (!IS_ALIGNED(physical_offset, zone_size) ||
		    !IS_ALIGNED(physical_len, zone_size)) {
			btrfs_err(fs_info,
"zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
				  devid, physical_offset, physical_len);
			ret = -EUCLEAN;
			goto out;
		}
	}

out:
	free_extent_map(em);
	return ret;
}

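/*
 * Counterpart of verify_one_dev_extent(): after all dev extents were
 * checked, every chunk must have had each of its stripes verified
 * (verified_stripes == num_stripes), otherwise some dev extent is
 * missing.
 */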
static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct rb_node *node;
	int ret = 0;

	read_lock(&em_tree->lock);
	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
		em = rb_entry(node, struct extent_map, rb_node);
		if (em->map_lookup->num_stripes !=
		    em->map_lookup->verified_stripes) {
			btrfs_err(fs_info,
			"chunk %llu has missing dev extent, have %d expect %d",
				  em->start, em->map_lookup->verified_stripes,
				  em->map_lookup->num_stripes);
			ret = -EUCLEAN;
			goto out;
		}
	}
out:
	read_unlock(&em_tree->lock);
	return ret;
}

/*
 * Ensure that all dev extents are mapped to the correct chunk, otherwise
 * later chunk allocation/free would cause unexpected behavior.
 *
 * NOTE: This will iterate through the whole device tree, which should be
 * about the same size as the chunk tree. This slightly increases mount time.
 */
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	u64 prev_devid = 0;
	u64 prev_dev_ext_end = 0;
	int ret = 0;

	/*
	 * We don't have a dev_root because we mounted with ignorebadroots and
	 * failed to load the root, so we want to skip the verification in this
	 * case for sure.
	 *
	 * However if the dev root is fine, but the tree itself is corrupted
	 * we'd still fail to mount. This verification is only to make sure
	 * writes can happen safely, so instead just bypass this check
	 * completely in the case of IGNOREBADROOTS.
	 */
	if (btrfs_test_opt(fs_info, IGNOREBADROOTS))
		return 0;

	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	while (1) {
		struct extent_buffer *leaf = path->nodes[0];
		struct btrfs_dev_extent *dext;
		int slot = path->slots[0];
		u64 chunk_offset;
		u64 physical_offset;
		u64 physical_len;
		u64 devid;

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			break;
		devid = key.objectid;
		physical_offset = key.offset;

		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
		physical_len = btrfs_dev_extent_length(leaf, dext);

		/* Check if this dev extent overlaps with the previous one */
		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
			btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
				  devid, physical_offset, prev_dev_ext_end);
			ret = -EUCLEAN;
			goto out;
		}

		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
					    physical_offset, physical_len);
		if (ret < 0)
			goto out;
		prev_devid = devid;
		prev_dev_ext_end = physical_offset + physical_len;

		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}

	/* Ensure all chunks have corresponding dev extents */
	ret = verify_chunk_dev_extent_mapping(fs_info);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Check whether the given block group or device is pinned by any inode being
 * used as a swapfile.
 */
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
{
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = fs_info->swapfile_pins.rb_node;
	while (node) {
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (ptr < sp->ptr)
			node = node->rb_left;
		else if (ptr > sp->ptr)
			node = node->rb_right;
		else
			break;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
	return node != NULL;
}

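/*
 * Worker for btrfs_repair_one_zone(): relocate the block group that hit
 * an IO failure so its data is rewritten elsewhere. Consumes the block
 * group reference passed in via @data and gives up with -EBUSY if
 * another exclusive operation is already running.
 */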
static int relocating_repair_kthread(void *data)
{
	struct btrfs_block_group *cache = (struct btrfs_block_group *)data;
	struct btrfs_fs_info *fs_info = cache->fs_info;
	u64 target;
	int ret = 0;

	target = cache->start;
	btrfs_put_block_group(cache);

	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
		btrfs_info(fs_info,
			   "zoned: skip relocating block group %llu to repair: EBUSY",
			   target);
		return -EBUSY;
	}

	mutex_lock(&fs_info->reclaim_bgs_lock);

	/* Ensure block group still exists */
	cache = btrfs_lookup_block_group(fs_info, target);
	if (!cache)
		goto out;

	if (!cache->relocating_repair)
		goto out;

	ret = btrfs_may_alloc_data_chunk(fs_info, target);
	if (ret < 0)
		goto out;

	btrfs_info(fs_info,
		   "zoned: relocating block group %llu to repair IO failure",
		   target);
	ret = btrfs_relocate_chunk(fs_info, target);

out:
	if (cache)
		btrfs_put_block_group(cache);
	mutex_unlock(&fs_info->reclaim_bgs_lock);
	btrfs_exclop_finish(fs_info);

	return ret;
}

int btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
{
	struct btrfs_block_group *cache;

	/* Do not attempt to repair in degraded state */
	if (btrfs_test_opt(fs_info, DEGRADED))
		return 0;

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return 0;

	spin_lock(&cache->lock);
	if (cache->relocating_repair) {
		spin_unlock(&cache->lock);
		btrfs_put_block_group(cache);
		return 0;
	}
	cache->relocating_repair = 1;
	spin_unlock(&cache->lock);

	kthread_run(relocating_repair_kthread, cache,
		    "btrfs-relocating-repair");

	return 0;
}