// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include "misc.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"
#include "discard.h"
#include "zoned.h"

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes = 2,
		.dev_stripes = 1,
		.devs_max = 0, /* 0 == as many as possible */
		.devs_min = 2,
		.tolerated_failures = 1,
		.devs_increment = 2,
		.ncopies = 2,
		.nparity = 0,
		.raid_name = "raid10",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 2,
		.devs_min = 2,
		.tolerated_failures = 1,
		.devs_increment = 2,
		.ncopies = 2,
		.nparity = 0,
		.raid_name = "raid1",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C3] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 3,
		.devs_min = 3,
		.tolerated_failures = 2,
		.devs_increment = 3,
		.ncopies = 3,
		.nparity = 0,
		.raid_name = "raid1c3",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID1C3,
		.mindev_error = BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C4] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 4,
		.devs_min = 4,
		.tolerated_failures = 3,
		.devs_increment = 4,
		.ncopies = 4,
		.nparity = 0,
		.raid_name = "raid1c4",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID1C4,
		.mindev_error = BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes = 1,
		.dev_stripes = 2,
		.devs_max = 1,
		.devs_min = 1,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 2,
		.nparity = 0,
		.raid_name = "dup",
		.bg_flag = BTRFS_BLOCK_GROUP_DUP,
		.mindev_error = 0,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 1,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 0,
		.raid_name = "raid0",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error = 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 1,
		.devs_min = 1,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 0,
		.raid_name = "single",
		.bg_flag = 0,
		.mindev_error = 0,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 2,
		.tolerated_failures = 1,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 1,
		.raid_name = "raid5",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 3,
		.tolerated_failures = 2,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 2,
		.raid_name = "raid6",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};

/*
 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
 * can be used as index to access btrfs_raid_array[].
 */
enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags)
{
	if (flags & BTRFS_BLOCK_GROUP_RAID10)
		return BTRFS_RAID_RAID10;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
		return BTRFS_RAID_RAID1;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1C3)
		return BTRFS_RAID_RAID1C3;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1C4)
		return BTRFS_RAID_RAID1C4;
	else if (flags & BTRFS_BLOCK_GROUP_DUP)
		return BTRFS_RAID_DUP;
	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
		return BTRFS_RAID_RAID0;
	else if (flags & BTRFS_BLOCK_GROUP_RAID5)
		return BTRFS_RAID_RAID5;
	else if (flags & BTRFS_BLOCK_GROUP_RAID6)
		return BTRFS_RAID_RAID6;

	return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
}

const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}

/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including the terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

#define DESCRIBE_FLAG(flag, desc)				\
	do {							\
		if (flags & (flag)) {				\
			ret = snprintf(bp, size_bp, "%s|", (desc)); \
			if (ret < 0 || ret >= size_bp)		\
				goto out_overflow;		\
			size_bp -= ret;				\
			bp += ret;				\
			flags &= ~(flag);			\
		}						\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide a
	 * sufficiently large buffer.
	 */
out_overflow:;
}

static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_io_context **bioc_ret,
			     int mirror_num, int need_raid_map);
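/*
 * Illustrative sketch (not part of the original file): how a hypothetical
 * caller might use btrfs_describe_block_groups() above. The output is a
 * '|'-separated list of the type and profile names:
 *
 *	char buf[64];
 *
 *	btrfs_describe_block_groups(BTRFS_BLOCK_GROUP_DATA |
 *				    BTRFS_BLOCK_GROUP_RAID1, buf, sizeof(buf));
 *	// buf now holds "data|raid1"
 *
 * Unknown bits are printed as a trailing hex value, and the text is silently
 * truncated if @buf is too small.
 */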
/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files.
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list in general
 * but in mount context it could be used to exclude list modifications by e.g.
 * the scan ioctl
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, i.e. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * is not required at mount and close times, because our device list is
 * protected by the uuid_mutex at that point
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects the post_commit_list
 * of individual devices, since they can be added to the transaction's
 * post_commit_list only with the chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   device_list_mutex
 *     chunk_mutex
 *   balance_mutex
 *
 *
 * Exclusive operations
 * ====================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel:
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * The status of exclusive operation is set and cleared atomically.
 * During the course of the Paused state, fs_info::exclusive_operation remains
 * set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * The exclusive status is cleared when the device operation is canceled or
 * completed.
 */
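/*
 * Illustrative sketch (not part of the original file): the nesting order
 * documented above means a path that needs both locks must take them
 * outermost-first, e.g.:
 *
 *	mutex_lock(&uuid_mutex);
 *	mutex_lock(&fs_devices->device_list_mutex);
 *	// ... walk or modify fs_devices->devices ...
 *	mutex_unlock(&fs_devices->device_list_mutex);
 *	mutex_unlock(&uuid_mutex);
 *
 * Taking uuid_mutex while already holding device_list_mutex would invert the
 * documented order and risk a deadlock.
 */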
DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:		if not NULL, copy the UUID to fs_devices::fsid
 * @metadata_fsid:	if not NULL, copy the UUID to fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
						 const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	INIT_LIST_HEAD(&fs_devs->seed_list);
	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);

	if (metadata_fsid)
		memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
	else if (fsid)
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);

	return fs_devs;
}

void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	extent_io_tree_release(&device->alloc_state);
	bio_put(device->flush_bio);
	btrfs_destroy_dev_zone_info(device);
	kfree(device);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}

void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}

static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (metadata_fsid) {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
			    && memcmp(metadata_fsid, fs_devices->metadata_uuid,
				      BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		} else {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		}
	}
	return NULL;
}
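/*
 * Illustrative sketch (not part of the original file): the typical
 * lookup-then-allocate pattern built from the helpers above, as
 * device_list_add() does further down. Assumes @disk_super is a validated
 * super block and that the caller holds uuid_mutex:
 *
 *	fs_devices = find_fsid(disk_super->fsid, NULL);
 *	if (!fs_devices) {
 *		fs_devices = alloc_fs_devices(disk_super->fsid, NULL);
 *		if (IS_ERR(fs_devices))
 *			return ERR_CAST(fs_devices);
 *		list_add(&fs_devices->fs_list, &fs_uuids);
 *	}
 */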
static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
				struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by first scanning
	 * a device which didn't have its fsid/metadata_uuid changed
	 * at all and the CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(disk_super->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}
	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by a device that
	 * has an outdated pair of fsid/metadata_uuid and
	 * CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(fs_devices->metadata_uuid,
			   fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
		    memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
}

static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct btrfs_super_block **disk_super)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*disk_super = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*disk_super)) {
		ret = PTR_ERR(*disk_super);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	return ret;
}

static bool device_path_matched(const char *path, struct btrfs_device *device)
{
	int found;

	rcu_read_lock();
	found = strcmp(rcu_str_deref(device->name), path);
	rcu_read_unlock();

	return found == 0;
}
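/*
 * Illustrative sketch (not part of the original file): a minimal hypothetical
 * caller of btrfs_get_bdev_and_sb() above. On success the caller owns both
 * the block device reference and the super block page:
 *
 *	struct block_device *bdev;
 *	struct btrfs_super_block *disk_super;
 *	int ret;
 *
 *	ret = btrfs_get_bdev_and_sb(path, FMODE_READ | FMODE_EXCL, holder,
 *				    0, &bdev, &disk_super);
 *	if (ret)
 *		return ret;
 *	// ... inspect disk_super ...
 *	btrfs_release_disk_super(disk_super);
 *	blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
 */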
/*
 * Search and remove all stale devices (devices which are not mounted).
 * When both inputs are NULL, it will search and release all stale devices.
 *
 * @path:	Optional. When provided, it will release all unmounted devices
 *		matching this path only.
 * @skip_dev:	Optional. Will skip this device when searching for the stale
 *		devices.
 *
 * Return:	0 for success or if @path is NULL.
 *		-EBUSY if @path is a mounted device.
 *		-ENOENT if @path does not match any device in the list.
 */
static int btrfs_free_stale_devices(const char *path,
				    struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	if (path)
		ret = -ENOENT;

	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (path && !device->name)
				continue;
			if (path && !device_path_matched(path, device))
				continue;
			if (fs_devices->opened) {
				/* for an already deleted device return 0 */
				if (path && ret != 0)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			ret = 0;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	return ret;
}

/*
 * This is only used on mount, and we are protected from competing things
 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 * fs_devices->device_list_mutex here.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
				 struct btrfs_device *device, fmode_t flags,
				 void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev, &disk_super);
	if (ret)
		return ret;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_free_page;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_free_page;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_free_page;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = true;
	} else {
		if (bdev_read_only(bdev))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	q = bdev_get_queue(bdev);
	if (!blk_queue_nonrot(q))
		fs_devices->rotating = true;

	device->bdev = bdev;
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	device->mode = flags;

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	btrfs_release_disk_super(disk_super);

	return 0;

error_free_page:
	btrfs_release_disk_super(disk_super);
	blkdev_put(bdev, flags);

	return -EINVAL;
}
/*
 * Handle scanned device having its CHANGING_FSID_V2 flag set and the
 * fs_devices being created with a disk that has already completed its fsid
 * change. Such a disk can belong to an fs which has its FSID changed or to
 * one which doesn't. Handle both cases here.
 */
static struct btrfs_fs_devices *find_fsid_inprogress(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, NULL);
}

static struct btrfs_fs_devices *find_fsid_changed(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handles the case where the scanned device is part of an fs that had
	 * multiple successful changes of FSID but currently the device didn't
	 * observe it. Meaning our fsid will be different than theirs. We need
	 * to handle two subcases:
	 * 1 - The fs still continues to have different METADATA/FSID uuids.
	 * 2 - The fs is switched back to its original FSID (METADATA/FSID
	 *     are equal).
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		/* Changed UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->fsid,
			   BTRFS_FSID_SIZE) != 0)
			return fs_devices;

		/* Unchanged UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}

	return NULL;
}
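/*
 * Summary of the fsid lookup helpers (added note, not part of the original
 * file): which helper device_list_add() below uses depends on two bits of
 * the scanned super block:
 *
 *	CHANGING_FSID_V2  METADATA_UUID  helper
 *	-----------------------------------------------------------
 *	set               clear          find_fsid_inprogress()
 *	set               set            find_fsid_changed()
 *	clear             set            find_fsid_with_metadata_uuid()
 *	clear             clear          find_fsid_reverted_metadata(),
 *	                                 falling back to find_fsid()
 */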
static struct btrfs_fs_devices *find_fsid_reverted_metadata(
				struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle the case where the scanned device is part of an fs whose last
	 * metadata UUID change reverted it to the original FSID. At the same
	 * time fs_devices was first created by another constituent device
	 * which didn't fully observe the operation. This results in a
	 * btrfs_fs_devices created with metadata/fsid different AND
	 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
	 * fs_devices equal to the FSID of the disk.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    fs_devices->fsid_change)
			return fs_devices;
	}

	return NULL;
}

/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
					BTRFS_SUPER_FLAG_CHANGING_FSID_V2);

	if (fsid_change_in_progress) {
		if (!has_metadata_uuid)
			fs_devices = find_fsid_inprogress(disk_super);
		else
			fs_devices = find_fsid_changed(disk_super);
	} else if (has_metadata_uuid) {
		fs_devices = find_fsid_with_metadata_uuid(disk_super);
	} else {
		fs_devices = find_fsid_reverted_metadata(disk_super);
		if (!fs_devices)
			fs_devices = find_fsid(disk_super->fsid, NULL);
	}

	if (!fs_devices) {
		if (has_metadata_uuid)
			fs_devices = alloc_fs_devices(disk_super->fsid,
						      disk_super->metadata_uuid);
		else
			fs_devices = alloc_fs_devices(disk_super->fsid, NULL);

		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		fs_devices->fsid_change = fsid_change_in_progress;

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, devid,
					   disk_super->dev_item.uuid, NULL);

		/*
		 * If this disk has been pulled into an fs devices created by
		 * a device which had the CHANGING_FSID_V2 flag then replace the
		 * metadata_uuid/fsid values of the fs_devices.
		 */
		if (fs_devices->fsid_change &&
		    found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
			       BTRFS_FSID_SIZE);

			if (has_metadata_uuid)
				memcpy(fs_devices->metadata_uuid,
				       disk_super->metadata_uuid,
				       BTRFS_FSID_SIZE);
			else
				memcpy(fs_devices->metadata_uuid,
				       disk_super->fsid, BTRFS_FSID_SIZE);

			fs_devices->fsid_change = false;
		}
	}

	if (!device) {
		if (fs_devices->opened) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			btrfs_free_device(device);
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_assign_pointer(device->name, name);

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info(
	"BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->label, devid, found_transid, path,
				current->comm, task_pid_nr(current));
		else
			pr_info(
	"BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->fsid, devid, found_transid, path,
				current->comm, task_pid_nr(current));

	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When the FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *	a. The same device disappeared and reappeared with a
		 *	   different name, or
		 *	b. The missing-disk-which-was-replaced has
		 *	   reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further, in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transactions when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted. We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with the same uuid and devid. We keep the one
			 * with the larger generation number or the last-in if
			 * the generations are equal.
			 */
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EEXIST);
		}

		/*
		 * We are going to replace the device path for a given devid,
		 * make sure it's the same device if the device is mounted
		 */
		if (device->bdev) {
			int error;
			dev_t path_dev;

			error = lookup_bdev(path, &path_dev);
			if (error) {
				mutex_unlock(&fs_devices->device_list_mutex);
				return ERR_PTR(error);
			}

			if (device->bdev->bd_dev != path_dev) {
				mutex_unlock(&fs_devices->device_list_mutex);
				/*
				 * device->fs_info may not be reliable here, so
				 * pass in a NULL instead. This avoids a
				 * possible use-after-free when the fs_info and
				 * fs_info->sb are already torn down.
				 */
				btrfs_warn_in_rcu(NULL,
	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
						  path, devid, found_transid,
						  current->comm,
						  task_pid_nr(current));
				return ERR_PTR(-EEXIST);
			}
			btrfs_info_in_rcu(device->fs_info,
	"devid %llu device path %s changed to %s scanned by %s (%d)",
					  devid, rcu_str_deref(device->name),
					  path, current->comm,
					  task_pid_nr(current));
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);
	return device;
}
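/*
 * Illustrative sketch (not part of the original file): device_list_add() is
 * normally reached through the scan path, roughly:
 *
 *	mutex_lock(&uuid_mutex);
 *	device = btrfs_scan_one_device(path, flags, holder);
 *	mutex_unlock(&uuid_mutex);
 *
 * where btrfs_scan_one_device() (further down) reads the super block via the
 * pagecache and then calls device_list_add() to register or update the
 * in-memory btrfs_device.
 */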
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	fs_devices = alloc_fs_devices(orig->fsid, NULL);
	if (IS_ERR(fs_devices))
		return fs_devices;

	fs_devices->total_devices = orig->total_devices;

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device)) {
			ret = PTR_ERR(device);
			goto error;
		}

		/*
		 * This is ok to do without the RCU read lock because we hold
		 * the uuid mutex so nothing we touch in here is going to
		 * disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str,
						 GFP_KERNEL);
			if (!name) {
				btrfs_free_device(device);
				ret = -ENOMEM;
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(ret);
}

static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
				      struct btrfs_device **latest_dev)
{
	struct btrfs_device *device, *next;

	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			    !test_bit(BTRFS_DEV_STATE_MISSING,
				      &device->dev_state) &&
			    (!*latest_dev ||
			     device->generation > (*latest_dev)->generation)) {
				*latest_dev = device;
			}
			continue;
		}

		/*
		 * We have already validated the presence of
		 * BTRFS_DEV_REPLACE_DEVID in btrfs_init_dev_replace() so just
		 * continue.
		 */
		if (device->devid == BTRFS_DEV_REPLACE_DEVID)
			continue;

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}
}
/*
 * After we have read the system tree and know devids belonging to this
 * filesystem, remove the device which does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_fs_devices *seed_dev;

	mutex_lock(&uuid_mutex);
	__btrfs_free_extra_devids(fs_devices, &latest_dev);

	list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
		__btrfs_free_extra_devids(seed_dev, &latest_dev);

	fs_devices->latest_dev = latest_dev;

	mutex_unlock(&uuid_mutex);
}

static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	blkdev_put(device->bdev, device->mode);
}

static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->devid == BTRFS_DEV_REPLACE_DEVID)
		clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
		fs_devices->missing_devices--;

	btrfs_close_bdev(device);
	if (device->bdev) {
		fs_devices->open_devices--;
		device->bdev = NULL;
	}
	clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	btrfs_destroy_dev_zone_info(device);

	device->fs_info = NULL;
	atomic_set(&device->dev_stats_ccnt, 0);
	extent_io_tree_release(&device->alloc_state);

	/*
	 * Reset the flush error record. We might have a transient flush error
	 * in this mount, and if so we aborted the current transaction and set
	 * the fs to an error state, guaranteeing no super blocks can be further
	 * committed. However that error might be transient and if we unmount the
	 * filesystem and mount it again, we should allow the mount to succeed
	 * (btrfs_check_rw_degradable() should not fail) - if after mounting the
	 * filesystem again we still get flush errors, then we will again abort
	 * any transaction and set the error state, guaranteeing no commits of
	 * unsafe super blocks.
	 */
	device->last_flush_error = 0;

	/* Verify the device is back in a pristine state */
	ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
	ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	ASSERT(list_empty(&device->dev_alloc_list));
	ASSERT(list_empty(&device->post_commit_list));
	ASSERT(atomic_read(&device->reada_in_flight) == 0);
}

static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	lockdep_assert_held(&uuid_mutex);

	if (--fs_devices->opened > 0)
		return;

	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
		btrfs_close_one_device(device);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = false;
	fs_devices->fs_info = NULL;
}

void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	LIST_HEAD(list);
	struct btrfs_fs_devices *tmp;

	mutex_lock(&uuid_mutex);
	close_fs_devices(fs_devices);
	if (!fs_devices->opened)
		list_splice_init(&fs_devices->seed_list, &list);

	list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
		close_fs_devices(fs_devices);
		list_del(&fs_devices->seed_list);
		free_fs_devices(fs_devices);
	}
	mutex_unlock(&uuid_mutex);
}

static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
			   fmode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_device *tmp_device;

	flags |= FMODE_EXCL;

	list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
				 dev_list) {
		int ret;

		ret = btrfs_open_one_device(fs_devices, device, flags, holder);
		if (ret == 0 &&
		    (!latest_dev || device->generation > latest_dev->generation)) {
			latest_dev = device;
		} else if (ret == -ENODATA) {
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);
		}
	}
	if (fs_devices->open_devices == 0)
		return -EINVAL;

	fs_devices->opened = 1;
	fs_devices->latest_dev = latest_dev;
	fs_devices->total_rw_bytes = 0;
	fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
	fs_devices->read_policy = BTRFS_READ_POLICY_PID;

	return 0;
}

static int devid_cmp(void *priv, const struct list_head *a,
		     const struct list_head *b)
{
	const struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}
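/*
 * Added note (not part of the original file): devid_cmp() follows the
 * list_sort() comparator contract (negative/zero/positive), so sorting the
 * device list by devid is a one-liner, as done in btrfs_open_devices()
 * below:
 *
 *	list_sort(NULL, &fs_devices->devices, devid_cmp);
 */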
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);
	/*
	 * The device_list_mutex cannot be taken here in case opening the
	 * underlying device takes further locks like open_mutex.
	 *
	 * We also don't need the lock here as this is called during mount and
	 * exclusion is provided by uuid_mutex.
	 */

	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}

	return ret;
}

void btrfs_release_disk_super(struct btrfs_super_block *super)
{
	struct page *page = virt_to_page(super);

	put_page(page);
}

static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
						       u64 bytenr, u64 bytenr_orig)
{
	struct btrfs_super_block *disk_super;
	struct page *page;
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
		return ERR_PTR(-EINVAL);

	/* make sure our super fits in the page */
	if (sizeof(*disk_super) > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
		return ERR_PTR(-EINVAL);

	/* pull in the page with our super */
	page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);

	if (IS_ERR(page))
		return ERR_CAST(page);

	p = page_address(page);

	/* align our pointer to the offset of the super block */
	disk_super = p + offset_in_page(bytenr);

	if (btrfs_super_bytenr(disk_super) != bytenr_orig ||
	    btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(p);
		return ERR_PTR(-EINVAL);
	}

	if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
		disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;

	return disk_super;
}

int btrfs_forget_devices(const char *path)
{
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_free_stale_devices(strlen(path) ? path : NULL, NULL);
	mutex_unlock(&uuid_mutex);

	return ret;
}
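/*
 * Worked example (added note, not part of the original file) for the offset
 * math in btrfs_read_disk_super() above: with 4K pages and the primary super
 * block at bytenr 65536 (BTRFS_SUPER_INFO_OFFSET), index = 65536 >> 12 = 16
 * and offset_in_page(65536) = 0, so the super block sits at the start of
 * page 16 and, being smaller than PAGE_SIZE, cannot straddle a page
 * boundary.
 */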
/*
 * Look for a btrfs signature on a device. This may be called out of the mount
 * path and we are not allowed to call set_blocksize during the scan. The
 * superblock is read via the pagecache.
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
					   void *holder)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct block_device *bdev;
	u64 bytenr, bytenr_orig;
	int ret;

	lockdep_assert_held(&uuid_mutex);

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	flags |= FMODE_EXCL;

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	bytenr_orig = btrfs_sb_offset(0);
	ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr);
	if (ret) {
		/* Don't leak the bdev reference on error */
		device = ERR_PTR(ret);
		goto error_bdev_put;
	}

	disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig);
	if (IS_ERR(disk_super)) {
		device = ERR_CAST(disk_super);
		goto error_bdev_put;
	}

	device = device_list_add(path, disk_super, &new_device_added);
	if (!IS_ERR(device)) {
		if (new_device_added)
			btrfs_free_stale_devices(path, device);
	}

	btrfs_release_disk_super(disk_super);

error_bdev_put:
	blkdev_put(bdev, flags);

	return device;
}

/*
 * Try to find a chunk that intersects the [start, start + len] range and when
 * one such is found, record the end of it in *start
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{
	u64 physical_start, physical_end;

	lockdep_assert_held(&device->fs_info->chunk_mutex);

	if (!find_first_extent_bit(&device->alloc_state, *start,
				   &physical_start, &physical_end,
				   CHUNK_ALLOCATED, NULL)) {

		if (in_range(physical_start, *start, len) ||
		    in_range(*start, physical_start,
			     physical_end - physical_start)) {
			*start = physical_end + 1;
			return true;
		}
	}
	return false;
}
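/*
 * Worked example (added note, not part of the original file) for
 * contains_pending_extent() above: with an allocated chunk covering
 * [16M, 24M) and a search of *start = 20M, len = 1M, the two ranges
 * intersect, so *start is advanced to 24M (the first byte past the chunk)
 * and true is returned; the caller then retries the search from there.
 */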
static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
{
	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		/*
		 * We don't want to overwrite the superblock on the drive nor
		 * any area used by the boot loader (grub for example), so we
		 * make sure to start at an offset of at least 1MB.
		 */
		return max_t(u64, start, SZ_1M);
	case BTRFS_CHUNK_ALLOC_ZONED:
		/*
		 * We don't care about the starting region like the regular
		 * allocator, because we anyway use/reserve the first two zones
		 * for superblock logging.
		 */
		return ALIGN(start, device->zone_info->zone_size);
	default:
		BUG();
	}
}

static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
					u64 *hole_start, u64 *hole_size,
					u64 num_bytes)
{
	u64 zone_size = device->zone_info->zone_size;
	u64 pos;
	int ret;
	bool changed = false;

	ASSERT(IS_ALIGNED(*hole_start, zone_size));

	while (*hole_size > 0) {
		pos = btrfs_find_allocatable_zones(device, *hole_start,
						   *hole_start + *hole_size,
						   num_bytes);
		if (pos != *hole_start) {
			*hole_size = *hole_start + *hole_size - pos;
			*hole_start = pos;
			changed = true;
			if (*hole_size < num_bytes)
				break;
		}

		ret = btrfs_ensure_empty_zones(device, pos, num_bytes);

		/* Range is ensured to be empty */
		if (!ret)
			return changed;

		/* Given hole range was invalid (outside of device) */
		if (ret == -ERANGE) {
			*hole_start += *hole_size;
			*hole_size = 0;
			return true;
		}

		*hole_start += zone_size;
		*hole_size -= zone_size;
		changed = true;
	}

	return changed;
}

/**
 * dev_extent_hole_check - check if specified hole is suitable for allocation
 * @device:	the device which we have the hole
 * @hole_start:	starting position of the hole
 * @hole_size:	the size of the hole
 * @num_bytes:	the size of the free space that we need
 *
 * This function may modify @hole_start and @hole_size to reflect the suitable
 * position for allocation. Returns true if the hole position is updated,
 * false otherwise.
 */
static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
				  u64 *hole_size, u64 num_bytes)
{
	bool changed = false;
	u64 hole_end = *hole_start + *hole_size;

	for (;;) {
		/*
		 * Check before we set max_hole_start, otherwise we could end up
		 * sending back this offset anyway.
		 */
		if (contains_pending_extent(device, hole_start, *hole_size)) {
			if (hole_end >= *hole_start)
				*hole_size = hole_end - *hole_start;
			else
				*hole_size = 0;
			changed = true;
		}

		switch (device->fs_devices->chunk_alloc_policy) {
		case BTRFS_CHUNK_ALLOC_REGULAR:
			/* No extra check */
			break;
		case BTRFS_CHUNK_ALLOC_ZONED:
			if (dev_extent_hole_check_zoned(device, hole_start,
							hole_size, num_bytes)) {
				changed = true;
				/*
				 * The changed hole can contain pending extent.
				 * Loop again to check that.
				 */
				continue;
			}
			break;
		default:
			BUG();
		}

		break;
	}

	return changed;
}
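/*
 * Worked example (added note, not part of the original file) for
 * dev_extent_hole_check() above, with the regular allocation policy: given
 * a hole [*hole_start = 16M, *hole_size = 32M) and a pending chunk covering
 * [20M, 24M), contains_pending_extent() advances *hole_start past the chunk
 * to 24M, the hole is re-clamped to end at the original hole_end of 48M
 * (so *hole_size becomes 24M), and true is returned so the caller knows the
 * hole moved.
 */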
/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size of the
 *		  max free space if we don't find suitable free space
 *
 * This uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents.
 *
 * @start is used to store the start of the free space if we find one. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 *
 * NOTE: This function will search *commit* root of device tree, and does extra
 * check to ensure dev extents are not double allocated.
 * This makes the function safe to allocate dev extents but may not report
 * correct usable device space, as device extent freed in current transaction
 * is not reported as available.
 */
static int find_free_dev_extent_start(struct btrfs_device *device,
				u64 num_bytes, u64 search_start, u64 *start,
				u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	search_start = dev_extent_search_start(device, search_start);

	WARN_ON(device->zone_info &&
		!IS_ALIGNED(num_bytes, device->zone_info->zone_size));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_backwards(root, &key, path);
	if (ret < 0)
		goto out;

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;
			dev_extent_hole_check(device, &search_start, &hole_size,
					      num_bytes);

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;
		if (dev_extent_hole_check(device, &search_start, &hole_size,
					  num_bytes)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(device, num_bytes, 0, start, len);
}

static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start, u64 *dev_extent_len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret == 0)
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
out:
	btrfs_free_path(path);
	return ret;
}

static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;

	em_tree = &fs_info->mapping_tree;
	read_lock(&em_tree->lock);
	n = rb_last(&em_tree->map.rb_root);
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
	}
	read_unlock(&em_tree->lock);

	return ret;
}

static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	if (ret == 0) {
		/* Corruption */
		btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
		ret = -EUCLEAN;
		goto error;
	}

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * The device information is stored in the chunk root.
 * The btrfs_device struct should be fully filled in.
 */
static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
			      struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
				      &key, sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
			    ptr, BTRFS_FSID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
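/*
 * Added note (not part of the original file): the item written above lands
 * in the chunk tree under the key
 *
 *	(BTRFS_DEV_ITEMS_OBJECTID, BTRFS_DEV_ITEM_KEY, devid)
 *
 * which is also what find_next_devid() above walks backwards from
 * (objectid, type, (u64)-1) to find the highest allocated devid.
 */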
/*
 * Function to update ctime/mtime for a given device path.
 * Mainly used for ctime/mtime based probe like libblkid.
 */
static void update_dev_time(struct block_device *bdev)
{
	struct inode *inode = bdev->bd_inode;
	struct timespec64 now;

	/* Shouldn't happen but just in case. */
	if (!inode)
		return;

	now = current_time(inode);
	generic_update_time(inode, &now, S_MTIME | S_CTIME);
}

static int btrfs_rm_dev_item(struct btrfs_device *device)
{
	struct btrfs_root *root = device->fs_info->chunk_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	}

out:
	btrfs_free_path(path);
	if (!ret)
		ret = btrfs_commit_transaction(trans);
	return ret;
}

/*
 * Verify that @num_devices satisfies the RAID profile constraints in the whole
 * filesystem. It's up to the caller to adjust that number regarding e.g.
 * device replace.
 */
static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
					u64 num_devices)
{
	u64 all_avail;
	unsigned seq;
	int i;

	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		all_avail = fs_info->avail_data_alloc_bits |
			    fs_info->avail_system_alloc_bits |
			    fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
		if (!(all_avail & btrfs_raid_array[i].bg_flag))
			continue;

		if (num_devices < btrfs_raid_array[i].devs_min)
			return btrfs_raid_array[i].mindev_error;
	}

	return 0;
}

static struct btrfs_device *btrfs_find_next_active_device(
		struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
{
	struct btrfs_device *next_device;

	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
		if (next_device != device &&
		    !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
		    && next_device->bdev)
			return next_device;
	}

	return NULL;
}

/*
 * Helper function to check if the given device is part of s_bdev / latest_dev
 * and replace it with the provided or the next active device. In the context
 * where this function is called, there should always be another device (or
 * this_dev) which is active.
 */
void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
					    struct btrfs_device *next_device)
{
	struct btrfs_fs_info *fs_info = device->fs_info;

	if (!next_device)
		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
							    device);
	ASSERT(next_device);

	if (fs_info->sb->s_bdev &&
	    (fs_info->sb->s_bdev == device->bdev))
		fs_info->sb->s_bdev = next_device->bdev;

	if (fs_info->fs_devices->latest_dev->bdev == device->bdev)
		fs_info->fs_devices->latest_dev = next_device;
}
2015 */ 2016 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info) 2017 { 2018 u64 num_devices = fs_info->fs_devices->num_devices; 2019 2020 down_read(&fs_info->dev_replace.rwsem); 2021 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) { 2022 ASSERT(num_devices > 1); 2023 num_devices--; 2024 } 2025 up_read(&fs_info->dev_replace.rwsem); 2026 2027 return num_devices; 2028 } 2029 2030 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info, 2031 struct block_device *bdev, 2032 const char *device_path) 2033 { 2034 struct btrfs_super_block *disk_super; 2035 int copy_num; 2036 2037 if (!bdev) 2038 return; 2039 2040 for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) { 2041 struct page *page; 2042 int ret; 2043 2044 disk_super = btrfs_read_dev_one_super(bdev, copy_num); 2045 if (IS_ERR(disk_super)) 2046 continue; 2047 2048 if (bdev_is_zoned(bdev)) { 2049 btrfs_reset_sb_log_zones(bdev, copy_num); 2050 continue; 2051 } 2052 2053 memset(&disk_super->magic, 0, sizeof(disk_super->magic)); 2054 2055 page = virt_to_page(disk_super); 2056 set_page_dirty(page); 2057 lock_page(page); 2058 /* write_one_page() unlocks the page */ 2059 ret = write_one_page(page); 2060 if (ret) 2061 btrfs_warn(fs_info, 2062 "error clearing superblock number %d (%d)", 2063 copy_num, ret); 2064 btrfs_release_disk_super(disk_super); 2065 2066 } 2067 2068 /* Notify udev that device has changed */ 2069 btrfs_kobject_uevent(bdev, KOBJ_CHANGE); 2070 2071 /* Update ctime/mtime for device path for libblkid */ 2072 update_dev_time(bdev); 2073 } 2074 2075 int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path, 2076 u64 devid, struct block_device **bdev, fmode_t *mode) 2077 { 2078 struct btrfs_device *device; 2079 struct btrfs_fs_devices *cur_devices; 2080 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2081 u64 num_devices; 2082 int ret = 0; 2083 2084 /* 2085 * The device list in fs_devices is accessed without locks (neither 2086 * uuid_mutex nor device_list_mutex) as it won't change on a mounted 2087 * filesystem and another device rm cannot run.
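 *
 * (Editor's illustration, not upstream text: the first check below is why
 * one of two raid1 devices cannot be removed; with num_devices == 2:
 *
 *	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
 *	// 1 < btrfs_raid_array[BTRFS_RAID_RAID1].devs_min (2)
 *	// => ret == BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET
 * )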
2088 */ 2089 num_devices = btrfs_num_devices(fs_info); 2090 2091 ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1); 2092 if (ret) 2093 goto out; 2094 2095 device = btrfs_find_device_by_devspec(fs_info, devid, device_path); 2096 2097 if (IS_ERR(device)) { 2098 if (PTR_ERR(device) == -ENOENT && 2099 device_path && strcmp(device_path, "missing") == 0) 2100 ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND; 2101 else 2102 ret = PTR_ERR(device); 2103 goto out; 2104 } 2105 2106 if (btrfs_pinned_by_swapfile(fs_info, device)) { 2107 btrfs_warn_in_rcu(fs_info, 2108 "cannot remove device %s (devid %llu) due to active swapfile", 2109 rcu_str_deref(device->name), device->devid); 2110 ret = -ETXTBSY; 2111 goto out; 2112 } 2113 2114 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 2115 ret = BTRFS_ERROR_DEV_TGT_REPLACE; 2116 goto out; 2117 } 2118 2119 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 2120 fs_info->fs_devices->rw_devices == 1) { 2121 ret = BTRFS_ERROR_DEV_ONLY_WRITABLE; 2122 goto out; 2123 } 2124 2125 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2126 mutex_lock(&fs_info->chunk_mutex); 2127 list_del_init(&device->dev_alloc_list); 2128 device->fs_devices->rw_devices--; 2129 mutex_unlock(&fs_info->chunk_mutex); 2130 } 2131 2132 ret = btrfs_shrink_device(device, 0); 2133 if (!ret) 2134 btrfs_reada_remove_dev(device); 2135 if (ret) 2136 goto error_undo; 2137 2138 /* 2139 * TODO: the superblock still includes this device in its num_devices 2140 * counter although write_all_supers() is not locked out. This 2141 * could give a filesystem state which requires a degraded mount. 2142 */ 2143 ret = btrfs_rm_dev_item(device); 2144 if (ret) 2145 goto error_undo; 2146 2147 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2148 btrfs_scrub_cancel_dev(device); 2149 2150 /* 2151 * the device list mutex makes sure that we don't change 2152 * the device list while someone else is writing out all 2153 * the device supers. Whoever is writing all supers, should 2154 * lock the device list mutex before getting the number of 2155 * devices in the super block (super_copy). Conversely, 2156 * whoever updates the number of devices in the super block 2157 * (super_copy) should hold the device list mutex. 2158 */ 2159 2160 /* 2161 * In normal cases the cur_devices == fs_devices. But in case 2162 * of deleting a seed device, the cur_devices should point to 2163 * its own fs_devices listed under the fs_devices->seed_list. 2164 */ 2165 cur_devices = device->fs_devices; 2166 mutex_lock(&fs_devices->device_list_mutex); 2167 list_del_rcu(&device->dev_list); 2168 2169 cur_devices->num_devices--; 2170 cur_devices->total_devices--; 2171 /* Update total_devices of the parent fs_devices if it's seed */ 2172 if (cur_devices != fs_devices) 2173 fs_devices->total_devices--; 2174 2175 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) 2176 cur_devices->missing_devices--; 2177 2178 btrfs_assign_next_active_device(device, NULL); 2179 2180 if (device->bdev) { 2181 cur_devices->open_devices--; 2182 /* remove sysfs entry */ 2183 btrfs_sysfs_remove_device(device); 2184 } 2185 2186 num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1; 2187 btrfs_set_super_num_devices(fs_info->super_copy, num_devices); 2188 mutex_unlock(&fs_devices->device_list_mutex); 2189 2190 /* 2191 * At this point, the device is zero sized and detached from the 2192 * devices list. All that's left is to zero out the old supers and 2193 * free the device. 
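 *
 * (Editor's note, illustrative: "zero sized" because the earlier
 * btrfs_shrink_device(device, 0) relocated everything off the device, and
 * "detached" because of the list_del_rcu() done under device_list_mutex.)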
2194 * 2195 * We cannot call btrfs_close_bdev() here because we're holding the sb 2196 * write lock, and blkdev_put() will pull in the ->open_mutex on the 2197 * block device and it's dependencies. Instead just flush the device 2198 * and let the caller do the final blkdev_put. 2199 */ 2200 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2201 btrfs_scratch_superblocks(fs_info, device->bdev, 2202 device->name->str); 2203 if (device->bdev) { 2204 sync_blockdev(device->bdev); 2205 invalidate_bdev(device->bdev); 2206 } 2207 } 2208 2209 *bdev = device->bdev; 2210 *mode = device->mode; 2211 synchronize_rcu(); 2212 btrfs_free_device(device); 2213 2214 if (cur_devices->open_devices == 0) { 2215 list_del_init(&cur_devices->seed_list); 2216 close_fs_devices(cur_devices); 2217 free_fs_devices(cur_devices); 2218 } 2219 2220 out: 2221 return ret; 2222 2223 error_undo: 2224 btrfs_reada_undo_remove_dev(device); 2225 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2226 mutex_lock(&fs_info->chunk_mutex); 2227 list_add(&device->dev_alloc_list, 2228 &fs_devices->alloc_list); 2229 device->fs_devices->rw_devices++; 2230 mutex_unlock(&fs_info->chunk_mutex); 2231 } 2232 goto out; 2233 } 2234 2235 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev) 2236 { 2237 struct btrfs_fs_devices *fs_devices; 2238 2239 lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex); 2240 2241 /* 2242 * in case of fs with no seed, srcdev->fs_devices will point 2243 * to fs_devices of fs_info. However when the dev being replaced is 2244 * a seed dev it will point to the seed's local fs_devices. In short 2245 * srcdev will have its correct fs_devices in both the cases. 2246 */ 2247 fs_devices = srcdev->fs_devices; 2248 2249 list_del_rcu(&srcdev->dev_list); 2250 list_del(&srcdev->dev_alloc_list); 2251 fs_devices->num_devices--; 2252 if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state)) 2253 fs_devices->missing_devices--; 2254 2255 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) 2256 fs_devices->rw_devices--; 2257 2258 if (srcdev->bdev) 2259 fs_devices->open_devices--; 2260 } 2261 2262 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev) 2263 { 2264 struct btrfs_fs_devices *fs_devices = srcdev->fs_devices; 2265 2266 mutex_lock(&uuid_mutex); 2267 2268 btrfs_close_bdev(srcdev); 2269 synchronize_rcu(); 2270 btrfs_free_device(srcdev); 2271 2272 /* if this is no devs we rather delete the fs_devices */ 2273 if (!fs_devices->num_devices) { 2274 /* 2275 * On a mounted FS, num_devices can't be zero unless it's a 2276 * seed. In case of a seed device being replaced, the replace 2277 * target added to the sprout FS, so there will be no more 2278 * device left under the seed FS. 
2279 */ 2280 ASSERT(fs_devices->seeding); 2281 2282 list_del_init(&fs_devices->seed_list); 2283 close_fs_devices(fs_devices); 2284 free_fs_devices(fs_devices); 2285 } 2286 mutex_unlock(&uuid_mutex); 2287 } 2288 2289 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev) 2290 { 2291 struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices; 2292 2293 mutex_lock(&fs_devices->device_list_mutex); 2294 2295 btrfs_sysfs_remove_device(tgtdev); 2296 2297 if (tgtdev->bdev) 2298 fs_devices->open_devices--; 2299 2300 fs_devices->num_devices--; 2301 2302 btrfs_assign_next_active_device(tgtdev, NULL); 2303 2304 list_del_rcu(&tgtdev->dev_list); 2305 2306 mutex_unlock(&fs_devices->device_list_mutex); 2307 2308 btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev, 2309 tgtdev->name->str); 2310 2311 btrfs_close_bdev(tgtdev); 2312 synchronize_rcu(); 2313 btrfs_free_device(tgtdev); 2314 } 2315 2316 static struct btrfs_device *btrfs_find_device_by_path( 2317 struct btrfs_fs_info *fs_info, const char *device_path) 2318 { 2319 int ret = 0; 2320 struct btrfs_super_block *disk_super; 2321 u64 devid; 2322 u8 *dev_uuid; 2323 struct block_device *bdev; 2324 struct btrfs_device *device; 2325 2326 ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ, 2327 fs_info->bdev_holder, 0, &bdev, &disk_super); 2328 if (ret) 2329 return ERR_PTR(ret); 2330 2331 devid = btrfs_stack_device_id(&disk_super->dev_item); 2332 dev_uuid = disk_super->dev_item.uuid; 2333 if (btrfs_fs_incompat(fs_info, METADATA_UUID)) 2334 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid, 2335 disk_super->metadata_uuid); 2336 else 2337 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid, 2338 disk_super->fsid); 2339 2340 btrfs_release_disk_super(disk_super); 2341 if (!device) 2342 device = ERR_PTR(-ENOENT); 2343 blkdev_put(bdev, FMODE_READ); 2344 return device; 2345 } 2346 2347 /* 2348 * Lookup a device given by device id, or the path if the id is 0. 2349 */ 2350 struct btrfs_device *btrfs_find_device_by_devspec( 2351 struct btrfs_fs_info *fs_info, u64 devid, 2352 const char *device_path) 2353 { 2354 struct btrfs_device *device; 2355 2356 if (devid) { 2357 device = btrfs_find_device(fs_info->fs_devices, devid, NULL, 2358 NULL); 2359 if (!device) 2360 return ERR_PTR(-ENOENT); 2361 return device; 2362 } 2363 2364 if (!device_path || !device_path[0]) 2365 return ERR_PTR(-EINVAL); 2366 2367 if (strcmp(device_path, "missing") == 0) { 2368 /* Find first missing device */ 2369 list_for_each_entry(device, &fs_info->fs_devices->devices, 2370 dev_list) { 2371 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 2372 &device->dev_state) && !device->bdev) 2373 return device; 2374 } 2375 return ERR_PTR(-ENOENT); 2376 } 2377 2378 return btrfs_find_device_by_path(fs_info, device_path); 2379 } 2380 2381 /* 2382 * does all the dirty work required for changing file system's UUID. 
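 *
 * (Editor's sketch of the result, illustrative only:
 *
 *	before:	fs_devices { fsid=SEED, seeding=1, devices=[A] }
 *	after:	fs_devices { fsid=NEW,  seeding=0, devices=[] }
 *		    `-seed_list-> seed_devices { fsid=SEED, devices=[A] }
 *
 * the caller then adds the new writable device to fs_devices.)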
2383 */ 2384 static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info) 2385 { 2386 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2387 struct btrfs_fs_devices *old_devices; 2388 struct btrfs_fs_devices *seed_devices; 2389 struct btrfs_super_block *disk_super = fs_info->super_copy; 2390 struct btrfs_device *device; 2391 u64 super_flags; 2392 2393 lockdep_assert_held(&uuid_mutex); 2394 if (!fs_devices->seeding) 2395 return -EINVAL; 2396 2397 /* 2398 * Private copy of the seed devices, anchored at 2399 * fs_info->fs_devices->seed_list 2400 */ 2401 seed_devices = alloc_fs_devices(NULL, NULL); 2402 if (IS_ERR(seed_devices)) 2403 return PTR_ERR(seed_devices); 2404 2405 /* 2406 * It's necessary to retain a copy of the original seed fs_devices in 2407 * fs_uuids so that filesystems which have been seeded can successfully 2408 * reference the seed device from open_seed_devices. This also supports 2409 * multiple seed filesystems. 2410 */ 2411 old_devices = clone_fs_devices(fs_devices); 2412 if (IS_ERR(old_devices)) { 2413 kfree(seed_devices); 2414 return PTR_ERR(old_devices); 2415 } 2416 2417 list_add(&old_devices->fs_list, &fs_uuids); 2418 2419 memcpy(seed_devices, fs_devices, sizeof(*seed_devices)); 2420 seed_devices->opened = 1; 2421 INIT_LIST_HEAD(&seed_devices->devices); 2422 INIT_LIST_HEAD(&seed_devices->alloc_list); 2423 mutex_init(&seed_devices->device_list_mutex); 2424 2425 mutex_lock(&fs_devices->device_list_mutex); 2426 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices, 2427 synchronize_rcu); 2428 list_for_each_entry(device, &seed_devices->devices, dev_list) 2429 device->fs_devices = seed_devices; 2430 2431 fs_devices->seeding = false; 2432 fs_devices->num_devices = 0; 2433 fs_devices->open_devices = 0; 2434 fs_devices->missing_devices = 0; 2435 fs_devices->rotating = false; 2436 list_add(&seed_devices->seed_list, &fs_devices->seed_list); 2437 2438 generate_random_uuid(fs_devices->fsid); 2439 memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE); 2440 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); 2441 mutex_unlock(&fs_devices->device_list_mutex); 2442 2443 super_flags = btrfs_super_flags(disk_super) & 2444 ~BTRFS_SUPER_FLAG_SEEDING; 2445 btrfs_set_super_flags(disk_super, super_flags); 2446 2447 return 0; 2448 } 2449 2450 /* 2451 * Store the expected generation for seed devices in device items.
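 *
 * (Editor's note: the loop below walks every item with key
 * (BTRFS_DEV_ITEMS_OBJECTID, BTRFS_DEV_ITEM_KEY, devid) in the chunk tree
 * and, for devices still belonging to a seeding fs_devices, rewrites the
 * on-disk generation to match device->generation.)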
2452 */ 2453 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans) 2454 { 2455 struct btrfs_fs_info *fs_info = trans->fs_info; 2456 struct btrfs_root *root = fs_info->chunk_root; 2457 struct btrfs_path *path; 2458 struct extent_buffer *leaf; 2459 struct btrfs_dev_item *dev_item; 2460 struct btrfs_device *device; 2461 struct btrfs_key key; 2462 u8 fs_uuid[BTRFS_FSID_SIZE]; 2463 u8 dev_uuid[BTRFS_UUID_SIZE]; 2464 u64 devid; 2465 int ret; 2466 2467 path = btrfs_alloc_path(); 2468 if (!path) 2469 return -ENOMEM; 2470 2471 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2472 key.offset = 0; 2473 key.type = BTRFS_DEV_ITEM_KEY; 2474 2475 while (1) { 2476 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2477 if (ret < 0) 2478 goto error; 2479 2480 leaf = path->nodes[0]; 2481 next_slot: 2482 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 2483 ret = btrfs_next_leaf(root, path); 2484 if (ret > 0) 2485 break; 2486 if (ret < 0) 2487 goto error; 2488 leaf = path->nodes[0]; 2489 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2490 btrfs_release_path(path); 2491 continue; 2492 } 2493 2494 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2495 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID || 2496 key.type != BTRFS_DEV_ITEM_KEY) 2497 break; 2498 2499 dev_item = btrfs_item_ptr(leaf, path->slots[0], 2500 struct btrfs_dev_item); 2501 devid = btrfs_device_id(leaf, dev_item); 2502 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 2503 BTRFS_UUID_SIZE); 2504 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 2505 BTRFS_FSID_SIZE); 2506 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid, 2507 fs_uuid); 2508 BUG_ON(!device); /* Logic error */ 2509 2510 if (device->fs_devices->seeding) { 2511 btrfs_set_device_generation(leaf, dev_item, 2512 device->generation); 2513 btrfs_mark_buffer_dirty(leaf); 2514 } 2515 2516 path->slots[0]++; 2517 goto next_slot; 2518 } 2519 ret = 0; 2520 error: 2521 btrfs_free_path(path); 2522 return ret; 2523 } 2524 2525 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path) 2526 { 2527 struct btrfs_root *root = fs_info->dev_root; 2528 struct request_queue *q; 2529 struct btrfs_trans_handle *trans; 2530 struct btrfs_device *device; 2531 struct block_device *bdev; 2532 struct super_block *sb = fs_info->sb; 2533 struct rcu_string *name; 2534 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2535 u64 orig_super_total_bytes; 2536 u64 orig_super_num_devices; 2537 int seeding_dev = 0; 2538 int ret = 0; 2539 bool locked = false; 2540 2541 if (sb_rdonly(sb) && !fs_devices->seeding) 2542 return -EROFS; 2543 2544 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, 2545 fs_info->bdev_holder); 2546 if (IS_ERR(bdev)) 2547 return PTR_ERR(bdev); 2548 2549 if (!btrfs_check_device_zone_type(fs_info, bdev)) { 2550 ret = -EINVAL; 2551 goto error; 2552 } 2553 2554 if (fs_devices->seeding) { 2555 seeding_dev = 1; 2556 down_write(&sb->s_umount); 2557 mutex_lock(&uuid_mutex); 2558 locked = true; 2559 } 2560 2561 sync_blockdev(bdev); 2562 2563 rcu_read_lock(); 2564 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) { 2565 if (device->bdev == bdev) { 2566 ret = -EEXIST; 2567 rcu_read_unlock(); 2568 goto error; 2569 } 2570 } 2571 rcu_read_unlock(); 2572 2573 device = btrfs_alloc_device(fs_info, NULL, NULL); 2574 if (IS_ERR(device)) { 2575 /* we can safely leave the fs_devices entry around */ 2576 ret = PTR_ERR(device); 2577 goto error; 2578 } 2579 2580 name = rcu_string_strdup(device_path, 
GFP_KERNEL); 2581 if (!name) { 2582 ret = -ENOMEM; 2583 goto error_free_device; 2584 } 2585 rcu_assign_pointer(device->name, name); 2586 2587 device->fs_info = fs_info; 2588 device->bdev = bdev; 2589 2590 ret = btrfs_get_dev_zone_info(device); 2591 if (ret) 2592 goto error_free_device; 2593 2594 trans = btrfs_start_transaction(root, 0); 2595 if (IS_ERR(trans)) { 2596 ret = PTR_ERR(trans); 2597 goto error_free_zone; 2598 } 2599 2600 q = bdev_get_queue(bdev); 2601 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 2602 device->generation = trans->transid; 2603 device->io_width = fs_info->sectorsize; 2604 device->io_align = fs_info->sectorsize; 2605 device->sector_size = fs_info->sectorsize; 2606 device->total_bytes = round_down(i_size_read(bdev->bd_inode), 2607 fs_info->sectorsize); 2608 device->disk_total_bytes = device->total_bytes; 2609 device->commit_total_bytes = device->total_bytes; 2610 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2611 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 2612 device->mode = FMODE_EXCL; 2613 device->dev_stats_valid = 1; 2614 set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE); 2615 2616 if (seeding_dev) { 2617 btrfs_clear_sb_rdonly(sb); 2618 ret = btrfs_prepare_sprout(fs_info); 2619 if (ret) { 2620 btrfs_abort_transaction(trans, ret); 2621 goto error_trans; 2622 } 2623 btrfs_assign_next_active_device(fs_info->fs_devices->latest_dev, 2624 device); 2625 } 2626 2627 device->fs_devices = fs_devices; 2628 2629 mutex_lock(&fs_devices->device_list_mutex); 2630 mutex_lock(&fs_info->chunk_mutex); 2631 list_add_rcu(&device->dev_list, &fs_devices->devices); 2632 list_add(&device->dev_alloc_list, &fs_devices->alloc_list); 2633 fs_devices->num_devices++; 2634 fs_devices->open_devices++; 2635 fs_devices->rw_devices++; 2636 fs_devices->total_devices++; 2637 fs_devices->total_rw_bytes += device->total_bytes; 2638 2639 atomic64_add(device->total_bytes, &fs_info->free_chunk_space); 2640 2641 if (!blk_queue_nonrot(q)) 2642 fs_devices->rotating = true; 2643 2644 orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy); 2645 btrfs_set_super_total_bytes(fs_info->super_copy, 2646 round_down(orig_super_total_bytes + device->total_bytes, 2647 fs_info->sectorsize)); 2648 2649 orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy); 2650 btrfs_set_super_num_devices(fs_info->super_copy, 2651 orig_super_num_devices + 1); 2652 2653 /* 2654 * we've got more storage, clear any full flags on the space 2655 * infos 2656 */ 2657 btrfs_clear_space_info_full(fs_info); 2658 2659 mutex_unlock(&fs_info->chunk_mutex); 2660 2661 /* Add sysfs device entry */ 2662 btrfs_sysfs_add_device(device); 2663 2664 mutex_unlock(&fs_devices->device_list_mutex); 2665 2666 if (seeding_dev) { 2667 mutex_lock(&fs_info->chunk_mutex); 2668 ret = init_first_rw_device(trans); 2669 mutex_unlock(&fs_info->chunk_mutex); 2670 if (ret) { 2671 btrfs_abort_transaction(trans, ret); 2672 goto error_sysfs; 2673 } 2674 } 2675 2676 ret = btrfs_add_dev_item(trans, device); 2677 if (ret) { 2678 btrfs_abort_transaction(trans, ret); 2679 goto error_sysfs; 2680 } 2681 2682 if (seeding_dev) { 2683 ret = btrfs_finish_sprout(trans); 2684 if (ret) { 2685 btrfs_abort_transaction(trans, ret); 2686 goto error_sysfs; 2687 } 2688 2689 /* 2690 * fs_devices now represents the newly sprouted filesystem and 2691 * its fsid has been changed by btrfs_prepare_sprout 2692 */ 2693 btrfs_sysfs_update_sprout_fsid(fs_devices); 2694 } 2695 2696 ret = btrfs_commit_transaction(trans); 2697 2698 if 
(seeding_dev) { 2699 mutex_unlock(&uuid_mutex); 2700 up_write(&sb->s_umount); 2701 locked = false; 2702 2703 if (ret) /* transaction commit */ 2704 return ret; 2705 2706 ret = btrfs_relocate_sys_chunks(fs_info); 2707 if (ret < 0) 2708 btrfs_handle_fs_error(fs_info, ret, 2709 "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command."); 2710 trans = btrfs_attach_transaction(root); 2711 if (IS_ERR(trans)) { 2712 if (PTR_ERR(trans) == -ENOENT) 2713 return 0; 2714 ret = PTR_ERR(trans); 2715 trans = NULL; 2716 goto error_sysfs; 2717 } 2718 ret = btrfs_commit_transaction(trans); 2719 } 2720 2721 /* 2722 * Now that we have written a new super block to this device, check all 2723 * other fs_devices lists to see if device_path alienates any other scanned 2724 * device. 2725 * We can ignore the return value as it typically returns -EINVAL and 2726 * only succeeds if the device was an alien. 2727 */ 2728 btrfs_forget_devices(device_path); 2729 2730 /* Update ctime/mtime for blkid or udev */ 2731 update_dev_time(bdev); 2732 2733 return ret; 2734 2735 error_sysfs: 2736 btrfs_sysfs_remove_device(device); 2737 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2738 mutex_lock(&fs_info->chunk_mutex); 2739 list_del_rcu(&device->dev_list); 2740 list_del(&device->dev_alloc_list); 2741 fs_info->fs_devices->num_devices--; 2742 fs_info->fs_devices->open_devices--; 2743 fs_info->fs_devices->rw_devices--; 2744 fs_info->fs_devices->total_devices--; 2745 fs_info->fs_devices->total_rw_bytes -= device->total_bytes; 2746 atomic64_sub(device->total_bytes, &fs_info->free_chunk_space); 2747 btrfs_set_super_total_bytes(fs_info->super_copy, 2748 orig_super_total_bytes); 2749 btrfs_set_super_num_devices(fs_info->super_copy, 2750 orig_super_num_devices); 2751 mutex_unlock(&fs_info->chunk_mutex); 2752 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2753 error_trans: 2754 if (seeding_dev) 2755 btrfs_set_sb_rdonly(sb); 2756 if (trans) 2757 btrfs_end_transaction(trans); 2758 error_free_zone: 2759 btrfs_destroy_dev_zone_info(device); 2760 error_free_device: 2761 btrfs_free_device(device); 2762 error: 2763 blkdev_put(bdev, FMODE_EXCL); 2764 if (locked) { 2765 mutex_unlock(&uuid_mutex); 2766 up_write(&sb->s_umount); 2767 } 2768 return ret; 2769 } 2770 2771 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans, 2772 struct btrfs_device *device) 2773 { 2774 int ret; 2775 struct btrfs_path *path; 2776 struct btrfs_root *root = device->fs_info->chunk_root; 2777 struct btrfs_dev_item *dev_item; 2778 struct extent_buffer *leaf; 2779 struct btrfs_key key; 2780 2781 path = btrfs_alloc_path(); 2782 if (!path) 2783 return -ENOMEM; 2784 2785 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2786 key.type = BTRFS_DEV_ITEM_KEY; 2787 key.offset = device->devid; 2788 2789 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2790 if (ret < 0) 2791 goto out; 2792 2793 if (ret > 0) { 2794 ret = -ENOENT; 2795 goto out; 2796 } 2797 2798 leaf = path->nodes[0]; 2799 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 2800 2801 btrfs_set_device_id(leaf, dev_item, device->devid); 2802 btrfs_set_device_type(leaf, dev_item, device->type); 2803 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 2804 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 2805 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 2806 btrfs_set_device_total_bytes(leaf, dev_item, 2807 btrfs_device_get_disk_total_bytes(device)); 2808
btrfs_set_device_bytes_used(leaf, dev_item, 2809 btrfs_device_get_bytes_used(device)); 2810 btrfs_mark_buffer_dirty(leaf); 2811 2812 out: 2813 btrfs_free_path(path); 2814 return ret; 2815 } 2816 2817 int btrfs_grow_device(struct btrfs_trans_handle *trans, 2818 struct btrfs_device *device, u64 new_size) 2819 { 2820 struct btrfs_fs_info *fs_info = device->fs_info; 2821 struct btrfs_super_block *super_copy = fs_info->super_copy; 2822 u64 old_total; 2823 u64 diff; 2824 2825 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 2826 return -EACCES; 2827 2828 new_size = round_down(new_size, fs_info->sectorsize); 2829 2830 mutex_lock(&fs_info->chunk_mutex); 2831 old_total = btrfs_super_total_bytes(super_copy); 2832 diff = round_down(new_size - device->total_bytes, fs_info->sectorsize); 2833 2834 if (new_size <= device->total_bytes || 2835 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 2836 mutex_unlock(&fs_info->chunk_mutex); 2837 return -EINVAL; 2838 } 2839 2840 btrfs_set_super_total_bytes(super_copy, 2841 round_down(old_total + diff, fs_info->sectorsize)); 2842 device->fs_devices->total_rw_bytes += diff; 2843 2844 btrfs_device_set_total_bytes(device, new_size); 2845 btrfs_device_set_disk_total_bytes(device, new_size); 2846 btrfs_clear_space_info_full(device->fs_info); 2847 if (list_empty(&device->post_commit_list)) 2848 list_add_tail(&device->post_commit_list, 2849 &trans->transaction->dev_update_list); 2850 mutex_unlock(&fs_info->chunk_mutex); 2851 2852 return btrfs_update_device(trans, device); 2853 } 2854 2855 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 2856 { 2857 struct btrfs_fs_info *fs_info = trans->fs_info; 2858 struct btrfs_root *root = fs_info->chunk_root; 2859 int ret; 2860 struct btrfs_path *path; 2861 struct btrfs_key key; 2862 2863 path = btrfs_alloc_path(); 2864 if (!path) 2865 return -ENOMEM; 2866 2867 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2868 key.offset = chunk_offset; 2869 key.type = BTRFS_CHUNK_ITEM_KEY; 2870 2871 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 2872 if (ret < 0) 2873 goto out; 2874 else if (ret > 0) { /* Logic error or corruption */ 2875 btrfs_handle_fs_error(fs_info, -ENOENT, 2876 "Failed lookup while freeing chunk."); 2877 ret = -ENOENT; 2878 goto out; 2879 } 2880 2881 ret = btrfs_del_item(trans, root, path); 2882 if (ret < 0) 2883 btrfs_handle_fs_error(fs_info, ret, 2884 "Failed to delete chunk item."); 2885 out: 2886 btrfs_free_path(path); 2887 return ret; 2888 } 2889 2890 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 2891 { 2892 struct btrfs_super_block *super_copy = fs_info->super_copy; 2893 struct btrfs_disk_key *disk_key; 2894 struct btrfs_chunk *chunk; 2895 u8 *ptr; 2896 int ret = 0; 2897 u32 num_stripes; 2898 u32 array_size; 2899 u32 len = 0; 2900 u32 cur; 2901 struct btrfs_key key; 2902 2903 lockdep_assert_held(&fs_info->chunk_mutex); 2904 array_size = btrfs_super_sys_array_size(super_copy); 2905 2906 ptr = super_copy->sys_chunk_array; 2907 cur = 0; 2908 2909 while (cur < array_size) { 2910 disk_key = (struct btrfs_disk_key *)ptr; 2911 btrfs_disk_key_to_cpu(&key, disk_key); 2912 2913 len = sizeof(*disk_key); 2914 2915 if (key.type == BTRFS_CHUNK_ITEM_KEY) { 2916 chunk = (struct btrfs_chunk *)(ptr + len); 2917 num_stripes = btrfs_stack_chunk_num_stripes(chunk); 2918 len += btrfs_chunk_item_size(num_stripes); 2919 } else { 2920 ret = -EIO; 2921 break; 2922 } 2923 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID && 2924 key.offset == 
chunk_offset) { 2925 memmove(ptr, ptr + len, array_size - (cur + len)); 2926 array_size -= len; 2927 btrfs_set_super_sys_array_size(super_copy, array_size); 2928 } else { 2929 ptr += len; 2930 cur += len; 2931 } 2932 } 2933 return ret; 2934 } 2935 2936 /* 2937 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent. 2938 * @logical: Logical block offset in bytes. 2939 * @length: Length of extent in bytes. 2940 * 2941 * Return: Chunk mapping or ERR_PTR. 2942 */ 2943 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info, 2944 u64 logical, u64 length) 2945 { 2946 struct extent_map_tree *em_tree; 2947 struct extent_map *em; 2948 2949 em_tree = &fs_info->mapping_tree; 2950 read_lock(&em_tree->lock); 2951 em = lookup_extent_mapping(em_tree, logical, length); 2952 read_unlock(&em_tree->lock); 2953 2954 if (!em) { 2955 btrfs_crit(fs_info, "unable to find logical %llu length %llu", 2956 logical, length); 2957 return ERR_PTR(-EINVAL); 2958 } 2959 2960 if (em->start > logical || em->start + em->len < logical) { 2961 btrfs_crit(fs_info, 2962 "found a bad mapping, wanted %llu-%llu, found %llu-%llu", 2963 logical, length, em->start, em->start + em->len); 2964 free_extent_map(em); 2965 return ERR_PTR(-EINVAL); 2966 } 2967 2968 /* callers are responsible for dropping em's ref. */ 2969 return em; 2970 } 2971 2972 static int remove_chunk_item(struct btrfs_trans_handle *trans, 2973 struct map_lookup *map, u64 chunk_offset) 2974 { 2975 int i; 2976 2977 /* 2978 * Removing chunk items and updating the device items in the chunks btree 2979 * requires holding the chunk_mutex. 2980 * See the comment at btrfs_chunk_alloc() for the details. 2981 */ 2982 lockdep_assert_held(&trans->fs_info->chunk_mutex); 2983 2984 for (i = 0; i < map->num_stripes; i++) { 2985 int ret; 2986 2987 ret = btrfs_update_device(trans, map->stripes[i].dev); 2988 if (ret) 2989 return ret; 2990 } 2991 2992 return btrfs_free_chunk(trans, chunk_offset); 2993 } 2994 2995 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 2996 { 2997 struct btrfs_fs_info *fs_info = trans->fs_info; 2998 struct extent_map *em; 2999 struct map_lookup *map; 3000 u64 dev_extent_len = 0; 3001 int i, ret = 0; 3002 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 3003 3004 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 3005 if (IS_ERR(em)) { 3006 /* 3007 * This is a logic error, but we don't want to just rely on the 3008 * user having built with ASSERT enabled, so if ASSERT doesn't 3009 * do anything we still error out. 3010 */ 3011 ASSERT(0); 3012 return PTR_ERR(em); 3013 } 3014 map = em->map_lookup; 3015 3016 /* 3017 * First delete the device extent items from the devices btree. 3018 * We take the device_list_mutex to avoid racing with the finishing phase 3019 * of a device replace operation. See the comment below before acquiring 3020 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex 3021 * because that can result in a deadlock when deleting the device extent 3022 * items from the devices btree - COWing an extent buffer from the btree 3023 * may result in allocating a new metadata chunk, which would attempt to 3024 * lock again fs_info->chunk_mutex. 
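 *
 * (Editor's summary of the resulting order, illustrative only:
 *
 *	mutex_lock(&fs_devices->device_list_mutex);
 *	...delete dev extents, may COW dev tree buffers...
 *	mutex_unlock(&fs_devices->device_list_mutex);
 *	mutex_lock(&fs_info->chunk_mutex);
 *	...chunk item, dev items, sys chunk array...
 * )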
3025 */ 3026 mutex_lock(&fs_devices->device_list_mutex); 3027 for (i = 0; i < map->num_stripes; i++) { 3028 struct btrfs_device *device = map->stripes[i].dev; 3029 ret = btrfs_free_dev_extent(trans, device, 3030 map->stripes[i].physical, 3031 &dev_extent_len); 3032 if (ret) { 3033 mutex_unlock(&fs_devices->device_list_mutex); 3034 btrfs_abort_transaction(trans, ret); 3035 goto out; 3036 } 3037 3038 if (device->bytes_used > 0) { 3039 mutex_lock(&fs_info->chunk_mutex); 3040 btrfs_device_set_bytes_used(device, 3041 device->bytes_used - dev_extent_len); 3042 atomic64_add(dev_extent_len, &fs_info->free_chunk_space); 3043 btrfs_clear_space_info_full(fs_info); 3044 mutex_unlock(&fs_info->chunk_mutex); 3045 } 3046 } 3047 mutex_unlock(&fs_devices->device_list_mutex); 3048 3049 /* 3050 * We acquire fs_info->chunk_mutex for 2 reasons: 3051 * 3052 * 1) Just like with the first phase of the chunk allocation, we must 3053 * reserve system space, do all chunk btree updates and deletions, and 3054 * update the system chunk array in the superblock while holding this 3055 * mutex. This is for similar reasons as explained on the comment at 3056 * the top of btrfs_chunk_alloc(); 3057 * 3058 * 2) Prevent races with the final phase of a device replace operation 3059 * that replaces the device object associated with the map's stripes, 3060 * because the device object's id can change at any time during that 3061 * final phase of the device replace operation 3062 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 3063 * replaced device and then see it with an ID of 3064 * BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating 3065 * the device item, which does not exist on the chunk btree. 3066 * The finishing phase of device replace acquires both the 3067 * device_list_mutex and the chunk_mutex, in that order, so we are 3068 * safe by just acquiring the chunk_mutex. 3069 */ 3070 trans->removing_chunk = true; 3071 mutex_lock(&fs_info->chunk_mutex); 3072 3073 check_system_chunk(trans, map->type); 3074 3075 ret = remove_chunk_item(trans, map, chunk_offset); 3076 /* 3077 * Normally we should not get -ENOSPC since we reserved space before 3078 * through the call to check_system_chunk(). 3079 * 3080 * Despite our system space_info having enough free space, we may not 3081 * be able to allocate extents from its block groups, because all have 3082 * an incompatible profile, which will force us to allocate a new system 3083 * block group with the right profile, or right after we called 3084 * check_system_chunk() above, a scrub turned the only system block group 3085 * with enough free space into RO mode. 3086 * This is explained in more detail at do_chunk_alloc(). 3087 * 3088 * So if we get -ENOSPC, allocate a new system chunk and retry once.
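 *
 * (Editor's condensed sketch of the code below, illustrative only:
 *
 *	ret = remove_chunk_item(trans, map, chunk_offset);
 *	if (ret == -ENOSPC) {
 *		sys_bg = btrfs_create_chunk(trans, sys_flags);
 *		btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
 *		ret = remove_chunk_item(trans, map, chunk_offset);
 *	}
 * )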
3089 */ 3090 if (ret == -ENOSPC) { 3091 const u64 sys_flags = btrfs_system_alloc_profile(fs_info); 3092 struct btrfs_block_group *sys_bg; 3093 3094 sys_bg = btrfs_create_chunk(trans, sys_flags); 3095 if (IS_ERR(sys_bg)) { 3096 ret = PTR_ERR(sys_bg); 3097 btrfs_abort_transaction(trans, ret); 3098 goto out; 3099 } 3100 3101 ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg); 3102 if (ret) { 3103 btrfs_abort_transaction(trans, ret); 3104 goto out; 3105 } 3106 3107 ret = remove_chunk_item(trans, map, chunk_offset); 3108 if (ret) { 3109 btrfs_abort_transaction(trans, ret); 3110 goto out; 3111 } 3112 } else if (ret) { 3113 btrfs_abort_transaction(trans, ret); 3114 goto out; 3115 } 3116 3117 trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len); 3118 3119 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 3120 ret = btrfs_del_sys_chunk(fs_info, chunk_offset); 3121 if (ret) { 3122 btrfs_abort_transaction(trans, ret); 3123 goto out; 3124 } 3125 } 3126 3127 mutex_unlock(&fs_info->chunk_mutex); 3128 trans->removing_chunk = false; 3129 3130 /* 3131 * We are done with chunk btree updates and deletions, so release the 3132 * system space we previously reserved (with check_system_chunk()). 3133 */ 3134 btrfs_trans_release_chunk_metadata(trans); 3135 3136 ret = btrfs_remove_block_group(trans, chunk_offset, em); 3137 if (ret) { 3138 btrfs_abort_transaction(trans, ret); 3139 goto out; 3140 } 3141 3142 out: 3143 if (trans->removing_chunk) { 3144 mutex_unlock(&fs_info->chunk_mutex); 3145 trans->removing_chunk = false; 3146 } 3147 /* once for us */ 3148 free_extent_map(em); 3149 return ret; 3150 } 3151 3152 int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 3153 { 3154 struct btrfs_root *root = fs_info->chunk_root; 3155 struct btrfs_trans_handle *trans; 3156 struct btrfs_block_group *block_group; 3157 u64 length; 3158 int ret; 3159 3160 /* 3161 * Prevent races with automatic removal of unused block groups. 3162 * After we relocate and before we remove the chunk with offset 3163 * chunk_offset, automatic removal of the block group can kick in, 3164 * resulting in a failure when calling btrfs_remove_chunk() below. 3165 * 3166 * Make sure to acquire this mutex before doing a tree search (dev 3167 * or chunk trees) to find chunks. Otherwise the cleaner kthread might 3168 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after 3169 * we release the path used to search the chunk/dev tree and before 3170 * the current task acquires this mutex and calls us. 3171 */ 3172 lockdep_assert_held(&fs_info->reclaim_bgs_lock); 3173 3174 /* step one, relocate all the extents inside this chunk */ 3175 btrfs_scrub_pause(fs_info); 3176 ret = btrfs_relocate_block_group(fs_info, chunk_offset); 3177 btrfs_scrub_continue(fs_info); 3178 if (ret) 3179 return ret; 3180 3181 block_group = btrfs_lookup_block_group(fs_info, chunk_offset); 3182 if (!block_group) 3183 return -ENOENT; 3184 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); 3185 length = block_group->length; 3186 btrfs_put_block_group(block_group); 3187 3188 /* 3189 * On a zoned file system, discard the whole block group, this will 3190 * trigger a REQ_OP_ZONE_RESET operation on the device zone. If 3191 * resetting the zone fails, don't treat it as a fatal problem from the 3192 * filesystem's point of view. 
3193 */ 3194 if (btrfs_is_zoned(fs_info)) { 3195 ret = btrfs_discard_extent(fs_info, chunk_offset, length, NULL); 3196 if (ret) 3197 btrfs_info(fs_info, 3198 "failed to reset zone %llu after relocation", 3199 chunk_offset); 3200 } 3201 3202 trans = btrfs_start_trans_remove_block_group(root->fs_info, 3203 chunk_offset); 3204 if (IS_ERR(trans)) { 3205 ret = PTR_ERR(trans); 3206 btrfs_handle_fs_error(root->fs_info, ret, NULL); 3207 return ret; 3208 } 3209 3210 /* 3211 * step two, delete the device extents and the 3212 * chunk tree entries 3213 */ 3214 ret = btrfs_remove_chunk(trans, chunk_offset); 3215 btrfs_end_transaction(trans); 3216 return ret; 3217 } 3218 3219 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info) 3220 { 3221 struct btrfs_root *chunk_root = fs_info->chunk_root; 3222 struct btrfs_path *path; 3223 struct extent_buffer *leaf; 3224 struct btrfs_chunk *chunk; 3225 struct btrfs_key key; 3226 struct btrfs_key found_key; 3227 u64 chunk_type; 3228 bool retried = false; 3229 int failed = 0; 3230 int ret; 3231 3232 path = btrfs_alloc_path(); 3233 if (!path) 3234 return -ENOMEM; 3235 3236 again: 3237 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3238 key.offset = (u64)-1; 3239 key.type = BTRFS_CHUNK_ITEM_KEY; 3240 3241 while (1) { 3242 mutex_lock(&fs_info->reclaim_bgs_lock); 3243 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3244 if (ret < 0) { 3245 mutex_unlock(&fs_info->reclaim_bgs_lock); 3246 goto error; 3247 } 3248 BUG_ON(ret == 0); /* Corruption */ 3249 3250 ret = btrfs_previous_item(chunk_root, path, key.objectid, 3251 key.type); 3252 if (ret) 3253 mutex_unlock(&fs_info->reclaim_bgs_lock); 3254 if (ret < 0) 3255 goto error; 3256 if (ret > 0) 3257 break; 3258 3259 leaf = path->nodes[0]; 3260 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3261 3262 chunk = btrfs_item_ptr(leaf, path->slots[0], 3263 struct btrfs_chunk); 3264 chunk_type = btrfs_chunk_type(leaf, chunk); 3265 btrfs_release_path(path); 3266 3267 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { 3268 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3269 if (ret == -ENOSPC) 3270 failed++; 3271 else 3272 BUG_ON(ret); 3273 } 3274 mutex_unlock(&fs_info->reclaim_bgs_lock); 3275 3276 if (found_key.offset == 0) 3277 break; 3278 key.offset = found_key.offset - 1; 3279 } 3280 ret = 0; 3281 if (failed && !retried) { 3282 failed = 0; 3283 retried = true; 3284 goto again; 3285 } else if (WARN_ON(failed && retried)) { 3286 ret = -ENOSPC; 3287 } 3288 error: 3289 btrfs_free_path(path); 3290 return ret; 3291 } 3292 3293 /* 3294 * return 1 : allocate a data chunk successfully, 3295 * return <0: errors during allocating a data chunk, 3296 * return 0 : no need to allocate a data chunk. 
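 *
 * (Editor's note on the caller pattern in __btrfs_balance(), illustrative:
 *
 *	ret = btrfs_may_alloc_data_chunk(fs_info, found_key.offset);
 *	if (ret < 0)
 *		goto error;
 *	else if (ret == 1)
 *		chunk_reserved = 1;	// an empty data chunk now exists
 * )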
3297 */ 3298 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info, 3299 u64 chunk_offset) 3300 { 3301 struct btrfs_block_group *cache; 3302 u64 bytes_used; 3303 u64 chunk_type; 3304 3305 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3306 ASSERT(cache); 3307 chunk_type = cache->flags; 3308 btrfs_put_block_group(cache); 3309 3310 if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA)) 3311 return 0; 3312 3313 spin_lock(&fs_info->data_sinfo->lock); 3314 bytes_used = fs_info->data_sinfo->bytes_used; 3315 spin_unlock(&fs_info->data_sinfo->lock); 3316 3317 if (!bytes_used) { 3318 struct btrfs_trans_handle *trans; 3319 int ret; 3320 3321 trans = btrfs_join_transaction(fs_info->tree_root); 3322 if (IS_ERR(trans)) 3323 return PTR_ERR(trans); 3324 3325 ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA); 3326 btrfs_end_transaction(trans); 3327 if (ret < 0) 3328 return ret; 3329 return 1; 3330 } 3331 3332 return 0; 3333 } 3334 3335 static int insert_balance_item(struct btrfs_fs_info *fs_info, 3336 struct btrfs_balance_control *bctl) 3337 { 3338 struct btrfs_root *root = fs_info->tree_root; 3339 struct btrfs_trans_handle *trans; 3340 struct btrfs_balance_item *item; 3341 struct btrfs_disk_balance_args disk_bargs; 3342 struct btrfs_path *path; 3343 struct extent_buffer *leaf; 3344 struct btrfs_key key; 3345 int ret, err; 3346 3347 path = btrfs_alloc_path(); 3348 if (!path) 3349 return -ENOMEM; 3350 3351 trans = btrfs_start_transaction(root, 0); 3352 if (IS_ERR(trans)) { 3353 btrfs_free_path(path); 3354 return PTR_ERR(trans); 3355 } 3356 3357 key.objectid = BTRFS_BALANCE_OBJECTID; 3358 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3359 key.offset = 0; 3360 3361 ret = btrfs_insert_empty_item(trans, root, path, &key, 3362 sizeof(*item)); 3363 if (ret) 3364 goto out; 3365 3366 leaf = path->nodes[0]; 3367 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 3368 3369 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item)); 3370 3371 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data); 3372 btrfs_set_balance_data(leaf, item, &disk_bargs); 3373 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta); 3374 btrfs_set_balance_meta(leaf, item, &disk_bargs); 3375 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys); 3376 btrfs_set_balance_sys(leaf, item, &disk_bargs); 3377 3378 btrfs_set_balance_flags(leaf, item, bctl->flags); 3379 3380 btrfs_mark_buffer_dirty(leaf); 3381 out: 3382 btrfs_free_path(path); 3383 err = btrfs_commit_transaction(trans); 3384 if (err && !ret) 3385 ret = err; 3386 return ret; 3387 } 3388 3389 static int del_balance_item(struct btrfs_fs_info *fs_info) 3390 { 3391 struct btrfs_root *root = fs_info->tree_root; 3392 struct btrfs_trans_handle *trans; 3393 struct btrfs_path *path; 3394 struct btrfs_key key; 3395 int ret, err; 3396 3397 path = btrfs_alloc_path(); 3398 if (!path) 3399 return -ENOMEM; 3400 3401 trans = btrfs_start_transaction_fallback_global_rsv(root, 0); 3402 if (IS_ERR(trans)) { 3403 btrfs_free_path(path); 3404 return PTR_ERR(trans); 3405 } 3406 3407 key.objectid = BTRFS_BALANCE_OBJECTID; 3408 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3409 key.offset = 0; 3410 3411 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 3412 if (ret < 0) 3413 goto out; 3414 if (ret > 0) { 3415 ret = -ENOENT; 3416 goto out; 3417 } 3418 3419 ret = btrfs_del_item(trans, root, path); 3420 out: 3421 btrfs_free_path(path); 3422 err = btrfs_commit_transaction(trans); 3423 if (err && !ret) 3424 ret = err; 3425 return ret; 3426 } 3427 3428 /* 3429 * This is a 
heuristic used to reduce the number of chunks balanced on 3430 * resume after balance was interrupted. 3431 */ 3432 static void update_balance_args(struct btrfs_balance_control *bctl) 3433 { 3434 /* 3435 * Turn on soft mode for chunk types that were being converted. 3436 */ 3437 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) 3438 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT; 3439 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) 3440 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT; 3441 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) 3442 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT; 3443 3444 /* 3445 * Turn on the usage filter if it is not already used. The idea is 3446 * that chunks that we have already balanced should be 3447 * reasonably full. Don't do it for chunks that are being 3448 * converted - that will keep us from relocating unconverted 3449 * (albeit full) chunks. 3450 */ 3451 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) && 3452 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3453 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3454 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE; 3455 bctl->data.usage = 90; 3456 } 3457 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) && 3458 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3459 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3460 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE; 3461 bctl->sys.usage = 90; 3462 } 3463 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) && 3464 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3465 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3466 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE; 3467 bctl->meta.usage = 90; 3468 } 3469 } 3470 3471 /* 3472 * Clear the balance status in fs_info and delete the balance item from disk. 3473 */ 3474 static void reset_balance_state(struct btrfs_fs_info *fs_info) 3475 { 3476 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3477 int ret; 3478 3479 BUG_ON(!fs_info->balance_ctl); 3480 3481 spin_lock(&fs_info->balance_lock); 3482 fs_info->balance_ctl = NULL; 3483 spin_unlock(&fs_info->balance_lock); 3484 3485 kfree(bctl); 3486 ret = del_balance_item(fs_info); 3487 if (ret) 3488 btrfs_handle_fs_error(fs_info, ret, NULL); 3489 } 3490 3491 /* 3492 * Balance filters. Return 1 if chunk should be filtered out 3493 * (should not be balanced).
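 *
 * (Editor's illustration, not upstream text: each filter follows this
 * convention, e.g. the profiles filter:
 *
 *	if (bargs->profiles & chunk_type)
 *		return 0;	// keep the chunk, balance it
 *	return 1;		// filter the chunk out
 * )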
3494 */ 3495 static int chunk_profiles_filter(u64 chunk_type, 3496 struct btrfs_balance_args *bargs) 3497 { 3498 chunk_type = chunk_to_extended(chunk_type) & 3499 BTRFS_EXTENDED_PROFILE_MASK; 3500 3501 if (bargs->profiles & chunk_type) 3502 return 0; 3503 3504 return 1; 3505 } 3506 3507 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, 3508 struct btrfs_balance_args *bargs) 3509 { 3510 struct btrfs_block_group *cache; 3511 u64 chunk_used; 3512 u64 user_thresh_min; 3513 u64 user_thresh_max; 3514 int ret = 1; 3515 3516 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3517 chunk_used = cache->used; 3518 3519 if (bargs->usage_min == 0) 3520 user_thresh_min = 0; 3521 else 3522 user_thresh_min = div_factor_fine(cache->length, 3523 bargs->usage_min); 3524 3525 if (bargs->usage_max == 0) 3526 user_thresh_max = 1; 3527 else if (bargs->usage_max > 100) 3528 user_thresh_max = cache->length; 3529 else 3530 user_thresh_max = div_factor_fine(cache->length, 3531 bargs->usage_max); 3532 3533 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max) 3534 ret = 0; 3535 3536 btrfs_put_block_group(cache); 3537 return ret; 3538 } 3539 3540 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, 3541 u64 chunk_offset, struct btrfs_balance_args *bargs) 3542 { 3543 struct btrfs_block_group *cache; 3544 u64 chunk_used, user_thresh; 3545 int ret = 1; 3546 3547 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3548 chunk_used = cache->used; 3549 3550 if (bargs->usage_min == 0) 3551 user_thresh = 1; 3552 else if (bargs->usage > 100) 3553 user_thresh = cache->length; 3554 else 3555 user_thresh = div_factor_fine(cache->length, bargs->usage); 3556 3557 if (chunk_used < user_thresh) 3558 ret = 0; 3559 3560 btrfs_put_block_group(cache); 3561 return ret; 3562 } 3563 3564 static int chunk_devid_filter(struct extent_buffer *leaf, 3565 struct btrfs_chunk *chunk, 3566 struct btrfs_balance_args *bargs) 3567 { 3568 struct btrfs_stripe *stripe; 3569 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3570 int i; 3571 3572 for (i = 0; i < num_stripes; i++) { 3573 stripe = btrfs_stripe_nr(chunk, i); 3574 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid) 3575 return 0; 3576 } 3577 3578 return 1; 3579 } 3580 3581 static u64 calc_data_stripes(u64 type, int num_stripes) 3582 { 3583 const int index = btrfs_bg_flags_to_raid_index(type); 3584 const int ncopies = btrfs_raid_array[index].ncopies; 3585 const int nparity = btrfs_raid_array[index].nparity; 3586 3587 return (num_stripes - nparity) / ncopies; 3588 } 3589 3590 /* [pstart, pend) */ 3591 static int chunk_drange_filter(struct extent_buffer *leaf, 3592 struct btrfs_chunk *chunk, 3593 struct btrfs_balance_args *bargs) 3594 { 3595 struct btrfs_stripe *stripe; 3596 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3597 u64 stripe_offset; 3598 u64 stripe_length; 3599 u64 type; 3600 int factor; 3601 int i; 3602 3603 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID)) 3604 return 0; 3605 3606 type = btrfs_chunk_type(leaf, chunk); 3607 factor = calc_data_stripes(type, num_stripes); 3608 3609 for (i = 0; i < num_stripes; i++) { 3610 stripe = btrfs_stripe_nr(chunk, i); 3611 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid) 3612 continue; 3613 3614 stripe_offset = btrfs_stripe_offset(leaf, stripe); 3615 stripe_length = btrfs_chunk_length(leaf, chunk); 3616 stripe_length = div_u64(stripe_length, factor); 3617 3618 if (stripe_offset < bargs->pend && 3619 stripe_offset + stripe_length > bargs->pstart) 3620 return 0; 
3621 } 3622 3623 return 1; 3624 } 3625 3626 /* [vstart, vend) */ 3627 static int chunk_vrange_filter(struct extent_buffer *leaf, 3628 struct btrfs_chunk *chunk, 3629 u64 chunk_offset, 3630 struct btrfs_balance_args *bargs) 3631 { 3632 if (chunk_offset < bargs->vend && 3633 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart) 3634 /* at least part of the chunk is inside this vrange */ 3635 return 0; 3636 3637 return 1; 3638 } 3639 3640 static int chunk_stripes_range_filter(struct extent_buffer *leaf, 3641 struct btrfs_chunk *chunk, 3642 struct btrfs_balance_args *bargs) 3643 { 3644 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3645 3646 if (bargs->stripes_min <= num_stripes 3647 && num_stripes <= bargs->stripes_max) 3648 return 0; 3649 3650 return 1; 3651 } 3652 3653 static int chunk_soft_convert_filter(u64 chunk_type, 3654 struct btrfs_balance_args *bargs) 3655 { 3656 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 3657 return 0; 3658 3659 chunk_type = chunk_to_extended(chunk_type) & 3660 BTRFS_EXTENDED_PROFILE_MASK; 3661 3662 if (bargs->target == chunk_type) 3663 return 1; 3664 3665 return 0; 3666 } 3667 3668 static int should_balance_chunk(struct extent_buffer *leaf, 3669 struct btrfs_chunk *chunk, u64 chunk_offset) 3670 { 3671 struct btrfs_fs_info *fs_info = leaf->fs_info; 3672 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3673 struct btrfs_balance_args *bargs = NULL; 3674 u64 chunk_type = btrfs_chunk_type(leaf, chunk); 3675 3676 /* type filter */ 3677 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) & 3678 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) { 3679 return 0; 3680 } 3681 3682 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3683 bargs = &bctl->data; 3684 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3685 bargs = &bctl->sys; 3686 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3687 bargs = &bctl->meta; 3688 3689 /* profiles filter */ 3690 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) && 3691 chunk_profiles_filter(chunk_type, bargs)) { 3692 return 0; 3693 } 3694 3695 /* usage filter */ 3696 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) && 3697 chunk_usage_filter(fs_info, chunk_offset, bargs)) { 3698 return 0; 3699 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3700 chunk_usage_range_filter(fs_info, chunk_offset, bargs)) { 3701 return 0; 3702 } 3703 3704 /* devid filter */ 3705 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) && 3706 chunk_devid_filter(leaf, chunk, bargs)) { 3707 return 0; 3708 } 3709 3710 /* drange filter, makes sense only with devid filter */ 3711 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) && 3712 chunk_drange_filter(leaf, chunk, bargs)) { 3713 return 0; 3714 } 3715 3716 /* vrange filter */ 3717 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) && 3718 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) { 3719 return 0; 3720 } 3721 3722 /* stripes filter */ 3723 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) && 3724 chunk_stripes_range_filter(leaf, chunk, bargs)) { 3725 return 0; 3726 } 3727 3728 /* soft profile changing mode */ 3729 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) && 3730 chunk_soft_convert_filter(chunk_type, bargs)) { 3731 return 0; 3732 } 3733 3734 /* 3735 * limited by count, must be the last filter 3736 */ 3737 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) { 3738 if (bargs->limit == 0) 3739 return 0; 3740 else 3741 bargs->limit--; 3742 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) { 3743 /* 3744 * Same logic as the 'limit' filter; the minimum cannot be 3745 * determined here 
because we do not have the global information 3746 about the count of all chunks that satisfy the filters. 3747 */ 3748 if (bargs->limit_max == 0) 3749 return 0; 3750 else 3751 bargs->limit_max--; 3752 } 3753 3754 return 1; 3755 } 3756 3757 static int __btrfs_balance(struct btrfs_fs_info *fs_info) 3758 { 3759 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3760 struct btrfs_root *chunk_root = fs_info->chunk_root; 3761 u64 chunk_type; 3762 struct btrfs_chunk *chunk; 3763 struct btrfs_path *path = NULL; 3764 struct btrfs_key key; 3765 struct btrfs_key found_key; 3766 struct extent_buffer *leaf; 3767 int slot; 3768 int ret; 3769 int enospc_errors = 0; 3770 bool counting = true; 3771 /* The single value limit and min/max limits use the same bytes in the balance args union, so save the single values */ 3772 u64 limit_data = bctl->data.limit; 3773 u64 limit_meta = bctl->meta.limit; 3774 u64 limit_sys = bctl->sys.limit; 3775 u32 count_data = 0; 3776 u32 count_meta = 0; 3777 u32 count_sys = 0; 3778 int chunk_reserved = 0; 3779 3780 path = btrfs_alloc_path(); 3781 if (!path) { 3782 ret = -ENOMEM; 3783 goto error; 3784 } 3785 3786 /* zero out stat counters */ 3787 spin_lock(&fs_info->balance_lock); 3788 memset(&bctl->stat, 0, sizeof(bctl->stat)); 3789 spin_unlock(&fs_info->balance_lock); 3790 again: 3791 if (!counting) { 3792 /* 3793 * The single value limit and min/max limits use the same bytes 3794 * in the balance args union, so restore the single values saved above. 3795 */ 3796 bctl->data.limit = limit_data; 3797 bctl->meta.limit = limit_meta; 3798 bctl->sys.limit = limit_sys; 3799 } 3800 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3801 key.offset = (u64)-1; 3802 key.type = BTRFS_CHUNK_ITEM_KEY; 3803 3804 while (1) { 3805 if ((!counting && atomic_read(&fs_info->balance_pause_req)) || 3806 atomic_read(&fs_info->balance_cancel_req)) { 3807 ret = -ECANCELED; 3808 goto error; 3809 } 3810 3811 mutex_lock(&fs_info->reclaim_bgs_lock); 3812 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3813 if (ret < 0) { 3814 mutex_unlock(&fs_info->reclaim_bgs_lock); 3815 goto error; 3816 } 3817 3818 /* 3819 * This shouldn't happen; it means the last relocate 3820 * failed 3821 */ 3822 if (ret == 0) 3823 BUG(); /* FIXME break ?
*/ 3824 3825 ret = btrfs_previous_item(chunk_root, path, 0, 3826 BTRFS_CHUNK_ITEM_KEY); 3827 if (ret) { 3828 mutex_unlock(&fs_info->reclaim_bgs_lock); 3829 ret = 0; 3830 break; 3831 } 3832 3833 leaf = path->nodes[0]; 3834 slot = path->slots[0]; 3835 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3836 3837 if (found_key.objectid != key.objectid) { 3838 mutex_unlock(&fs_info->reclaim_bgs_lock); 3839 break; 3840 } 3841 3842 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 3843 chunk_type = btrfs_chunk_type(leaf, chunk); 3844 3845 if (!counting) { 3846 spin_lock(&fs_info->balance_lock); 3847 bctl->stat.considered++; 3848 spin_unlock(&fs_info->balance_lock); 3849 } 3850 3851 ret = should_balance_chunk(leaf, chunk, found_key.offset); 3852 3853 btrfs_release_path(path); 3854 if (!ret) { 3855 mutex_unlock(&fs_info->reclaim_bgs_lock); 3856 goto loop; 3857 } 3858 3859 if (counting) { 3860 mutex_unlock(&fs_info->reclaim_bgs_lock); 3861 spin_lock(&fs_info->balance_lock); 3862 bctl->stat.expected++; 3863 spin_unlock(&fs_info->balance_lock); 3864 3865 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3866 count_data++; 3867 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3868 count_sys++; 3869 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3870 count_meta++; 3871 3872 goto loop; 3873 } 3874 3875 /* 3876 * Apply the limit_min filter; no need to check if the LIMITS 3877 * filter is used, since limit_min is 0 by default 3878 */ 3879 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) && 3880 count_data < bctl->data.limit_min) 3881 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) && 3882 count_meta < bctl->meta.limit_min) 3883 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) && 3884 count_sys < bctl->sys.limit_min)) { 3885 mutex_unlock(&fs_info->reclaim_bgs_lock); 3886 goto loop; 3887 } 3888 3889 if (!chunk_reserved) { 3890 /* 3891 * We may be relocating the only data chunk we have, 3892 * which could potentially end up losing the data 3893 * raid profile, so let's allocate an empty one in 3894 * advance. 3895 */ 3896 ret = btrfs_may_alloc_data_chunk(fs_info, 3897 found_key.offset); 3898 if (ret < 0) { 3899 mutex_unlock(&fs_info->reclaim_bgs_lock); 3900 goto error; 3901 } else if (ret == 1) { 3902 chunk_reserved = 1; 3903 } 3904 } 3905 3906 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3907 mutex_unlock(&fs_info->reclaim_bgs_lock); 3908 if (ret == -ENOSPC) { 3909 enospc_errors++; 3910 } else if (ret == -ETXTBSY) { 3911 btrfs_info(fs_info, 3912 "skipping relocation of block group %llu due to active swapfile", 3913 found_key.offset); 3914 ret = 0; 3915 } else if (ret) { 3916 goto error; 3917 } else { 3918 spin_lock(&fs_info->balance_lock); 3919 bctl->stat.completed++; 3920 spin_unlock(&fs_info->balance_lock); 3921 } 3922 loop: 3923 if (found_key.offset == 0) 3924 break; 3925 key.offset = found_key.offset - 1; 3926 } 3927 3928 if (counting) { 3929 btrfs_release_path(path); 3930 counting = false; 3931 goto again; 3932 } 3933 error: 3934 btrfs_free_path(path); 3935 if (enospc_errors) { 3936 btrfs_info(fs_info, "%d enospc errors during balance", 3937 enospc_errors); 3938 if (!ret) 3939 ret = -ENOSPC; 3940 } 3941 3942 return ret; 3943 } 3944 3945 /** 3946 * alloc_profile_is_valid - see if a given profile is valid and reduced 3947 * @flags: profile to validate 3948 * @extended: if true @flags is treated as an extended profile 3949 */ 3950 static int alloc_profile_is_valid(u64 flags, int extended) 3951 { 3952 u64 mask = (extended ?
BTRFS_EXTENDED_PROFILE_MASK : 3953 BTRFS_BLOCK_GROUP_PROFILE_MASK); 3954 3955 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK; 3956 3957 /* 1) check that all other bits are zeroed */ 3958 if (flags & ~mask) 3959 return 0; 3960 3961 /* 2) see if profile is reduced */ 3962 if (flags == 0) 3963 return !extended; /* "0" is valid for usual profiles */ 3964 3965 return has_single_bit_set(flags); 3966 } 3967 3968 static inline int balance_need_close(struct btrfs_fs_info *fs_info) 3969 { 3970 /* cancel requested || normal exit path */ 3971 return atomic_read(&fs_info->balance_cancel_req) || 3972 (atomic_read(&fs_info->balance_pause_req) == 0 && 3973 atomic_read(&fs_info->balance_cancel_req) == 0); 3974 } 3975 3976 /* 3977 * Validate target profile against allowed profiles and return true if it's OK. 3978 * Otherwise print the error message and return false. 3979 */ 3980 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info, 3981 const struct btrfs_balance_args *bargs, 3982 u64 allowed, const char *type) 3983 { 3984 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 3985 return true; 3986 3987 if (fs_info->sectorsize < PAGE_SIZE && 3988 bargs->target & BTRFS_BLOCK_GROUP_RAID56_MASK) { 3989 btrfs_err(fs_info, 3990 "RAID56 is not yet supported for sectorsize %u with page size %lu", 3991 fs_info->sectorsize, PAGE_SIZE); 3992 return false; 3993 } 3994 /* Profile is valid and does not have bits outside of the allowed set */ 3995 if (alloc_profile_is_valid(bargs->target, 1) && 3996 (bargs->target & ~allowed) == 0) 3997 return true; 3998 3999 btrfs_err(fs_info, "balance: invalid convert %s profile %s", 4000 type, btrfs_bg_type_to_raid_name(bargs->target)); 4001 return false; 4002 } 4003 4004 /* 4005 * Fill @buf with textual description of balance filter flags @bargs, up to 4006 * @size_buf including the terminating null. The output may be trimmed if it 4007 * does not fit into the provided buffer. 
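 *
 * For example, derived from the flag handlers below: a convert filter
 * combined with the soft flag and a usage range would render as
 * something like
 *
 *	convert=raid1,soft,usage=0..90
 *
 * with the trailing comma trimmed at out_overflow.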
4008 */ 4009 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf, 4010 u32 size_buf) 4011 { 4012 int ret; 4013 u32 size_bp = size_buf; 4014 char *bp = buf; 4015 u64 flags = bargs->flags; 4016 char tmp_buf[128] = {'\0'}; 4017 4018 if (!flags) 4019 return; 4020 4021 #define CHECK_APPEND_NOARG(a) \ 4022 do { \ 4023 ret = snprintf(bp, size_bp, (a)); \ 4024 if (ret < 0 || ret >= size_bp) \ 4025 goto out_overflow; \ 4026 size_bp -= ret; \ 4027 bp += ret; \ 4028 } while (0) 4029 4030 #define CHECK_APPEND_1ARG(a, v1) \ 4031 do { \ 4032 ret = snprintf(bp, size_bp, (a), (v1)); \ 4033 if (ret < 0 || ret >= size_bp) \ 4034 goto out_overflow; \ 4035 size_bp -= ret; \ 4036 bp += ret; \ 4037 } while (0) 4038 4039 #define CHECK_APPEND_2ARG(a, v1, v2) \ 4040 do { \ 4041 ret = snprintf(bp, size_bp, (a), (v1), (v2)); \ 4042 if (ret < 0 || ret >= size_bp) \ 4043 goto out_overflow; \ 4044 size_bp -= ret; \ 4045 bp += ret; \ 4046 } while (0) 4047 4048 if (flags & BTRFS_BALANCE_ARGS_CONVERT) 4049 CHECK_APPEND_1ARG("convert=%s,", 4050 btrfs_bg_type_to_raid_name(bargs->target)); 4051 4052 if (flags & BTRFS_BALANCE_ARGS_SOFT) 4053 CHECK_APPEND_NOARG("soft,"); 4054 4055 if (flags & BTRFS_BALANCE_ARGS_PROFILES) { 4056 btrfs_describe_block_groups(bargs->profiles, tmp_buf, 4057 sizeof(tmp_buf)); 4058 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf); 4059 } 4060 4061 if (flags & BTRFS_BALANCE_ARGS_USAGE) 4062 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage); 4063 4064 if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) 4065 CHECK_APPEND_2ARG("usage=%u..%u,", 4066 bargs->usage_min, bargs->usage_max); 4067 4068 if (flags & BTRFS_BALANCE_ARGS_DEVID) 4069 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid); 4070 4071 if (flags & BTRFS_BALANCE_ARGS_DRANGE) 4072 CHECK_APPEND_2ARG("drange=%llu..%llu,", 4073 bargs->pstart, bargs->pend); 4074 4075 if (flags & BTRFS_BALANCE_ARGS_VRANGE) 4076 CHECK_APPEND_2ARG("vrange=%llu..%llu,", 4077 bargs->vstart, bargs->vend); 4078 4079 if (flags & BTRFS_BALANCE_ARGS_LIMIT) 4080 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit); 4081 4082 if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE) 4083 CHECK_APPEND_2ARG("limit=%u..%u,", 4084 bargs->limit_min, bargs->limit_max); 4085 4086 if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) 4087 CHECK_APPEND_2ARG("stripes=%u..%u,", 4088 bargs->stripes_min, bargs->stripes_max); 4089 4090 #undef CHECK_APPEND_2ARG 4091 #undef CHECK_APPEND_1ARG 4092 #undef CHECK_APPEND_NOARG 4093 4094 out_overflow: 4095 4096 if (size_bp < size_buf) 4097 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */ 4098 else 4099 buf[0] = '\0'; 4100 } 4101 4102 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info) 4103 { 4104 u32 size_buf = 1024; 4105 char tmp_buf[192] = {'\0'}; 4106 char *buf; 4107 char *bp; 4108 u32 size_bp = size_buf; 4109 int ret; 4110 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 4111 4112 buf = kzalloc(size_buf, GFP_KERNEL); 4113 if (!buf) 4114 return; 4115 4116 bp = buf; 4117 4118 #define CHECK_APPEND_1ARG(a, v1) \ 4119 do { \ 4120 ret = snprintf(bp, size_bp, (a), (v1)); \ 4121 if (ret < 0 || ret >= size_bp) \ 4122 goto out_overflow; \ 4123 size_bp -= ret; \ 4124 bp += ret; \ 4125 } while (0) 4126 4127 if (bctl->flags & BTRFS_BALANCE_FORCE) 4128 CHECK_APPEND_1ARG("%s", "-f "); 4129 4130 if (bctl->flags & BTRFS_BALANCE_DATA) { 4131 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf)); 4132 CHECK_APPEND_1ARG("-d%s ", tmp_buf); 4133 } 4134 4135 if (bctl->flags & BTRFS_BALANCE_METADATA) { 4136 describe_balance_args(&bctl->meta, 
tmp_buf, sizeof(tmp_buf)); 4137 CHECK_APPEND_1ARG("-m%s ", tmp_buf); 4138 } 4139 4140 if (bctl->flags & BTRFS_BALANCE_SYSTEM) { 4141 describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf)); 4142 CHECK_APPEND_1ARG("-s%s ", tmp_buf); 4143 } 4144 4145 #undef CHECK_APPEND_1ARG 4146 4147 out_overflow: 4148 4149 if (size_bp < size_buf) 4150 buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */ 4151 btrfs_info(fs_info, "balance: %s %s", 4152 (bctl->flags & BTRFS_BALANCE_RESUME) ? 4153 "resume" : "start", buf); 4154 4155 kfree(buf); 4156 } 4157 4158 /* 4159 * Should be called with the balance mutex held. 4160 */ 4161 int btrfs_balance(struct btrfs_fs_info *fs_info, 4162 struct btrfs_balance_control *bctl, 4163 struct btrfs_ioctl_balance_args *bargs) 4164 { 4165 u64 meta_target, data_target; 4166 u64 allowed; 4167 int mixed = 0; 4168 int ret; 4169 u64 num_devices; 4170 unsigned seq; 4171 bool reducing_redundancy; 4172 int i; 4173 4174 if (btrfs_fs_closing(fs_info) || 4175 atomic_read(&fs_info->balance_pause_req) || 4176 btrfs_should_cancel_balance(fs_info)) { 4177 ret = -EINVAL; 4178 goto out; 4179 } 4180 4181 allowed = btrfs_super_incompat_flags(fs_info->super_copy); 4182 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) 4183 mixed = 1; 4184 4185 /* 4186 * In case of mixed groups both data and meta should be picked, 4187 * and identical options should be given for both of them. 4188 */ 4189 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA; 4190 if (mixed && (bctl->flags & allowed)) { 4191 if (!(bctl->flags & BTRFS_BALANCE_DATA) || 4192 !(bctl->flags & BTRFS_BALANCE_METADATA) || 4193 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) { 4194 btrfs_err(fs_info, 4195 "balance: mixed groups data and metadata options must be the same"); 4196 ret = -EINVAL; 4197 goto out; 4198 } 4199 } 4200 4201 /* 4202 * rw_devices will not change at the moment, device add/delete/replace 4203 * are exclusive 4204 */ 4205 num_devices = fs_info->fs_devices->rw_devices; 4206 4207 /* 4208 * SINGLE profile on-disk has no profile bit, but in-memory we have a 4209 * special bit for it, to make it easier to distinguish. Thus we need 4210 * to set it manually, or balance would refuse the profile. 
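 *
 * As a rough illustration (the exact mask depends on the mounted
 * filesystem): with two writable devices the loop below ends up with
 * something like
 *
 *	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE |
 *		  BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
 *		  BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
 *		  BTRFS_BLOCK_GROUP_RAID10;
 *
 * while profiles with a larger devs_min (raid1c3, raid1c4, raid6)
 * stay disallowed until enough devices are present.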
4211 */ 4212 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE; 4213 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) 4214 if (num_devices >= btrfs_raid_array[i].devs_min) 4215 allowed |= btrfs_raid_array[i].bg_flag; 4216 4217 if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") || 4218 !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") || 4219 !validate_convert_profile(fs_info, &bctl->sys, allowed, "system")) { 4220 ret = -EINVAL; 4221 goto out; 4222 } 4223 4224 /* 4225 * Allow to reduce metadata or system integrity only if force set for 4226 * profiles with redundancy (copies, parity) 4227 */ 4228 allowed = 0; 4229 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) { 4230 if (btrfs_raid_array[i].ncopies >= 2 || 4231 btrfs_raid_array[i].tolerated_failures >= 1) 4232 allowed |= btrfs_raid_array[i].bg_flag; 4233 } 4234 do { 4235 seq = read_seqbegin(&fs_info->profiles_lock); 4236 4237 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4238 (fs_info->avail_system_alloc_bits & allowed) && 4239 !(bctl->sys.target & allowed)) || 4240 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4241 (fs_info->avail_metadata_alloc_bits & allowed) && 4242 !(bctl->meta.target & allowed))) 4243 reducing_redundancy = true; 4244 else 4245 reducing_redundancy = false; 4246 4247 /* if we're not converting, the target field is uninitialized */ 4248 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4249 bctl->meta.target : fs_info->avail_metadata_alloc_bits; 4250 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4251 bctl->data.target : fs_info->avail_data_alloc_bits; 4252 } while (read_seqretry(&fs_info->profiles_lock, seq)); 4253 4254 if (reducing_redundancy) { 4255 if (bctl->flags & BTRFS_BALANCE_FORCE) { 4256 btrfs_info(fs_info, 4257 "balance: force reducing metadata redundancy"); 4258 } else { 4259 btrfs_err(fs_info, 4260 "balance: reduces metadata redundancy, use --force if you want this"); 4261 ret = -EINVAL; 4262 goto out; 4263 } 4264 } 4265 4266 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) < 4267 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) { 4268 btrfs_warn(fs_info, 4269 "balance: metadata profile %s has lower redundancy than data profile %s", 4270 btrfs_bg_type_to_raid_name(meta_target), 4271 btrfs_bg_type_to_raid_name(data_target)); 4272 } 4273 4274 ret = insert_balance_item(fs_info, bctl); 4275 if (ret && ret != -EEXIST) 4276 goto out; 4277 4278 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) { 4279 BUG_ON(ret == -EEXIST); 4280 BUG_ON(fs_info->balance_ctl); 4281 spin_lock(&fs_info->balance_lock); 4282 fs_info->balance_ctl = bctl; 4283 spin_unlock(&fs_info->balance_lock); 4284 } else { 4285 BUG_ON(ret != -EEXIST); 4286 spin_lock(&fs_info->balance_lock); 4287 update_balance_args(bctl); 4288 spin_unlock(&fs_info->balance_lock); 4289 } 4290 4291 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4292 set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4293 describe_balance_start_or_resume(fs_info); 4294 mutex_unlock(&fs_info->balance_mutex); 4295 4296 ret = __btrfs_balance(fs_info); 4297 4298 mutex_lock(&fs_info->balance_mutex); 4299 if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) 4300 btrfs_info(fs_info, "balance: paused"); 4301 /* 4302 * Balance can be canceled by: 4303 * 4304 * - Regular cancel request 4305 * Then ret == -ECANCELED and balance_cancel_req > 0 4306 * 4307 * - Fatal signal to "btrfs" process 4308 * Either the signal caught by wait_reserve_ticket() and callers 4309 * got 
-EINTR, or caught by btrfs_should_cancel_balance() and 4310 * got -ECANCELED. 4311 * Either way, in this case balance_cancel_req = 0, and 4312 * ret == -EINTR or ret == -ECANCELED. 4313 * 4314 * So here we only check the return value to catch canceled balance. 4315 */ 4316 else if (ret == -ECANCELED || ret == -EINTR) 4317 btrfs_info(fs_info, "balance: canceled"); 4318 else 4319 btrfs_info(fs_info, "balance: ended with status: %d", ret); 4320 4321 clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4322 4323 if (bargs) { 4324 memset(bargs, 0, sizeof(*bargs)); 4325 btrfs_update_ioctl_balance_args(fs_info, bargs); 4326 } 4327 4328 if ((ret && ret != -ECANCELED && ret != -ENOSPC) || 4329 balance_need_close(fs_info)) { 4330 reset_balance_state(fs_info); 4331 btrfs_exclop_finish(fs_info); 4332 } 4333 4334 wake_up(&fs_info->balance_wait_q); 4335 4336 return ret; 4337 out: 4338 if (bctl->flags & BTRFS_BALANCE_RESUME) 4339 reset_balance_state(fs_info); 4340 else 4341 kfree(bctl); 4342 btrfs_exclop_finish(fs_info); 4343 4344 return ret; 4345 } 4346 4347 static int balance_kthread(void *data) 4348 { 4349 struct btrfs_fs_info *fs_info = data; 4350 int ret = 0; 4351 4352 mutex_lock(&fs_info->balance_mutex); 4353 if (fs_info->balance_ctl) 4354 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL); 4355 mutex_unlock(&fs_info->balance_mutex); 4356 4357 return ret; 4358 } 4359 4360 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info) 4361 { 4362 struct task_struct *tsk; 4363 4364 mutex_lock(&fs_info->balance_mutex); 4365 if (!fs_info->balance_ctl) { 4366 mutex_unlock(&fs_info->balance_mutex); 4367 return 0; 4368 } 4369 mutex_unlock(&fs_info->balance_mutex); 4370 4371 if (btrfs_test_opt(fs_info, SKIP_BALANCE)) { 4372 btrfs_info(fs_info, "balance: resume skipped"); 4373 return 0; 4374 } 4375 4376 /* 4377 * A ro->rw remount sequence should continue with the paused balance 4378 * regardless of who pauses it, system or the user as of now, so set 4379 * the resume flag. 
4380 */ 4381 spin_lock(&fs_info->balance_lock); 4382 fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME; 4383 spin_unlock(&fs_info->balance_lock); 4384 4385 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); 4386 return PTR_ERR_OR_ZERO(tsk); 4387 } 4388 4389 int btrfs_recover_balance(struct btrfs_fs_info *fs_info) 4390 { 4391 struct btrfs_balance_control *bctl; 4392 struct btrfs_balance_item *item; 4393 struct btrfs_disk_balance_args disk_bargs; 4394 struct btrfs_path *path; 4395 struct extent_buffer *leaf; 4396 struct btrfs_key key; 4397 int ret; 4398 4399 path = btrfs_alloc_path(); 4400 if (!path) 4401 return -ENOMEM; 4402 4403 key.objectid = BTRFS_BALANCE_OBJECTID; 4404 key.type = BTRFS_TEMPORARY_ITEM_KEY; 4405 key.offset = 0; 4406 4407 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4408 if (ret < 0) 4409 goto out; 4410 if (ret > 0) { /* ret = -ENOENT; */ 4411 ret = 0; 4412 goto out; 4413 } 4414 4415 bctl = kzalloc(sizeof(*bctl), GFP_NOFS); 4416 if (!bctl) { 4417 ret = -ENOMEM; 4418 goto out; 4419 } 4420 4421 leaf = path->nodes[0]; 4422 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 4423 4424 bctl->flags = btrfs_balance_flags(leaf, item); 4425 bctl->flags |= BTRFS_BALANCE_RESUME; 4426 4427 btrfs_balance_data(leaf, item, &disk_bargs); 4428 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs); 4429 btrfs_balance_meta(leaf, item, &disk_bargs); 4430 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs); 4431 btrfs_balance_sys(leaf, item, &disk_bargs); 4432 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs); 4433 4434 /* 4435 * This should never happen, as the paused balance state is recovered 4436 * during mount without any chance of other exclusive ops to collide. 4437 * 4438 * This gives the exclusive op status to balance and keeps in paused 4439 * state until user intervention (cancel or umount). If the ownership 4440 * cannot be assigned, show a message but do not fail. The balance 4441 * is in a paused state and must have fs_info::balance_ctl properly 4442 * set up. 
4443 */ 4444 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) 4445 btrfs_warn(fs_info, 4446 "balance: cannot set exclusive op status, resume manually"); 4447 4448 btrfs_release_path(path); 4449 4450 mutex_lock(&fs_info->balance_mutex); 4451 BUG_ON(fs_info->balance_ctl); 4452 spin_lock(&fs_info->balance_lock); 4453 fs_info->balance_ctl = bctl; 4454 spin_unlock(&fs_info->balance_lock); 4455 mutex_unlock(&fs_info->balance_mutex); 4456 out: 4457 btrfs_free_path(path); 4458 return ret; 4459 } 4460 4461 int btrfs_pause_balance(struct btrfs_fs_info *fs_info) 4462 { 4463 int ret = 0; 4464 4465 mutex_lock(&fs_info->balance_mutex); 4466 if (!fs_info->balance_ctl) { 4467 mutex_unlock(&fs_info->balance_mutex); 4468 return -ENOTCONN; 4469 } 4470 4471 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4472 atomic_inc(&fs_info->balance_pause_req); 4473 mutex_unlock(&fs_info->balance_mutex); 4474 4475 wait_event(fs_info->balance_wait_q, 4476 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4477 4478 mutex_lock(&fs_info->balance_mutex); 4479 /* we are good with balance_ctl ripped off from under us */ 4480 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4481 atomic_dec(&fs_info->balance_pause_req); 4482 } else { 4483 ret = -ENOTCONN; 4484 } 4485 4486 mutex_unlock(&fs_info->balance_mutex); 4487 return ret; 4488 } 4489 4490 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) 4491 { 4492 mutex_lock(&fs_info->balance_mutex); 4493 if (!fs_info->balance_ctl) { 4494 mutex_unlock(&fs_info->balance_mutex); 4495 return -ENOTCONN; 4496 } 4497 4498 /* 4499 * A paused balance with the item stored on disk can be resumed at 4500 * mount time if the mount is read-write. Otherwise it's still paused 4501 * and we must not allow cancelling as it deletes the item. 4502 */ 4503 if (sb_rdonly(fs_info->sb)) { 4504 mutex_unlock(&fs_info->balance_mutex); 4505 return -EROFS; 4506 } 4507 4508 atomic_inc(&fs_info->balance_cancel_req); 4509 /* 4510 * if we are running just wait and return, balance item is 4511 * deleted in btrfs_balance in this case 4512 */ 4513 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4514 mutex_unlock(&fs_info->balance_mutex); 4515 wait_event(fs_info->balance_wait_q, 4516 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4517 mutex_lock(&fs_info->balance_mutex); 4518 } else { 4519 mutex_unlock(&fs_info->balance_mutex); 4520 /* 4521 * Lock released to allow other waiters to continue, we'll 4522 * reexamine the status again. 
4523 */ 4524 mutex_lock(&fs_info->balance_mutex); 4525 4526 if (fs_info->balance_ctl) { 4527 reset_balance_state(fs_info); 4528 btrfs_exclop_finish(fs_info); 4529 btrfs_info(fs_info, "balance: canceled"); 4530 } 4531 } 4532 4533 BUG_ON(fs_info->balance_ctl || 4534 test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4535 atomic_dec(&fs_info->balance_cancel_req); 4536 mutex_unlock(&fs_info->balance_mutex); 4537 return 0; 4538 } 4539 4540 int btrfs_uuid_scan_kthread(void *data) 4541 { 4542 struct btrfs_fs_info *fs_info = data; 4543 struct btrfs_root *root = fs_info->tree_root; 4544 struct btrfs_key key; 4545 struct btrfs_path *path = NULL; 4546 int ret = 0; 4547 struct extent_buffer *eb; 4548 int slot; 4549 struct btrfs_root_item root_item; 4550 u32 item_size; 4551 struct btrfs_trans_handle *trans = NULL; 4552 bool closing = false; 4553 4554 path = btrfs_alloc_path(); 4555 if (!path) { 4556 ret = -ENOMEM; 4557 goto out; 4558 } 4559 4560 key.objectid = 0; 4561 key.type = BTRFS_ROOT_ITEM_KEY; 4562 key.offset = 0; 4563 4564 while (1) { 4565 if (btrfs_fs_closing(fs_info)) { 4566 closing = true; 4567 break; 4568 } 4569 ret = btrfs_search_forward(root, &key, path, 4570 BTRFS_OLDEST_GENERATION); 4571 if (ret) { 4572 if (ret > 0) 4573 ret = 0; 4574 break; 4575 } 4576 4577 if (key.type != BTRFS_ROOT_ITEM_KEY || 4578 (key.objectid < BTRFS_FIRST_FREE_OBJECTID && 4579 key.objectid != BTRFS_FS_TREE_OBJECTID) || 4580 key.objectid > BTRFS_LAST_FREE_OBJECTID) 4581 goto skip; 4582 4583 eb = path->nodes[0]; 4584 slot = path->slots[0]; 4585 item_size = btrfs_item_size_nr(eb, slot); 4586 if (item_size < sizeof(root_item)) 4587 goto skip; 4588 4589 read_extent_buffer(eb, &root_item, 4590 btrfs_item_ptr_offset(eb, slot), 4591 (int)sizeof(root_item)); 4592 if (btrfs_root_refs(&root_item) == 0) 4593 goto skip; 4594 4595 if (!btrfs_is_empty_uuid(root_item.uuid) || 4596 !btrfs_is_empty_uuid(root_item.received_uuid)) { 4597 if (trans) 4598 goto update_tree; 4599 4600 btrfs_release_path(path); 4601 /* 4602 * 1 - subvol uuid item 4603 * 1 - received_subvol uuid item 4604 */ 4605 trans = btrfs_start_transaction(fs_info->uuid_root, 2); 4606 if (IS_ERR(trans)) { 4607 ret = PTR_ERR(trans); 4608 break; 4609 } 4610 continue; 4611 } else { 4612 goto skip; 4613 } 4614 update_tree: 4615 btrfs_release_path(path); 4616 if (!btrfs_is_empty_uuid(root_item.uuid)) { 4617 ret = btrfs_uuid_tree_add(trans, root_item.uuid, 4618 BTRFS_UUID_KEY_SUBVOL, 4619 key.objectid); 4620 if (ret < 0) { 4621 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4622 ret); 4623 break; 4624 } 4625 } 4626 4627 if (!btrfs_is_empty_uuid(root_item.received_uuid)) { 4628 ret = btrfs_uuid_tree_add(trans, 4629 root_item.received_uuid, 4630 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4631 key.objectid); 4632 if (ret < 0) { 4633 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4634 ret); 4635 break; 4636 } 4637 } 4638 4639 skip: 4640 btrfs_release_path(path); 4641 if (trans) { 4642 ret = btrfs_end_transaction(trans); 4643 trans = NULL; 4644 if (ret) 4645 break; 4646 } 4647 4648 if (key.offset < (u64)-1) { 4649 key.offset++; 4650 } else if (key.type < BTRFS_ROOT_ITEM_KEY) { 4651 key.offset = 0; 4652 key.type = BTRFS_ROOT_ITEM_KEY; 4653 } else if (key.objectid < (u64)-1) { 4654 key.offset = 0; 4655 key.type = BTRFS_ROOT_ITEM_KEY; 4656 key.objectid++; 4657 } else { 4658 break; 4659 } 4660 cond_resched(); 4661 } 4662 4663 out: 4664 btrfs_free_path(path); 4665 if (trans && !IS_ERR(trans)) 4666 btrfs_end_transaction(trans); 4667 if (ret) 4668 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread 
failed %d", ret); 4669 else if (!closing) 4670 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); 4671 up(&fs_info->uuid_tree_rescan_sem); 4672 return 0; 4673 } 4674 4675 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info) 4676 { 4677 struct btrfs_trans_handle *trans; 4678 struct btrfs_root *tree_root = fs_info->tree_root; 4679 struct btrfs_root *uuid_root; 4680 struct task_struct *task; 4681 int ret; 4682 4683 /* 4684 * 1 - root node 4685 * 1 - root item 4686 */ 4687 trans = btrfs_start_transaction(tree_root, 2); 4688 if (IS_ERR(trans)) 4689 return PTR_ERR(trans); 4690 4691 uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID); 4692 if (IS_ERR(uuid_root)) { 4693 ret = PTR_ERR(uuid_root); 4694 btrfs_abort_transaction(trans, ret); 4695 btrfs_end_transaction(trans); 4696 return ret; 4697 } 4698 4699 fs_info->uuid_root = uuid_root; 4700 4701 ret = btrfs_commit_transaction(trans); 4702 if (ret) 4703 return ret; 4704 4705 down(&fs_info->uuid_tree_rescan_sem); 4706 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid"); 4707 if (IS_ERR(task)) { 4708 /* fs_info->update_uuid_tree_gen remains 0 in all error cases */ 4709 btrfs_warn(fs_info, "failed to start uuid_scan task"); 4710 up(&fs_info->uuid_tree_rescan_sem); 4711 return PTR_ERR(task); 4712 } 4713 4714 return 0; 4715 } 4716 4717 /* 4718 * Shrinking a device means finding all of the device extents past 4719 * the new size, and then following the back refs to the chunks. 4720 * The chunk relocation code actually frees the device extent. 4721 */ 4722 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) 4723 { 4724 struct btrfs_fs_info *fs_info = device->fs_info; 4725 struct btrfs_root *root = fs_info->dev_root; 4726 struct btrfs_trans_handle *trans; 4727 struct btrfs_dev_extent *dev_extent = NULL; 4728 struct btrfs_path *path; 4729 u64 length; 4730 u64 chunk_offset; 4731 int ret; 4732 int slot; 4733 int failed = 0; 4734 bool retried = false; 4735 struct extent_buffer *l; 4736 struct btrfs_key key; 4737 struct btrfs_super_block *super_copy = fs_info->super_copy; 4738 u64 old_total = btrfs_super_total_bytes(super_copy); 4739 u64 old_size = btrfs_device_get_total_bytes(device); 4740 u64 diff; 4741 u64 start; 4742 4743 new_size = round_down(new_size, fs_info->sectorsize); 4744 start = new_size; 4745 diff = round_down(old_size - new_size, fs_info->sectorsize); 4746 4747 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 4748 return -EINVAL; 4749 4750 path = btrfs_alloc_path(); 4751 if (!path) 4752 return -ENOMEM; 4753 4754 path->reada = READA_BACK; 4755 4756 trans = btrfs_start_transaction(root, 0); 4757 if (IS_ERR(trans)) { 4758 btrfs_free_path(path); 4759 return PTR_ERR(trans); 4760 } 4761 4762 mutex_lock(&fs_info->chunk_mutex); 4763 4764 btrfs_device_set_total_bytes(device, new_size); 4765 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 4766 device->fs_devices->total_rw_bytes -= diff; 4767 atomic64_sub(diff, &fs_info->free_chunk_space); 4768 } 4769 4770 /* 4771 * Once the device's size has been set to the new size, ensure all 4772 * in-memory chunks are synced to disk so that the loop below sees them 4773 * and relocates them accordingly. 
4774 */ 4775 if (contains_pending_extent(device, &start, diff)) { 4776 mutex_unlock(&fs_info->chunk_mutex); 4777 ret = btrfs_commit_transaction(trans); 4778 if (ret) 4779 goto done; 4780 } else { 4781 mutex_unlock(&fs_info->chunk_mutex); 4782 btrfs_end_transaction(trans); 4783 } 4784 4785 again: 4786 key.objectid = device->devid; 4787 key.offset = (u64)-1; 4788 key.type = BTRFS_DEV_EXTENT_KEY; 4789 4790 do { 4791 mutex_lock(&fs_info->reclaim_bgs_lock); 4792 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4793 if (ret < 0) { 4794 mutex_unlock(&fs_info->reclaim_bgs_lock); 4795 goto done; 4796 } 4797 4798 ret = btrfs_previous_item(root, path, 0, key.type); 4799 if (ret) { 4800 mutex_unlock(&fs_info->reclaim_bgs_lock); 4801 if (ret < 0) 4802 goto done; 4803 ret = 0; 4804 btrfs_release_path(path); 4805 break; 4806 } 4807 4808 l = path->nodes[0]; 4809 slot = path->slots[0]; 4810 btrfs_item_key_to_cpu(l, &key, path->slots[0]); 4811 4812 if (key.objectid != device->devid) { 4813 mutex_unlock(&fs_info->reclaim_bgs_lock); 4814 btrfs_release_path(path); 4815 break; 4816 } 4817 4818 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 4819 length = btrfs_dev_extent_length(l, dev_extent); 4820 4821 if (key.offset + length <= new_size) { 4822 mutex_unlock(&fs_info->reclaim_bgs_lock); 4823 btrfs_release_path(path); 4824 break; 4825 } 4826 4827 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); 4828 btrfs_release_path(path); 4829 4830 /* 4831 * We may be relocating the only data chunk we have, 4832 * which could potentially end up with losing data's 4833 * raid profile, so lets allocate an empty one in 4834 * advance. 4835 */ 4836 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset); 4837 if (ret < 0) { 4838 mutex_unlock(&fs_info->reclaim_bgs_lock); 4839 goto done; 4840 } 4841 4842 ret = btrfs_relocate_chunk(fs_info, chunk_offset); 4843 mutex_unlock(&fs_info->reclaim_bgs_lock); 4844 if (ret == -ENOSPC) { 4845 failed++; 4846 } else if (ret) { 4847 if (ret == -ETXTBSY) { 4848 btrfs_warn(fs_info, 4849 "could not shrink block group %llu due to active swapfile", 4850 chunk_offset); 4851 } 4852 goto done; 4853 } 4854 } while (key.offset-- > 0); 4855 4856 if (failed && !retried) { 4857 failed = 0; 4858 retried = true; 4859 goto again; 4860 } else if (failed && retried) { 4861 ret = -ENOSPC; 4862 goto done; 4863 } 4864 4865 /* Shrinking succeeded, else we would be at "done". */ 4866 trans = btrfs_start_transaction(root, 0); 4867 if (IS_ERR(trans)) { 4868 ret = PTR_ERR(trans); 4869 goto done; 4870 } 4871 4872 mutex_lock(&fs_info->chunk_mutex); 4873 /* Clear all state bits beyond the shrunk device size */ 4874 clear_extent_bits(&device->alloc_state, new_size, (u64)-1, 4875 CHUNK_STATE_MASK); 4876 4877 btrfs_device_set_disk_total_bytes(device, new_size); 4878 if (list_empty(&device->post_commit_list)) 4879 list_add_tail(&device->post_commit_list, 4880 &trans->transaction->dev_update_list); 4881 4882 WARN_ON(diff > old_total); 4883 btrfs_set_super_total_bytes(super_copy, 4884 round_down(old_total - diff, fs_info->sectorsize)); 4885 mutex_unlock(&fs_info->chunk_mutex); 4886 4887 /* Now btrfs_update_device() will change the on-disk size. 
*/ 4888 ret = btrfs_update_device(trans, device); 4889 if (ret < 0) { 4890 btrfs_abort_transaction(trans, ret); 4891 btrfs_end_transaction(trans); 4892 } else { 4893 ret = btrfs_commit_transaction(trans); 4894 } 4895 done: 4896 btrfs_free_path(path); 4897 if (ret) { 4898 mutex_lock(&fs_info->chunk_mutex); 4899 btrfs_device_set_total_bytes(device, old_size); 4900 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 4901 device->fs_devices->total_rw_bytes += diff; 4902 atomic64_add(diff, &fs_info->free_chunk_space); 4903 mutex_unlock(&fs_info->chunk_mutex); 4904 } 4905 return ret; 4906 } 4907 4908 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info, 4909 struct btrfs_key *key, 4910 struct btrfs_chunk *chunk, int item_size) 4911 { 4912 struct btrfs_super_block *super_copy = fs_info->super_copy; 4913 struct btrfs_disk_key disk_key; 4914 u32 array_size; 4915 u8 *ptr; 4916 4917 lockdep_assert_held(&fs_info->chunk_mutex); 4918 4919 array_size = btrfs_super_sys_array_size(super_copy); 4920 if (array_size + item_size + sizeof(disk_key) 4921 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) 4922 return -EFBIG; 4923 4924 ptr = super_copy->sys_chunk_array + array_size; 4925 btrfs_cpu_key_to_disk(&disk_key, key); 4926 memcpy(ptr, &disk_key, sizeof(disk_key)); 4927 ptr += sizeof(disk_key); 4928 memcpy(ptr, chunk, item_size); 4929 item_size += sizeof(disk_key); 4930 btrfs_set_super_sys_array_size(super_copy, array_size + item_size); 4931 4932 return 0; 4933 } 4934 4935 /* 4936 * sort the devices in descending order by max_avail, total_avail 4937 */ 4938 static int btrfs_cmp_device_info(const void *a, const void *b) 4939 { 4940 const struct btrfs_device_info *di_a = a; 4941 const struct btrfs_device_info *di_b = b; 4942 4943 if (di_a->max_avail > di_b->max_avail) 4944 return -1; 4945 if (di_a->max_avail < di_b->max_avail) 4946 return 1; 4947 if (di_a->total_avail > di_b->total_avail) 4948 return -1; 4949 if (di_a->total_avail < di_b->total_avail) 4950 return 1; 4951 return 0; 4952 } 4953 4954 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type) 4955 { 4956 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK)) 4957 return; 4958 4959 btrfs_set_fs_incompat(info, RAID56); 4960 } 4961 4962 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type) 4963 { 4964 if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4))) 4965 return; 4966 4967 btrfs_set_fs_incompat(info, RAID1C34); 4968 } 4969 4970 /* 4971 * Structure used internally for btrfs_create_chunk() function. 4972 * Wraps needed parameters. 
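 *
 * Rough lifecycle, as used by btrfs_create_chunk() below:
 * init_alloc_chunk_ctl() fills in the per-profile limits,
 * gather_device_info() collects the usable devices,
 * decide_stripe_size() fixes stripe_size and chunk_size, and
 * create_chunk() consumes the result.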
4973 */ 4974 struct alloc_chunk_ctl { 4975 u64 start; 4976 u64 type; 4977 /* Total number of stripes to allocate */ 4978 int num_stripes; 4979 /* sub_stripes info for map */ 4980 int sub_stripes; 4981 /* Stripes per device */ 4982 int dev_stripes; 4983 /* Maximum number of devices to use */ 4984 int devs_max; 4985 /* Minimum number of devices to use */ 4986 int devs_min; 4987 /* ndevs has to be a multiple of this */ 4988 int devs_increment; 4989 /* Number of copies */ 4990 int ncopies; 4991 /* Number of stripes worth of bytes to store parity information */ 4992 int nparity; 4993 u64 max_stripe_size; 4994 u64 max_chunk_size; 4995 u64 dev_extent_min; 4996 u64 stripe_size; 4997 u64 chunk_size; 4998 int ndevs; 4999 }; 5000 5001 static void init_alloc_chunk_ctl_policy_regular( 5002 struct btrfs_fs_devices *fs_devices, 5003 struct alloc_chunk_ctl *ctl) 5004 { 5005 u64 type = ctl->type; 5006 5007 if (type & BTRFS_BLOCK_GROUP_DATA) { 5008 ctl->max_stripe_size = SZ_1G; 5009 ctl->max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE; 5010 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 5011 /* For larger filesystems, use larger metadata chunks */ 5012 if (fs_devices->total_rw_bytes > 50ULL * SZ_1G) 5013 ctl->max_stripe_size = SZ_1G; 5014 else 5015 ctl->max_stripe_size = SZ_256M; 5016 ctl->max_chunk_size = ctl->max_stripe_size; 5017 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 5018 ctl->max_stripe_size = SZ_32M; 5019 ctl->max_chunk_size = 2 * ctl->max_stripe_size; 5020 ctl->devs_max = min_t(int, ctl->devs_max, 5021 BTRFS_MAX_DEVS_SYS_CHUNK); 5022 } else { 5023 BUG(); 5024 } 5025 5026 /* We don't want a chunk larger than 10% of writable space */ 5027 ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1), 5028 ctl->max_chunk_size); 5029 ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes; 5030 } 5031 5032 static void init_alloc_chunk_ctl_policy_zoned( 5033 struct btrfs_fs_devices *fs_devices, 5034 struct alloc_chunk_ctl *ctl) 5035 { 5036 u64 zone_size = fs_devices->fs_info->zone_size; 5037 u64 limit; 5038 int min_num_stripes = ctl->devs_min * ctl->dev_stripes; 5039 int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies; 5040 u64 min_chunk_size = min_data_stripes * zone_size; 5041 u64 type = ctl->type; 5042 5043 ctl->max_stripe_size = zone_size; 5044 if (type & BTRFS_BLOCK_GROUP_DATA) { 5045 ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE, 5046 zone_size); 5047 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 5048 ctl->max_chunk_size = ctl->max_stripe_size; 5049 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 5050 ctl->max_chunk_size = 2 * ctl->max_stripe_size; 5051 ctl->devs_max = min_t(int, ctl->devs_max, 5052 BTRFS_MAX_DEVS_SYS_CHUNK); 5053 } else { 5054 BUG(); 5055 } 5056 5057 /* We don't want a chunk larger than 10% of writable space */ 5058 limit = max(round_down(div_factor(fs_devices->total_rw_bytes, 1), 5059 zone_size), 5060 min_chunk_size); 5061 ctl->max_chunk_size = min(limit, ctl->max_chunk_size); 5062 ctl->dev_extent_min = zone_size * ctl->dev_stripes; 5063 } 5064 5065 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices, 5066 struct alloc_chunk_ctl *ctl) 5067 { 5068 int index = btrfs_bg_flags_to_raid_index(ctl->type); 5069 5070 ctl->sub_stripes = btrfs_raid_array[index].sub_stripes; 5071 ctl->dev_stripes = btrfs_raid_array[index].dev_stripes; 5072 ctl->devs_max = btrfs_raid_array[index].devs_max; 5073 if (!ctl->devs_max) 5074 ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info); 5075 ctl->devs_min = btrfs_raid_array[index].devs_min; 5076 
ctl->devs_increment = btrfs_raid_array[index].devs_increment; 5077 ctl->ncopies = btrfs_raid_array[index].ncopies; 5078 ctl->nparity = btrfs_raid_array[index].nparity; 5079 ctl->ndevs = 0; 5080 5081 switch (fs_devices->chunk_alloc_policy) { 5082 case BTRFS_CHUNK_ALLOC_REGULAR: 5083 init_alloc_chunk_ctl_policy_regular(fs_devices, ctl); 5084 break; 5085 case BTRFS_CHUNK_ALLOC_ZONED: 5086 init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl); 5087 break; 5088 default: 5089 BUG(); 5090 } 5091 } 5092 5093 static int gather_device_info(struct btrfs_fs_devices *fs_devices, 5094 struct alloc_chunk_ctl *ctl, 5095 struct btrfs_device_info *devices_info) 5096 { 5097 struct btrfs_fs_info *info = fs_devices->fs_info; 5098 struct btrfs_device *device; 5099 u64 total_avail; 5100 u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes; 5101 int ret; 5102 int ndevs = 0; 5103 u64 max_avail; 5104 u64 dev_offset; 5105 5106 /* 5107 * in the first pass through the devices list, we gather information 5108 * about the available holes on each device. 5109 */ 5110 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { 5111 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 5112 WARN(1, KERN_ERR 5113 "BTRFS: read-only device in alloc_list\n"); 5114 continue; 5115 } 5116 5117 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 5118 &device->dev_state) || 5119 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 5120 continue; 5121 5122 if (device->total_bytes > device->bytes_used) 5123 total_avail = device->total_bytes - device->bytes_used; 5124 else 5125 total_avail = 0; 5126 5127 /* If there is no space on this device, skip it. */ 5128 if (total_avail < ctl->dev_extent_min) 5129 continue; 5130 5131 ret = find_free_dev_extent(device, dev_extent_want, &dev_offset, 5132 &max_avail); 5133 if (ret && ret != -ENOSPC) 5134 return ret; 5135 5136 if (ret == 0) 5137 max_avail = dev_extent_want; 5138 5139 if (max_avail < ctl->dev_extent_min) { 5140 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5141 btrfs_debug(info, 5142 "%s: devid %llu has no free space, have=%llu want=%llu", 5143 __func__, device->devid, max_avail, 5144 ctl->dev_extent_min); 5145 continue; 5146 } 5147 5148 if (ndevs == fs_devices->rw_devices) { 5149 WARN(1, "%s: found more than %llu devices\n", 5150 __func__, fs_devices->rw_devices); 5151 break; 5152 } 5153 devices_info[ndevs].dev_offset = dev_offset; 5154 devices_info[ndevs].max_avail = max_avail; 5155 devices_info[ndevs].total_avail = total_avail; 5156 devices_info[ndevs].dev = device; 5157 ++ndevs; 5158 } 5159 ctl->ndevs = ndevs; 5160 5161 /* 5162 * now sort the devices by hole size / available space 5163 */ 5164 sort(devices_info, ndevs, sizeof(struct btrfs_device_info), 5165 btrfs_cmp_device_info, NULL); 5166 5167 return 0; 5168 } 5169 5170 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl, 5171 struct btrfs_device_info *devices_info) 5172 { 5173 /* Number of stripes that count for block group size */ 5174 int data_stripes; 5175 5176 /* 5177 * The primary goal is to maximize the number of stripes, so use as 5178 * many devices as possible, even if the stripes are not maximum sized. 5179 * 5180 * The DUP profile stores more than one stripe per device, the 5181 * max_avail is the total size so we have to adjust. 
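 *
 * Worked example with illustrative numbers: for DUP, dev_stripes == 2,
 * so if the smallest usable device has a 2GiB hole, the initial
 * stripe_size below is 1GiB, before the max_chunk_size clamp.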
*/ ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail, 5184 ctl->dev_stripes); 5185 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5186 5187 /* This will have to be fixed for RAID1 and RAID10 over more drives */ 5188 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5189 5190 /* 5191 * Use the number of data stripes to figure out how big this chunk is 5192 * really going to be in terms of logical address space, and compare 5193 * that answer with the max chunk size. If it's higher, we try to 5194 * reduce stripe_size. 5195 */ 5196 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) { 5197 /* 5198 * Reduce stripe_size, round it up to a 16MB boundary again and 5199 * then use it, unless it ends up being even bigger than the 5200 * previous value we had already. 5201 */ 5202 ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size, 5203 data_stripes), SZ_16M), 5204 ctl->stripe_size); 5205 } 5206 5207 /* Align to BTRFS_STRIPE_LEN */ 5208 ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN); 5209 ctl->chunk_size = ctl->stripe_size * data_stripes; 5210 5211 return 0; 5212 } 5213 5214 static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl, 5215 struct btrfs_device_info *devices_info) 5216 { 5217 u64 zone_size = devices_info[0].dev->zone_info->zone_size; 5218 /* Number of stripes that count for block group size */ 5219 int data_stripes; 5220 5221 /* 5222 * It should hold because: 5223 * dev_extent_min == dev_extent_want == zone_size * dev_stripes 5224 */ 5225 ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min); 5226 5227 ctl->stripe_size = zone_size; 5228 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5229 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5230 5231 /* stripe_size is fixed in zoned filesystems. Reduce ndevs instead. */ 5232 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) { 5233 ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies, 5234 ctl->stripe_size) + ctl->nparity, 5235 ctl->dev_stripes); 5236 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5237 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5238 ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size); 5239 } 5240 5241 ctl->chunk_size = ctl->stripe_size * data_stripes; 5242 5243 return 0; 5244 } 5245 5246 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices, 5247 struct alloc_chunk_ctl *ctl, 5248 struct btrfs_device_info *devices_info) 5249 { 5250 struct btrfs_fs_info *info = fs_devices->fs_info; 5251 5252 /* 5253 * Round down to the number of usable stripes; devs_increment can be any 5254 * number, so we can't use round_down(), which requires a power of 2, while 5255 * rounddown() is safe. 
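 *
 * E.g. a profile with devs_increment == 2 turns ndevs == 5 into 4,
 * and one with devs_increment == 3 turns ndevs == 5 into 3.
 */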
5256 */ 5257 ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment); 5258 5259 if (ctl->ndevs < ctl->devs_min) { 5260 if (btrfs_test_opt(info, ENOSPC_DEBUG)) { 5261 btrfs_debug(info, 5262 "%s: not enough devices with free space: have=%d minimum required=%d", 5263 __func__, ctl->ndevs, ctl->devs_min); 5264 } 5265 return -ENOSPC; 5266 } 5267 5268 ctl->ndevs = min(ctl->ndevs, ctl->devs_max); 5269 5270 switch (fs_devices->chunk_alloc_policy) { 5271 case BTRFS_CHUNK_ALLOC_REGULAR: 5272 return decide_stripe_size_regular(ctl, devices_info); 5273 case BTRFS_CHUNK_ALLOC_ZONED: 5274 return decide_stripe_size_zoned(ctl, devices_info); 5275 default: 5276 BUG(); 5277 } 5278 } 5279 5280 static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans, 5281 struct alloc_chunk_ctl *ctl, 5282 struct btrfs_device_info *devices_info) 5283 { 5284 struct btrfs_fs_info *info = trans->fs_info; 5285 struct map_lookup *map = NULL; 5286 struct extent_map_tree *em_tree; 5287 struct btrfs_block_group *block_group; 5288 struct extent_map *em; 5289 u64 start = ctl->start; 5290 u64 type = ctl->type; 5291 int ret; 5292 int i; 5293 int j; 5294 5295 map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS); 5296 if (!map) 5297 return ERR_PTR(-ENOMEM); 5298 map->num_stripes = ctl->num_stripes; 5299 5300 for (i = 0; i < ctl->ndevs; ++i) { 5301 for (j = 0; j < ctl->dev_stripes; ++j) { 5302 int s = i * ctl->dev_stripes + j; 5303 map->stripes[s].dev = devices_info[i].dev; 5304 map->stripes[s].physical = devices_info[i].dev_offset + 5305 j * ctl->stripe_size; 5306 } 5307 } 5308 map->stripe_len = BTRFS_STRIPE_LEN; 5309 map->io_align = BTRFS_STRIPE_LEN; 5310 map->io_width = BTRFS_STRIPE_LEN; 5311 map->type = type; 5312 map->sub_stripes = ctl->sub_stripes; 5313 5314 trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size); 5315 5316 em = alloc_extent_map(); 5317 if (!em) { 5318 kfree(map); 5319 return ERR_PTR(-ENOMEM); 5320 } 5321 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 5322 em->map_lookup = map; 5323 em->start = start; 5324 em->len = ctl->chunk_size; 5325 em->block_start = 0; 5326 em->block_len = em->len; 5327 em->orig_block_len = ctl->stripe_size; 5328 5329 em_tree = &info->mapping_tree; 5330 write_lock(&em_tree->lock); 5331 ret = add_extent_mapping(em_tree, em, 0); 5332 if (ret) { 5333 write_unlock(&em_tree->lock); 5334 free_extent_map(em); 5335 return ERR_PTR(ret); 5336 } 5337 write_unlock(&em_tree->lock); 5338 5339 block_group = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size); 5340 if (IS_ERR(block_group)) 5341 goto error_del_extent; 5342 5343 for (i = 0; i < map->num_stripes; i++) { 5344 struct btrfs_device *dev = map->stripes[i].dev; 5345 5346 btrfs_device_set_bytes_used(dev, 5347 dev->bytes_used + ctl->stripe_size); 5348 if (list_empty(&dev->post_commit_list)) 5349 list_add_tail(&dev->post_commit_list, 5350 &trans->transaction->dev_update_list); 5351 } 5352 5353 atomic64_sub(ctl->stripe_size * map->num_stripes, 5354 &info->free_chunk_space); 5355 5356 free_extent_map(em); 5357 check_raid56_incompat_flag(info, type); 5358 check_raid1c34_incompat_flag(info, type); 5359 5360 return block_group; 5361 5362 error_del_extent: 5363 write_lock(&em_tree->lock); 5364 remove_extent_mapping(em_tree, em); 5365 write_unlock(&em_tree->lock); 5366 5367 /* One for our allocation */ 5368 free_extent_map(em); 5369 /* One for the tree reference */ 5370 free_extent_map(em); 5371 5372 return block_group; 5373 } 5374 5375 struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans, 5376 u64 
type) 5377 { 5378 struct btrfs_fs_info *info = trans->fs_info; 5379 struct btrfs_fs_devices *fs_devices = info->fs_devices; 5380 struct btrfs_device_info *devices_info = NULL; 5381 struct alloc_chunk_ctl ctl; 5382 struct btrfs_block_group *block_group; 5383 int ret; 5384 5385 lockdep_assert_held(&info->chunk_mutex); 5386 5387 if (!alloc_profile_is_valid(type, 0)) { 5388 ASSERT(0); 5389 return ERR_PTR(-EINVAL); 5390 } 5391 5392 if (list_empty(&fs_devices->alloc_list)) { 5393 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5394 btrfs_debug(info, "%s: no writable device", __func__); 5395 return ERR_PTR(-ENOSPC); 5396 } 5397 5398 if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { 5399 btrfs_err(info, "invalid chunk type 0x%llx requested", type); 5400 ASSERT(0); 5401 return ERR_PTR(-EINVAL); 5402 } 5403 5404 ctl.start = find_next_chunk(info); 5405 ctl.type = type; 5406 init_alloc_chunk_ctl(fs_devices, &ctl); 5407 5408 devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info), 5409 GFP_NOFS); 5410 if (!devices_info) 5411 return ERR_PTR(-ENOMEM); 5412 5413 ret = gather_device_info(fs_devices, &ctl, devices_info); 5414 if (ret < 0) { 5415 block_group = ERR_PTR(ret); 5416 goto out; 5417 } 5418 5419 ret = decide_stripe_size(fs_devices, &ctl, devices_info); 5420 if (ret < 0) { 5421 block_group = ERR_PTR(ret); 5422 goto out; 5423 } 5424 5425 block_group = create_chunk(trans, &ctl, devices_info); 5426 5427 out: 5428 kfree(devices_info); 5429 return block_group; 5430 } 5431 5432 /* 5433 * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to 5434 * phase 1 of chunk allocation. It belongs to phase 2 only when allocating system 5435 * chunks. 5436 * 5437 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 5438 * phases. 5439 */ 5440 int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans, 5441 struct btrfs_block_group *bg) 5442 { 5443 struct btrfs_fs_info *fs_info = trans->fs_info; 5444 struct btrfs_root *extent_root = fs_info->extent_root; 5445 struct btrfs_root *chunk_root = fs_info->chunk_root; 5446 struct btrfs_key key; 5447 struct btrfs_chunk *chunk; 5448 struct btrfs_stripe *stripe; 5449 struct extent_map *em; 5450 struct map_lookup *map; 5451 size_t item_size; 5452 int i; 5453 int ret; 5454 5455 /* 5456 * We take the chunk_mutex for 2 reasons: 5457 * 5458 * 1) Updates and insertions in the chunk btree must be done while holding 5459 * the chunk_mutex, as well as updating the system chunk array in the 5460 * superblock. See the comment on top of btrfs_chunk_alloc() for the 5461 * details; 5462 * 5463 * 2) To prevent races with the final phase of a device replace operation 5464 * that replaces the device object associated with the map's stripes, 5465 * because the device object's id can change at any time during that 5466 * final phase of the device replace operation 5467 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 5468 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID, 5469 * which would cause a failure when updating the device item, which does 5470 * not exist, or persisting a stripe of the chunk item with such an ID. 5471 * Here we can't use the device_list_mutex because our caller already 5472 * has locked the chunk_mutex, and the final phase of device replace 5473 * acquires both mutexes - first the device_list_mutex and then the 5474 * chunk_mutex. Using any of those two mutexes protects us from a 5475 * concurrent device replace. 
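 *
 * In short, the lock ordering relied on here is (outermost first):
 *
 *	device_list_mutex
 *	chunk_mutex
 *
 * and holding either of them is enough to serialize against the final
 * phase of device replace.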
5476 */ 5477 lockdep_assert_held(&fs_info->chunk_mutex); 5478 5479 em = btrfs_get_chunk_map(fs_info, bg->start, bg->length); 5480 if (IS_ERR(em)) { 5481 ret = PTR_ERR(em); 5482 btrfs_abort_transaction(trans, ret); 5483 return ret; 5484 } 5485 5486 map = em->map_lookup; 5487 item_size = btrfs_chunk_item_size(map->num_stripes); 5488 5489 chunk = kzalloc(item_size, GFP_NOFS); 5490 if (!chunk) { 5491 ret = -ENOMEM; 5492 btrfs_abort_transaction(trans, ret); 5493 goto out; 5494 } 5495 5496 for (i = 0; i < map->num_stripes; i++) { 5497 struct btrfs_device *device = map->stripes[i].dev; 5498 5499 ret = btrfs_update_device(trans, device); 5500 if (ret) 5501 goto out; 5502 } 5503 5504 stripe = &chunk->stripe; 5505 for (i = 0; i < map->num_stripes; i++) { 5506 struct btrfs_device *device = map->stripes[i].dev; 5507 const u64 dev_offset = map->stripes[i].physical; 5508 5509 btrfs_set_stack_stripe_devid(stripe, device->devid); 5510 btrfs_set_stack_stripe_offset(stripe, dev_offset); 5511 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE); 5512 stripe++; 5513 } 5514 5515 btrfs_set_stack_chunk_length(chunk, bg->length); 5516 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid); 5517 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len); 5518 btrfs_set_stack_chunk_type(chunk, map->type); 5519 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes); 5520 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len); 5521 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len); 5522 btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize); 5523 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes); 5524 5525 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 5526 key.type = BTRFS_CHUNK_ITEM_KEY; 5527 key.offset = bg->start; 5528 5529 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); 5530 if (ret) 5531 goto out; 5532 5533 bg->chunk_item_inserted = 1; 5534 5535 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 5536 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size); 5537 if (ret) 5538 goto out; 5539 } 5540 5541 out: 5542 kfree(chunk); 5543 free_extent_map(em); 5544 return ret; 5545 } 5546 5547 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans) 5548 { 5549 struct btrfs_fs_info *fs_info = trans->fs_info; 5550 u64 alloc_profile; 5551 struct btrfs_block_group *meta_bg; 5552 struct btrfs_block_group *sys_bg; 5553 5554 /* 5555 * When adding a new device for sprouting, the seed device is read-only 5556 * so we must first allocate a metadata and a system chunk. But before 5557 * adding the block group items to the extent, device and chunk btrees, 5558 * we must first: 5559 * 5560 * 1) Create both chunks without doing any changes to the btrees, as 5561 * otherwise we would get -ENOSPC since the block groups from the 5562 * seed device are read-only; 5563 * 5564 * 2) Add the device item for the new sprout device - finishing the setup 5565 * of a new block group requires updating the device item in the chunk 5566 * btree, so it must exist when we attempt to do it. The previous step 5567 * ensures this does not fail with -ENOSPC. 5568 * 5569 * After that we can add the block group items to their btrees: 5570 * update existing device item in the chunk btree, add a new block group 5571 * item to the extent btree, add a new chunk item to the chunk btree and 5572 * finally add the new device extent items to the devices btree. 
5573 */ 5574 5575 alloc_profile = btrfs_metadata_alloc_profile(fs_info); 5576 meta_bg = btrfs_create_chunk(trans, alloc_profile); 5577 if (IS_ERR(meta_bg)) 5578 return PTR_ERR(meta_bg); 5579 5580 alloc_profile = btrfs_system_alloc_profile(fs_info); 5581 sys_bg = btrfs_create_chunk(trans, alloc_profile); 5582 if (IS_ERR(sys_bg)) 5583 return PTR_ERR(sys_bg); 5584 5585 return 0; 5586 } 5587 5588 static inline int btrfs_chunk_max_errors(struct map_lookup *map) 5589 { 5590 const int index = btrfs_bg_flags_to_raid_index(map->type); 5591 5592 return btrfs_raid_array[index].tolerated_failures; 5593 } 5594 5595 bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset) 5596 { 5597 struct extent_map *em; 5598 struct map_lookup *map; 5599 int miss_ndevs = 0; 5600 int i; 5601 bool ret = true; 5602 5603 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 5604 if (IS_ERR(em)) 5605 return false; 5606 5607 map = em->map_lookup; 5608 for (i = 0; i < map->num_stripes; i++) { 5609 if (test_bit(BTRFS_DEV_STATE_MISSING, 5610 &map->stripes[i].dev->dev_state)) { 5611 miss_ndevs++; 5612 continue; 5613 } 5614 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, 5615 &map->stripes[i].dev->dev_state)) { 5616 ret = false; 5617 goto end; 5618 } 5619 } 5620 5621 /* 5622 * If the number of missing devices is larger than max errors, we can 5623 * not write the data into that chunk successfully. 5624 */ 5625 if (miss_ndevs > btrfs_chunk_max_errors(map)) 5626 ret = false; 5627 end: 5628 free_extent_map(em); 5629 return ret; 5630 } 5631 5632 void btrfs_mapping_tree_free(struct extent_map_tree *tree) 5633 { 5634 struct extent_map *em; 5635 5636 while (1) { 5637 write_lock(&tree->lock); 5638 em = lookup_extent_mapping(tree, 0, (u64)-1); 5639 if (em) 5640 remove_extent_mapping(tree, em); 5641 write_unlock(&tree->lock); 5642 if (!em) 5643 break; 5644 /* once for us */ 5645 free_extent_map(em); 5646 /* once for the tree */ 5647 free_extent_map(em); 5648 } 5649 } 5650 5651 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5652 { 5653 struct extent_map *em; 5654 struct map_lookup *map; 5655 int ret; 5656 5657 em = btrfs_get_chunk_map(fs_info, logical, len); 5658 if (IS_ERR(em)) 5659 /* 5660 * We could return errors for these cases, but that could get 5661 * ugly and we'd probably do the same thing which is just not do 5662 * anything else and exit, so return 1 so the callers don't try 5663 * to use other copies. 5664 */ 5665 return 1; 5666 5667 map = em->map_lookup; 5668 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK)) 5669 ret = map->num_stripes; 5670 else if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5671 ret = map->sub_stripes; 5672 else if (map->type & BTRFS_BLOCK_GROUP_RAID5) 5673 ret = 2; 5674 else if (map->type & BTRFS_BLOCK_GROUP_RAID6) 5675 /* 5676 * There could be two corrupted data stripes, we need 5677 * to loop retry in order to rebuild the correct data. 5678 * 5679 * Fail a stripe at a time on every retry except the 5680 * stripe under reconstruction. 
5681 */ 5682 ret = map->num_stripes; 5683 else 5684 ret = 1; 5685 free_extent_map(em); 5686 5687 down_read(&fs_info->dev_replace.rwsem); 5688 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) && 5689 fs_info->dev_replace.tgtdev) 5690 ret++; 5691 up_read(&fs_info->dev_replace.rwsem); 5692 5693 return ret; 5694 } 5695 5696 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info, 5697 u64 logical) 5698 { 5699 struct extent_map *em; 5700 struct map_lookup *map; 5701 unsigned long len = fs_info->sectorsize; 5702 5703 em = btrfs_get_chunk_map(fs_info, logical, len); 5704 5705 if (!WARN_ON(IS_ERR(em))) { 5706 map = em->map_lookup; 5707 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5708 len = map->stripe_len * nr_data_stripes(map); 5709 free_extent_map(em); 5710 } 5711 return len; 5712 } 5713 5714 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5715 { 5716 struct extent_map *em; 5717 struct map_lookup *map; 5718 int ret = 0; 5719 5720 em = btrfs_get_chunk_map(fs_info, logical, len); 5721 5722 if(!WARN_ON(IS_ERR(em))) { 5723 map = em->map_lookup; 5724 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5725 ret = 1; 5726 free_extent_map(em); 5727 } 5728 return ret; 5729 } 5730 5731 static int find_live_mirror(struct btrfs_fs_info *fs_info, 5732 struct map_lookup *map, int first, 5733 int dev_replace_is_ongoing) 5734 { 5735 int i; 5736 int num_stripes; 5737 int preferred_mirror; 5738 int tolerance; 5739 struct btrfs_device *srcdev; 5740 5741 ASSERT((map->type & 5742 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10))); 5743 5744 if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5745 num_stripes = map->sub_stripes; 5746 else 5747 num_stripes = map->num_stripes; 5748 5749 switch (fs_info->fs_devices->read_policy) { 5750 default: 5751 /* Shouldn't happen, just warn and use pid instead of failing */ 5752 btrfs_warn_rl(fs_info, 5753 "unknown read_policy type %u, reset to pid", 5754 fs_info->fs_devices->read_policy); 5755 fs_info->fs_devices->read_policy = BTRFS_READ_POLICY_PID; 5756 fallthrough; 5757 case BTRFS_READ_POLICY_PID: 5758 preferred_mirror = first + (current->pid % num_stripes); 5759 break; 5760 } 5761 5762 if (dev_replace_is_ongoing && 5763 fs_info->dev_replace.cont_reading_from_srcdev_mode == 5764 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID) 5765 srcdev = fs_info->dev_replace.srcdev; 5766 else 5767 srcdev = NULL; 5768 5769 /* 5770 * try to avoid the drive that is the source drive for a 5771 * dev-replace procedure, only choose it if no other non-missing 5772 * mirror is available 5773 */ 5774 for (tolerance = 0; tolerance < 2; tolerance++) { 5775 if (map->stripes[preferred_mirror].dev->bdev && 5776 (tolerance || map->stripes[preferred_mirror].dev != srcdev)) 5777 return preferred_mirror; 5778 for (i = first; i < first + num_stripes; i++) { 5779 if (map->stripes[i].dev->bdev && 5780 (tolerance || map->stripes[i].dev != srcdev)) 5781 return i; 5782 } 5783 } 5784 5785 /* we couldn't find one that doesn't fail. 
Just return something 5786 * and the I/O error handling code will clean up eventually. 5787 */ 5788 return preferred_mirror; 5789 } 5790 5791 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */ 5792 static void sort_parity_stripes(struct btrfs_io_context *bioc, int num_stripes) 5793 { 5794 int i; 5795 int again = 1; 5796 5797 while (again) { 5798 again = 0; 5799 for (i = 0; i < num_stripes - 1; i++) { 5800 /* Swap if parity is on a smaller index */ 5801 if (bioc->raid_map[i] > bioc->raid_map[i + 1]) { 5802 swap(bioc->stripes[i], bioc->stripes[i + 1]); 5803 swap(bioc->raid_map[i], bioc->raid_map[i + 1]); 5804 again = 1; 5805 } 5806 } 5807 } 5808 } 5809 5810 static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info, 5811 int total_stripes, 5812 int real_stripes) 5813 { 5814 struct btrfs_io_context *bioc = kzalloc( 5815 /* The size of btrfs_io_context */ 5816 sizeof(struct btrfs_io_context) + 5817 /* Plus the variable array for the stripes */ 5818 sizeof(struct btrfs_io_stripe) * (total_stripes) + 5819 /* Plus the variable array for the tgt dev */ 5820 sizeof(int) * (real_stripes) + 5821 /* 5822 * Plus the raid_map, which includes both the tgt dev 5823 * and the stripes. 5824 */ 5825 sizeof(u64) * (total_stripes), 5826 GFP_NOFS|__GFP_NOFAIL); 5827 5828 atomic_set(&bioc->error, 0); 5829 refcount_set(&bioc->refs, 1); 5830 5831 bioc->fs_info = fs_info; 5832 bioc->tgtdev_map = (int *)(bioc->stripes + total_stripes); 5833 bioc->raid_map = (u64 *)(bioc->tgtdev_map + real_stripes); 5834 5835 return bioc; 5836 } 5837 5838 void btrfs_get_bioc(struct btrfs_io_context *bioc) 5839 { 5840 WARN_ON(!refcount_read(&bioc->refs)); 5841 refcount_inc(&bioc->refs); 5842 } 5843 5844 void btrfs_put_bioc(struct btrfs_io_context *bioc) 5845 { 5846 if (!bioc) 5847 return; 5848 if (refcount_dec_and_test(&bioc->refs)) 5849 kfree(bioc); 5850 } 5851 5852 /* Can REQ_OP_DISCARD be sent with other REQs like REQ_OP_WRITE? 5853 * 5854 * Note that discard won't be sent to the target device of a device 5855 * replace. 5856 */ 5857 static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info, 5858 u64 logical, u64 *length_ret, 5859 struct btrfs_io_context **bioc_ret) 5860 { 5861 struct extent_map *em; 5862 struct map_lookup *map; 5863 struct btrfs_io_context *bioc; 5864 u64 length = *length_ret; 5865 u64 offset; 5866 u64 stripe_nr; 5867 u64 stripe_nr_end; 5868 u64 stripe_end_offset; 5869 u64 stripe_cnt; 5870 u64 stripe_len; 5871 u64 stripe_offset; 5872 u64 num_stripes; 5873 u32 stripe_index; 5874 u32 factor = 0; 5875 u32 sub_stripes = 0; 5876 u64 stripes_per_dev = 0; 5877 u32 remaining_stripes = 0; 5878 u32 last_stripe = 0; 5879 int ret = 0; 5880 int i; 5881 5882 /* Discard always returns a bioc. 
/* Can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */
/*
 * Please note that discard won't be sent to the target device of a
 * device replace.
 */
static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
					 u64 logical, u64 *length_ret,
					 struct btrfs_io_context **bioc_ret)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_io_context *bioc;
	u64 length = *length_ret;
	u64 offset;
	u64 stripe_nr;
	u64 stripe_nr_end;
	u64 stripe_end_offset;
	u64 stripe_cnt;
	u64 stripe_len;
	u64 stripe_offset;
	u64 num_stripes;
	u32 stripe_index;
	u32 factor = 0;
	u32 sub_stripes = 0;
	u64 stripes_per_dev = 0;
	u32 remaining_stripes = 0;
	u32 last_stripe = 0;
	int ret = 0;
	int i;

	/* Discard always returns a bioc. */
	ASSERT(bioc_ret);

	em = btrfs_get_chunk_map(fs_info, logical, length);
	if (IS_ERR(em))
		return PTR_ERR(em);

	map = em->map_lookup;
	/* We don't discard raid56 yet */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	offset = logical - em->start;
	length = min_t(u64, em->start + em->len - logical, length);
	*length_ret = length;

	stripe_len = map->stripe_len;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	stripe_nr = div64_u64(offset, stripe_len);

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_nr * stripe_len;

	stripe_nr_end = round_up(offset + length, map->stripe_len);
	stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
	stripe_cnt = stripe_nr_end - stripe_nr;
	stripe_end_offset = stripe_nr_end * map->stripe_len -
			    (offset + length);
	/*
	 * After this, stripe_nr is the number of stripes on this
	 * device we have to walk to find the data, and stripe_index is
	 * the number of our device in the stripe array
	 */
	num_stripes = 1;
	stripe_index = 0;
	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
			 BTRFS_BLOCK_GROUP_RAID10)) {
		if (map->type & BTRFS_BLOCK_GROUP_RAID0)
			sub_stripes = 1;
		else
			sub_stripes = map->sub_stripes;

		factor = map->num_stripes / sub_stripes;
		num_stripes = min_t(u64, map->num_stripes,
				    sub_stripes * stripe_cnt);
		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
		stripe_index *= sub_stripes;
		stripes_per_dev = div_u64_rem(stripe_cnt, factor,
					      &remaining_stripes);
		div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
		last_stripe *= sub_stripes;
	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
				BTRFS_BLOCK_GROUP_DUP)) {
		num_stripes = map->num_stripes;
	} else {
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
					&stripe_index);
	}

	bioc = alloc_btrfs_io_context(fs_info, num_stripes, 0);
	if (!bioc) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_stripes; i++) {
		bioc->stripes[i].physical =
			map->stripes[stripe_index].physical +
			stripe_offset + stripe_nr * map->stripe_len;
		bioc->stripes[i].dev = map->stripes[stripe_index].dev;

		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
				 BTRFS_BLOCK_GROUP_RAID10)) {
			bioc->stripes[i].length = stripes_per_dev *
				map->stripe_len;

			if (i / sub_stripes < remaining_stripes)
				bioc->stripes[i].length += map->stripe_len;

			/*
			 * Special for the first stripe and
			 * the last stripe:
			 *
			 * |-------|...|-------|
			 *     |----------|
			 *    off          end_off
			 */
			if (i < sub_stripes)
				bioc->stripes[i].length -= stripe_offset;

			if (stripe_index >= last_stripe &&
			    stripe_index <= (last_stripe +
					     sub_stripes - 1))
				bioc->stripes[i].length -= stripe_end_offset;

			if (i == sub_stripes - 1)
				stripe_offset = 0;
		} else {
			bioc->stripes[i].length = length;
		}

		stripe_index++;
		if (stripe_index == map->num_stripes) {
			stripe_index = 0;
			stripe_nr++;
		}
	}

	*bioc_ret = bioc;
	bioc->map_type = map->type;
	bioc->num_stripes = num_stripes;
out:
	free_extent_map(em);
	return ret;
}
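/*
 * Editor's worked example (illustrative numbers only): with
 * stripe_len = 64K, a discard of offset = 96K, length = 192K gives
 *
 *	stripe_nr         = 96K / 64K                 = 1
 *	stripe_offset     = 96K - 1 * 64K             = 32K
 *	stripe_nr_end     = round_up(288K, 64K) / 64K = 5
 *	stripe_cnt        = 5 - 1                     = 4
 *	stripe_end_offset = 5 * 64K - 288K            = 32K
 *
 * i.e. the range touches stripes 1..4, entering 32K into the first one
 * and stopping 32K short of the end of the last one.
 */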
/*
 * In the dev-replace case, for repair (the only case where the mirror
 * is selected explicitly when calling btrfs_map_block), blocks left of
 * the left cursor can also be read from the target drive.
 *
 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
 * array of stripes.
 * For READ, it also needs to be supported using the same mirror number.
 *
 * If the requested block is not left of the left cursor, EIO is returned. This
 * can happen because btrfs_num_copies() returns one more in the dev-replace
 * case.
 */
static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
					 u64 logical, u64 length,
					 u64 srcdev_devid, int *mirror_num,
					 u64 *physical)
{
	struct btrfs_io_context *bioc = NULL;
	int num_stripes;
	int index_srcdev = 0;
	int found = 0;
	u64 physical_of_found = 0;
	int i;
	int ret = 0;

	ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
				logical, &length, &bioc, 0, 0);
	if (ret) {
		ASSERT(bioc == NULL);
		return ret;
	}

	num_stripes = bioc->num_stripes;
	if (*mirror_num > num_stripes) {
		/*
		 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
		 * that means that the requested area is not left of the left
		 * cursor
		 */
		btrfs_put_bioc(bioc);
		return -EIO;
	}

	/*
	 * Process the rest of the function using the mirror_num of the
	 * source drive. Therefore look it up first. At the end, patch the
	 * device pointer to the one of the target drive.
	 */
	for (i = 0; i < num_stripes; i++) {
		if (bioc->stripes[i].dev->devid != srcdev_devid)
			continue;

		/*
		 * In case of DUP, in order to keep it simple, only add
		 * the mirror with the lowest physical address
		 */
		if (found &&
		    physical_of_found <= bioc->stripes[i].physical)
			continue;

		index_srcdev = i;
		found = 1;
		physical_of_found = bioc->stripes[i].physical;
	}

	btrfs_put_bioc(bioc);

	ASSERT(found);
	if (!found)
		return -EIO;

	*mirror_num = index_srcdev + 1;
	*physical = physical_of_found;
	return ret;
}

static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical)
{
	struct btrfs_block_group *cache;
	bool ret;

	/* Non zoned filesystem does not use "to_copy" flag */
	if (!btrfs_is_zoned(fs_info))
		return false;

	cache = btrfs_lookup_block_group(fs_info, logical);

	spin_lock(&cache->lock);
	ret = cache->to_copy;
	spin_unlock(&cache->lock);

	btrfs_put_block_group(cache);
	return ret;
}
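/*
 * Editor's note (illustrative): for a DUP chunk both stripes live on
 * the same device, e.g. stripe 0 at physical 1G and stripe 1 at
 * physical 2G on devid 1.  The lowest-physical rule above then always
 * keeps the 1G copy, so the reported mirror/physical pair is
 * deterministic.
 */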
static void handle_ops_on_dev_replace(enum btrfs_map_op op,
				      struct btrfs_io_context **bioc_ret,
				      struct btrfs_dev_replace *dev_replace,
				      u64 logical,
				      int *num_stripes_ret, int *max_errors_ret)
{
	struct btrfs_io_context *bioc = *bioc_ret;
	u64 srcdev_devid = dev_replace->srcdev->devid;
	int tgtdev_indexes = 0;
	int num_stripes = *num_stripes_ret;
	int max_errors = *max_errors_ret;
	int i;

	if (op == BTRFS_MAP_WRITE) {
		int index_where_to_add;

		/*
		 * A block group which has "to_copy" set will eventually be
		 * copied by the dev-replace process. We can avoid cloning IO
		 * here.
		 */
		if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical))
			return;

		/*
		 * Duplicate the write operations while the dev-replace
		 * procedure is running. Since the copying of the old disk to
		 * the new disk takes place at run time while the filesystem is
		 * mounted writable, the regular write operations to the old
		 * disk have to be duplicated to go to the new disk as well.
		 *
		 * Note that device->missing is handled by the caller, and that
		 * the write to the old disk is already set up in the stripes
		 * array.
		 */
		index_where_to_add = num_stripes;
		for (i = 0; i < num_stripes; i++) {
			if (bioc->stripes[i].dev->devid == srcdev_devid) {
				/* Write to the new disk, too */
				struct btrfs_io_stripe *new =
					bioc->stripes + index_where_to_add;
				struct btrfs_io_stripe *old =
					bioc->stripes + i;

				new->physical = old->physical;
				new->length = old->length;
				new->dev = dev_replace->tgtdev;
				bioc->tgtdev_map[i] = index_where_to_add;
				index_where_to_add++;
				max_errors++;
				tgtdev_indexes++;
			}
		}
		num_stripes = index_where_to_add;
	} else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
		int index_srcdev = 0;
		int found = 0;
		u64 physical_of_found = 0;

		/*
		 * During the dev-replace procedure, the target drive can also
		 * be used to read data in case it is needed to repair a corrupt
		 * block elsewhere. This is possible if the requested area is
		 * left of the left cursor. In this area, the target drive is a
		 * full copy of the source drive.
		 */
		for (i = 0; i < num_stripes; i++) {
			if (bioc->stripes[i].dev->devid == srcdev_devid) {
				/*
				 * In case of DUP, in order to keep it simple,
				 * only add the mirror with the lowest physical
				 * address
				 */
				if (found &&
				    physical_of_found <= bioc->stripes[i].physical)
					continue;
				index_srcdev = i;
				found = 1;
				physical_of_found = bioc->stripes[i].physical;
			}
		}
		if (found) {
			struct btrfs_io_stripe *tgtdev_stripe =
				bioc->stripes + num_stripes;

			tgtdev_stripe->physical = physical_of_found;
			tgtdev_stripe->length =
				bioc->stripes[index_srcdev].length;
			tgtdev_stripe->dev = dev_replace->tgtdev;
			bioc->tgtdev_map[index_srcdev] = num_stripes;

			tgtdev_indexes++;
			num_stripes++;
		}
	}

	*num_stripes_ret = num_stripes;
	*max_errors_ret = max_errors;
	bioc->num_tgtdevs = tgtdev_indexes;
	*bioc_ret = bioc;
}

static bool need_full_stripe(enum btrfs_map_op op)
{
	return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
}
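/*
 * Editor's worked example (illustrative): a RAID1 write mapped to
 * stripes { devid 1, devid 2 } while devid 1 is being replaced grows to
 * { devid 1, devid 2, tgtdev }, with tgtdev_map[0] = 2, max_errors
 * bumped by one and num_stripes going from 2 to 3, mirroring the write
 * duplication loop above.
 */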
/*
 * Calculate the geometry of a particular (address, len) tuple. This
 * information is used to calculate how big a particular bio can get before it
 * straddles a stripe.
 *
 * @fs_info: the filesystem
 * @em:      mapping containing the logical extent
 * @op:      type of operation - write or read
 * @logical: address that we want to figure out the geometry of
 * @io_geom: pointer used to return values
 *
 * Returns < 0 in case a chunk for the given logical address cannot be found,
 * usually shouldn't happen unless @logical is corrupted, 0 otherwise.
 */
int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *em,
			  enum btrfs_map_op op, u64 logical,
			  struct btrfs_io_geometry *io_geom)
{
	struct map_lookup *map;
	u64 len;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	u64 stripe_len;
	u64 raid56_full_stripe_start = (u64)-1;
	int data_stripes;

	ASSERT(op != BTRFS_MAP_DISCARD);

	map = em->map_lookup;
	/* Offset of this logical address in the chunk */
	offset = logical - em->start;
	/* Len of a stripe in a chunk */
	stripe_len = map->stripe_len;
	/* Stripe where this block falls in */
	stripe_nr = div64_u64(offset, stripe_len);
	/* Offset of stripe in the chunk */
	stripe_offset = stripe_nr * stripe_len;
	if (offset < stripe_offset) {
		btrfs_crit(fs_info,
"stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
			   stripe_offset, offset, em->start, logical,
			   stripe_len);
		return -EINVAL;
	}

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_offset;
	data_stripes = nr_data_stripes(map);

	if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
		u64 max_len = stripe_len - stripe_offset;

		/*
		 * In case of raid56, we need to know the stripe aligned start
		 */
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
			unsigned long full_stripe_len = stripe_len * data_stripes;
			raid56_full_stripe_start = offset;

			/*
			 * Allow a write of a full stripe, but make sure we
			 * don't allow straddling of stripes
			 */
			raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
					full_stripe_len);
			raid56_full_stripe_start *= full_stripe_len;

			/*
			 * For writes to RAID[56], allow a full stripeset across
			 * all disks. For other RAID types and for RAID[56]
			 * reads, just allow a single stripe (on a single disk).
			 */
			if (op == BTRFS_MAP_WRITE) {
				max_len = stripe_len * data_stripes -
					  (offset - raid56_full_stripe_start);
			}
		}
		len = min_t(u64, em->len - offset, max_len);
	} else {
		len = em->len - offset;
	}

	io_geom->len = len;
	io_geom->offset = offset;
	io_geom->stripe_len = stripe_len;
	io_geom->stripe_nr = stripe_nr;
	io_geom->stripe_offset = stripe_offset;
	io_geom->raid56_stripe_offset = raid56_full_stripe_start;

	return 0;
}
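/*
 * Editor's worked example (illustrative): for a RAID0 chunk with
 * stripe_len = 64K and a read at offset = 100K into the chunk:
 *
 *	stripe_nr     = 100K / 64K     = 1
 *	stripe_offset = 100K - 1 * 64K = 36K
 *	max_len       = 64K - 36K      = 28K
 *
 * so io_geom->len is capped at 28K and the caller must split any bio
 * that would otherwise straddle into stripe 2.
 */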
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_io_context **bioc_ret,
			     int mirror_num, int need_raid_map)
{
	struct extent_map *em;
	struct map_lookup *map;
	u64 stripe_offset;
	u64 stripe_nr;
	u64 stripe_len;
	u32 stripe_index;
	int data_stripes;
	int i;
	int ret = 0;
	int num_stripes;
	int max_errors = 0;
	int tgtdev_indexes = 0;
	struct btrfs_io_context *bioc = NULL;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	int dev_replace_is_ongoing = 0;
	int num_alloc_stripes;
	int patch_the_first_stripe_for_dev_replace = 0;
	u64 physical_to_patch_in_first_stripe = 0;
	u64 raid56_full_stripe_start = (u64)-1;
	struct btrfs_io_geometry geom;

	ASSERT(bioc_ret);
	ASSERT(op != BTRFS_MAP_DISCARD);

	em = btrfs_get_chunk_map(fs_info, logical, *length);
	ASSERT(!IS_ERR(em));

	ret = btrfs_get_io_geometry(fs_info, em, op, logical, &geom);
	if (ret < 0)
		return ret;

	map = em->map_lookup;

	*length = geom.len;
	stripe_len = geom.stripe_len;
	stripe_nr = geom.stripe_nr;
	stripe_offset = geom.stripe_offset;
	raid56_full_stripe_start = geom.raid56_stripe_offset;
	data_stripes = nr_data_stripes(map);

	down_read(&dev_replace->rwsem);
	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
	/*
	 * Hold the semaphore for read during the whole operation, write is
	 * requested at commit time but must wait.
	 */
	if (!dev_replace_is_ongoing)
		up_read(&dev_replace->rwsem);

	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
	    !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
		ret = get_extra_mirror_from_replace(fs_info, logical, *length,
						    dev_replace->srcdev->devid,
						    &mirror_num,
					    &physical_to_patch_in_first_stripe);
		if (ret)
			goto out;
		else
			patch_the_first_stripe_for_dev_replace = 1;
	} else if (mirror_num > map->num_stripes) {
		mirror_num = 0;
	}

	num_stripes = 1;
	stripe_index = 0;
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
					&stripe_index);
		if (!need_full_stripe(op))
			mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
		if (need_full_stripe(op))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else {
			stripe_index = find_live_mirror(fs_info, map, 0,
							dev_replace_is_ongoing);
			mirror_num = stripe_index + 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (need_full_stripe(op)) {
			num_stripes = map->num_stripes;
		} else if (mirror_num) {
			stripe_index = mirror_num - 1;
		} else {
			mirror_num = 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		u32 factor = map->num_stripes / map->sub_stripes;

		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
		stripe_index *= map->sub_stripes;

		if (need_full_stripe(op))
			num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else {
			int old_stripe_index = stripe_index;

			stripe_index = find_live_mirror(fs_info, map,
							stripe_index,
							dev_replace_is_ongoing);
			mirror_num = stripe_index - old_stripe_index + 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
			/* Push stripe_nr back to the start of the full stripe */
			stripe_nr = div64_u64(raid56_full_stripe_start,
					      stripe_len * data_stripes);

			/* RAID[56] write or recovery. Return all stripes */
			num_stripes = map->num_stripes;
			max_errors = nr_parity_stripes(map);

			*length = map->stripe_len;
			stripe_index = 0;
			stripe_offset = 0;
		} else {
			/*
			 * Mirror #0 or #1 means the original data block.
			 * Mirror #2 is RAID5 parity block.
			 * Mirror #3 is RAID6 Q block.
			 */
			stripe_nr = div_u64_rem(stripe_nr,
						data_stripes, &stripe_index);
			if (mirror_num > 1)
				stripe_index = data_stripes + mirror_num - 2;

			/* We distribute the parity blocks across stripes */
			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
				    &stripe_index);
			if (!need_full_stripe(op) && mirror_num <= 1)
				mirror_num = 1;
		}
	} else {
		/*
		 * After this, stripe_nr is the number of stripes on this
		 * device we have to walk to find the data, and stripe_index is
		 * the number of our device in the stripe array
		 */
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
					&stripe_index);
		mirror_num = stripe_index + 1;
	}
	if (stripe_index >= map->num_stripes) {
		btrfs_crit(fs_info,
			   "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
			   stripe_index, map->num_stripes);
		ret = -EINVAL;
		goto out;
	}

	num_alloc_stripes = num_stripes;
	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
		if (op == BTRFS_MAP_WRITE)
			num_alloc_stripes <<= 1;
		if (op == BTRFS_MAP_GET_READ_MIRRORS)
			num_alloc_stripes++;
		tgtdev_indexes = num_stripes;
	}

	bioc = alloc_btrfs_io_context(fs_info, num_alloc_stripes, tgtdev_indexes);
	if (!bioc) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_stripes; i++) {
		bioc->stripes[i].physical = map->stripes[stripe_index].physical +
			stripe_offset + stripe_nr * map->stripe_len;
		bioc->stripes[i].dev = map->stripes[stripe_index].dev;
		stripe_index++;
	}

	/* Build raid_map */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
	    (need_full_stripe(op) || mirror_num > 1)) {
		u64 tmp;
		unsigned rot;

		/* Work out the disk rotation on this stripe-set */
		div_u64_rem(stripe_nr, num_stripes, &rot);

		/* Fill in the logical address of each stripe */
		tmp = stripe_nr * data_stripes;
		for (i = 0; i < data_stripes; i++)
			bioc->raid_map[(i + rot) % num_stripes] =
				em->start + (tmp + i) * map->stripe_len;

		bioc->raid_map[(i + rot) % num_stripes] = RAID5_P_STRIPE;
		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
			bioc->raid_map[(i + rot + 1) % num_stripes] =
				RAID6_Q_STRIPE;

		sort_parity_stripes(bioc, num_stripes);
	}

	if (need_full_stripe(op))
		max_errors = btrfs_chunk_max_errors(map);

	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
	    need_full_stripe(op)) {
		handle_ops_on_dev_replace(op, &bioc, dev_replace, logical,
					  &num_stripes, &max_errors);
	}

	*bioc_ret = bioc;
	bioc->map_type = map->type;
	bioc->num_stripes = num_stripes;
	bioc->max_errors = max_errors;
	bioc->mirror_num = mirror_num;

	/*
	 * This is the case that REQ_READ && dev_replace_is_ongoing &&
	 * mirror_num == num_stripes + 1 && dev_replace target drive is
	 * available as a mirror.
	 */
	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
		WARN_ON(num_stripes > 1);
		bioc->stripes[0].dev = dev_replace->tgtdev;
		bioc->stripes[0].physical = physical_to_patch_in_first_stripe;
		bioc->mirror_num = map->num_stripes + 1;
	}
out:
	if (dev_replace_is_ongoing) {
		lockdep_assert_held(&dev_replace->rwsem);
		/* Unlock and let waiting writers proceed */
		up_read(&dev_replace->rwsem);
	}
	free_extent_map(em);
	return ret;
}
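/*
 * Editor's worked example (illustrative): RAID5 over 3 devices
 * (2 data stripes + P), with full stripe number stripe_nr = 1:
 * rot = 1 % 3 = 1, so the slots are filled as
 *
 *	raid_map[1] = logical of data stripe 0
 *	raid_map[2] = logical of data stripe 1
 *	raid_map[0] = RAID5_P_STRIPE
 *
 * and sort_parity_stripes() then reorders the stripe/raid_map pairs so
 * the parity entry (a huge sentinel value) ends up last, which is the
 * order the raid56 code expects.
 */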
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		    u64 logical, u64 *length,
		    struct btrfs_io_context **bioc_ret, int mirror_num)
{
	if (op == BTRFS_MAP_DISCARD)
		return __btrfs_map_block_for_discard(fs_info, logical,
						     length, bioc_ret);

	return __btrfs_map_block(fs_info, op, logical, length, bioc_ret,
				 mirror_num, 0);
}

/* For Scrub/replace */
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		     u64 logical, u64 *length,
		     struct btrfs_io_context **bioc_ret)
{
	return __btrfs_map_block(fs_info, op, logical, length, bioc_ret, 0, 1);
}

static inline void btrfs_end_bioc(struct btrfs_io_context *bioc, struct bio *bio)
{
	bio->bi_private = bioc->private;
	bio->bi_end_io = bioc->end_io;
	bio_endio(bio);

	btrfs_put_bioc(bioc);
}

static void btrfs_end_bio(struct bio *bio)
{
	struct btrfs_io_context *bioc = bio->bi_private;
	int is_orig_bio = 0;

	if (bio->bi_status) {
		atomic_inc(&bioc->error);
		if (bio->bi_status == BLK_STS_IOERR ||
		    bio->bi_status == BLK_STS_TARGET) {
			struct btrfs_device *dev = btrfs_bio(bio)->device;

			ASSERT(dev->bdev);
			if (btrfs_op(bio) == BTRFS_MAP_WRITE)
				btrfs_dev_stat_inc_and_print(dev,
						BTRFS_DEV_STAT_WRITE_ERRS);
			else if (!(bio->bi_opf & REQ_RAHEAD))
				btrfs_dev_stat_inc_and_print(dev,
						BTRFS_DEV_STAT_READ_ERRS);
			if (bio->bi_opf & REQ_PREFLUSH)
				btrfs_dev_stat_inc_and_print(dev,
						BTRFS_DEV_STAT_FLUSH_ERRS);
		}
	}

	if (bio == bioc->orig_bio)
		is_orig_bio = 1;

	btrfs_bio_counter_dec(bioc->fs_info);

	if (atomic_dec_and_test(&bioc->stripes_pending)) {
		if (!is_orig_bio) {
			bio_put(bio);
			bio = bioc->orig_bio;
		}

		btrfs_bio(bio)->mirror_num = bioc->mirror_num;
		/*
		 * Only send an error to the higher layers if it is beyond
		 * the tolerance of the btrfs bio.
		 */
		if (atomic_read(&bioc->error) > bioc->max_errors) {
			bio->bi_status = BLK_STS_IOERR;
		} else {
			/*
			 * This bio is actually up to date, we didn't go
			 * over the max number of errors.
			 */
			bio->bi_status = BLK_STS_OK;
		}

		btrfs_end_bioc(bioc, bio);
	} else if (!is_orig_bio) {
		bio_put(bio);
	}
}
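/*
 * Editor's note (illustrative): completion works like a simple
 * refcount.  For a RAID1C3 write, stripes_pending starts at 3; each
 * per-device completion runs btrfs_end_bio() above and decrements it,
 * and only the final decrement compares the accumulated error count
 * with max_errors (2 for RAID1C3) and completes the original bio.
 */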
static void submit_stripe_bio(struct btrfs_io_context *bioc, struct bio *bio,
			      u64 physical, struct btrfs_device *dev)
{
	struct btrfs_fs_info *fs_info = bioc->fs_info;

	bio->bi_private = bioc;
	btrfs_bio(bio)->device = dev;
	bio->bi_end_io = btrfs_end_bio;
	bio->bi_iter.bi_sector = physical >> 9;
	/*
	 * For zone append writing, bi_sector must point to the beginning of
	 * the zone
	 */
	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
		if (btrfs_dev_is_sequential(dev, physical)) {
			u64 zone_start = round_down(physical,
						    fs_info->zone_size);

			bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT;
		} else {
			bio->bi_opf &= ~REQ_OP_ZONE_APPEND;
			bio->bi_opf |= REQ_OP_WRITE;
		}
	}
	btrfs_debug_in_rcu(fs_info,
	"btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
		bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector,
		(unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name),
		dev->devid, bio->bi_iter.bi_size);
	bio_set_dev(bio, dev->bdev);

	btrfs_bio_counter_inc_noblocked(fs_info);

	btrfsic_submit_bio(bio);
}

static void bioc_error(struct btrfs_io_context *bioc, struct bio *bio, u64 logical)
{
	atomic_inc(&bioc->error);
	if (atomic_dec_and_test(&bioc->stripes_pending)) {
		/* Should be the original bio. */
		WARN_ON(bio != bioc->orig_bio);

		btrfs_bio(bio)->mirror_num = bioc->mirror_num;
		bio->bi_iter.bi_sector = logical >> 9;
		if (atomic_read(&bioc->error) > bioc->max_errors)
			bio->bi_status = BLK_STS_IOERR;
		else
			bio->bi_status = BLK_STS_OK;
		btrfs_end_bioc(bioc, bio);
	}
}

blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
			   int mirror_num)
{
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = bio->bi_iter.bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;
	int dev_nr;
	int total_devs;
	struct btrfs_io_context *bioc = NULL;

	length = bio->bi_iter.bi_size;
	map_length = length;

	btrfs_bio_counter_inc_blocked(fs_info);
	ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
				&map_length, &bioc, mirror_num, 1);
	if (ret) {
		btrfs_bio_counter_dec(fs_info);
		return errno_to_blk_status(ret);
	}

	total_devs = bioc->num_stripes;
	bioc->orig_bio = first_bio;
	bioc->private = first_bio->bi_private;
	bioc->end_io = first_bio->bi_end_io;
	atomic_set(&bioc->stripes_pending, bioc->num_stripes);

	if ((bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
	    ((btrfs_op(bio) == BTRFS_MAP_WRITE) || (mirror_num > 1))) {
		/*
		 * In this case, map_length has been set to the length of a
		 * single stripe; not the whole write.
		 */
		if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
			ret = raid56_parity_write(fs_info, bio, bioc,
						  map_length);
		} else {
			ret = raid56_parity_recover(fs_info, bio, bioc,
						    map_length, mirror_num, 1);
		}

		btrfs_bio_counter_dec(fs_info);
		return errno_to_blk_status(ret);
	}

	if (map_length < length) {
		btrfs_crit(fs_info,
			   "mapping failed logical %llu bio len %llu len %llu",
			   logical, length, map_length);
		BUG();
	}

	for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
		dev = bioc->stripes[dev_nr].dev;
		if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
						   &dev->dev_state) ||
		    (btrfs_op(first_bio) == BTRFS_MAP_WRITE &&
		     !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
			bioc_error(bioc, first_bio, logical);
			continue;
		}

		if (dev_nr < total_devs - 1)
			bio = btrfs_bio_clone(first_bio);
		else
			bio = first_bio;

		submit_stripe_bio(bioc, bio, bioc->stripes[dev_nr].physical, dev);
	}
	btrfs_bio_counter_dec(fs_info);
	return BLK_STS_OK;
}
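/*
 * Editor's worked example (illustrative): with zone_size = 256M, a zone
 * append targeting physical = 256M + 4K on a sequential zone is
 * submitted in submit_stripe_bio() above with bi_sector = 256M >> 9
 * (the zone start); the device reports the actual write position on
 * completion.  On a conventional zone the op simply degrades to a
 * regular REQ_OP_WRITE at the original sector.
 */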
/*
 * Find a device specified by @devid or @uuid in the list of @fs_devices, or
 * return NULL.
 *
 * If devid and uuid are both specified, the match must be exact, otherwise
 * only devid is used.
 */
struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
				       u64 devid, u8 *uuid, u8 *fsid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *seed_devs;

	if (!fsid || !memcmp(fs_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
		list_for_each_entry(device, &fs_devices->devices, dev_list) {
			if (device->devid == devid &&
			    (!uuid || memcmp(device->uuid, uuid,
					     BTRFS_UUID_SIZE) == 0))
				return device;
		}
	}

	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		if (!fsid ||
		    !memcmp(seed_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
			list_for_each_entry(device, &seed_devs->devices,
					    dev_list) {
				if (device->devid == devid &&
				    (!uuid || memcmp(device->uuid, uuid,
						     BTRFS_UUID_SIZE) == 0))
					return device;
			}
		}
	}

	return NULL;
}

static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;
	unsigned int nofs_flag;

	/*
	 * We call this under the chunk_mutex, so we want to use NOFS for this
	 * allocation, however we don't want to change btrfs_alloc_device() to
	 * always do NOFS because we use it in a lot of other GFP_KERNEL safe
	 * places.
	 */
	nofs_flag = memalloc_nofs_save();
	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(device))
		return device;

	list_add(&device->dev_list, &fs_devices->devices);
	device->fs_devices = fs_devices;
	fs_devices->num_devices++;

	set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
	fs_devices->missing_devices++;

	return device;
}
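/*
 * Editor's note (illustrative): typical lookups against
 * btrfs_find_device() above.  Passing only a devid matches the first
 * device with that id; adding a uuid makes the match exact:
 *
 *	dev = btrfs_find_device(fs_devices, devid, NULL, NULL);
 *	dev = btrfs_find_device(fs_devices, devid, uuid, fs_uuid);
 *
 * A non-NULL fsid additionally restricts the search to the fs_devices
 * (or seed fs_devices) whose metadata_uuid matches.
 */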
/**
 * btrfs_alloc_device - allocate struct btrfs_device
 * @fs_info:	used only for generating a new devid, can be NULL if
 *		devid is provided (i.e. @devid != NULL).
 * @devid:	a pointer to devid for this device.  If NULL a new devid
 *		is generated.
 * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
 *		is generated.
 *
 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
 * on error.  Returned struct is not linked onto any lists and must be
 * destroyed with btrfs_free_device.
 */
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid,
					const u8 *uuid)
{
	struct btrfs_device *dev;
	u64 tmp;

	if (WARN_ON(!devid && !fs_info))
		return ERR_PTR(-EINVAL);

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate a bio that's always going to be used for flushing
	 * device barriers and matches the device lifespan
	 */
	dev->flush_bio = bio_kmalloc(GFP_KERNEL, 0);
	if (!dev->flush_bio) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->post_commit_list);

	atomic_set(&dev->reada_in_flight, 0);
	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	extent_io_tree_init(fs_info, &dev->alloc_state,
			    IO_TREE_DEVICE_ALLOC_STATE, NULL);

	if (devid)
		tmp = *devid;
	else {
		int ret;

		ret = find_next_devid(fs_info, &tmp);
		if (ret) {
			btrfs_free_device(dev);
			return ERR_PTR(ret);
		}
	}
	dev->devid = tmp;

	if (uuid)
		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
	else
		generate_random_uuid(dev->uuid);

	return dev;
}

static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
					u64 devid, u8 *uuid, bool error)
{
	if (error)
		btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
			     devid, uuid);
	else
		btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
			      devid, uuid);
}

static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
{
	const int data_stripes = calc_data_stripes(type, num_stripes);

	return div_u64(chunk_len, data_stripes);
}
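/*
 * Editor's worked example (illustrative): a 3GiB RAID5 chunk over 4
 * devices has calc_data_stripes() = 3, so calc_stripe_length() returns
 * 1GiB: each device holds a 1GiB dev extent, three of which carry data
 * and one (rotating) parity.
 */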
#if BITS_PER_LONG == 32
/*
 * Due to page cache limit, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE
 * can't be accessed on 32bit systems.
 *
 * This function does a mount time check to reject the fs if it already has
 * metadata chunks beyond that limit.
 */
static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
				  u64 logical, u64 length, u64 type)
{
	if (!(type & BTRFS_BLOCK_GROUP_METADATA))
		return 0;

	if (logical + length < MAX_LFS_FILESIZE)
		return 0;

	btrfs_err_32bit_limit(fs_info);
	return -EOVERFLOW;
}

/*
 * This is to give early warning for any metadata chunk reaching
 * BTRFS_32BIT_EARLY_WARN_THRESHOLD.
 * Although we can still access the metadata, it's not going to be possible
 * once the limit is reached.
 */
static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
				  u64 logical, u64 length, u64 type)
{
	if (!(type & BTRFS_BLOCK_GROUP_METADATA))
		return;

	if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD)
		return;

	btrfs_warn_32bit_limit(fs_info);
}
#endif

static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u64 type;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);
	type = btrfs_chunk_type(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);

#if BITS_PER_LONG == 32
	ret = check_32bit_meta_chunk(fs_info, logical, length, type);
	if (ret < 0)
		return ret;
	warn_32bit_meta_chunk(fs_info, logical, length, type);
#endif

	/*
	 * Only need to verify chunk item if we're reading from sys chunk array,
	 * as chunk item in tree block is already verified by tree-checker.
	 */
	if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
		ret = btrfs_check_chunk_valid(leaf, chunk, logical);
		if (ret)
			return ret;
	}

	read_lock(&map_tree->lock);
	em = lookup_extent_mapping(map_tree, logical, 1);
	read_unlock(&map_tree->lock);

	/* Already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
	em->map_lookup = map;
	em->start = logical;
	em->len = length;
	em->orig_start = 0;
	em->block_start = 0;
	em->block_len = em->len;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = type;
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	map->verified_stripes = 0;
	em->orig_block_len = calc_stripe_length(type, em->len,
						map->num_stripes);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices,
							devid, uuid, NULL);
		if (!map->stripes[i].dev &&
		    !btrfs_test_opt(fs_info, DEGRADED)) {
			free_extent_map(em);
			btrfs_report_missing_device(fs_info, devid, uuid, true);
			return -ENOENT;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(fs_info->fs_devices, devid,
						uuid);
			if (IS_ERR(map->stripes[i].dev)) {
				free_extent_map(em);
				btrfs_err(fs_info,
					"failed to init missing dev %llu: %ld",
					devid, PTR_ERR(map->stripes[i].dev));
				return PTR_ERR(map->stripes[i].dev);
			}
			btrfs_report_missing_device(fs_info, devid, uuid, false);
		}
		set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
			&(map->stripes[i].dev->dev_state));

	}

	write_lock(&map_tree->lock);
	ret = add_extent_mapping(map_tree, em, 0);
	write_unlock(&map_tree->lock);
	if (ret < 0) {
		btrfs_err(fs_info,
			  "failed to add chunk map, start=%llu len=%llu: %d",
			  em->start, em->len, ret);
	}
	free_extent_map(em);

	return ret;
}

static void fill_device_from_item(struct extent_buffer *leaf,
				  struct btrfs_dev_item *dev_item,
				  struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->total_bytes = device->disk_total_bytes;
	device->commit_total_bytes = device->disk_total_bytes;
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->commit_bytes_used = device->bytes_used;
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	ptr = btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
}

static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
						  u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	lockdep_assert_held(&uuid_mutex);
	ASSERT(fsid);

	/* This will match only for multi-device seed fs */
	list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list)
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
			return fs_devices;

	fs_devices = find_fsid(fsid, NULL);
	if (!fs_devices) {
		if (!btrfs_test_opt(fs_info, DEGRADED))
			return ERR_PTR(-ENOENT);

		fs_devices = alloc_fs_devices(fsid, NULL);
		if (IS_ERR(fs_devices))
			return fs_devices;

		fs_devices->seeding = true;
		fs_devices->opened = 1;
		return fs_devices;
	}

	/*
	 * Upon first call for a seed fs fsid, just create a private copy of
	 * the respective fs_devices and anchor it at
	 * fs_info->fs_devices->seed_list
	 */
	fs_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(fs_devices))
		return fs_devices;

	ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder);
	if (ret) {
		free_fs_devices(fs_devices);
		return ERR_PTR(ret);
	}

	if (!fs_devices->seeding) {
		close_fs_devices(fs_devices);
		free_fs_devices(fs_devices);
		return ERR_PTR(-EINVAL);
	}

	list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list);

	return fs_devices;
}
static int read_one_dev(struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 fs_uuid[BTRFS_FSID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
			   BTRFS_FSID_SIZE);

	if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
		fs_devices = open_seed_devices(fs_info, fs_uuid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);
	}

	device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
				   fs_uuid);
	if (!device) {
		if (!btrfs_test_opt(fs_info, DEGRADED)) {
			btrfs_report_missing_device(fs_info, devid,
						    dev_uuid, true);
			return -ENOENT;
		}

		device = add_missing_dev(fs_devices, devid, dev_uuid);
		if (IS_ERR(device)) {
			btrfs_err(fs_info,
				  "failed to add missing dev %llu: %ld",
				  devid, PTR_ERR(device));
			return PTR_ERR(device);
		}
		btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
	} else {
		if (!device->bdev) {
			if (!btrfs_test_opt(fs_info, DEGRADED)) {
				btrfs_report_missing_device(fs_info,
						devid, dev_uuid, true);
				return -ENOENT;
			}
			btrfs_report_missing_device(fs_info, devid,
						    dev_uuid, false);
		}

		if (!device->bdev &&
		    !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			/*
			 * This happens when a device that was properly set up
			 * in the device info lists suddenly goes bad.
			 * device->bdev is NULL, and so we have to set
			 * device->missing to one here
			 */
			device->fs_devices->missing_devices++;
			set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}

		/* Move the device to its own fs_devices */
		if (device->fs_devices != fs_devices) {
			ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
							&device->dev_state));

			list_move(&device->dev_list, &fs_devices->devices);
			device->fs_devices->num_devices--;
			fs_devices->num_devices++;

			device->fs_devices->missing_devices--;
			fs_devices->missing_devices++;

			device->fs_devices = fs_devices;
		}
	}

	if (device->fs_devices != fs_info->fs_devices) {
		BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	if (device->bdev) {
		u64 max_total_bytes = i_size_read(device->bdev->bd_inode);

		if (device->total_bytes > max_total_bytes) {
			btrfs_err(fs_info,
			"device total_bytes should be at most %llu but found %llu",
				  max_total_bytes, device->total_bytes);
			return -EINVAL;
		}
	}
	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		device->fs_devices->total_rw_bytes += device->total_bytes;
		atomic64_add(device->total_bytes - device->bytes_used,
			     &fs_info->free_chunk_space);
	}
	ret = 0;
	return ret;
}
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *array_ptr;
	unsigned long sb_array_offset;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur_offset;
	u64 type;
	struct btrfs_key key;

	ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
	/*
	 * This will create an extent buffer of nodesize, superblock size is
	 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
	 * overallocate but we can keep it as-is, only the first page is used.
	 */
	sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET,
					  root->root_key.objectid, 0);
	if (IS_ERR(sb))
		return PTR_ERR(sb);
	set_extent_buffer_uptodate(sb);
	/*
	 * The sb extent buffer is artificial and just used to read the
	 * system array.  The set_extent_buffer_uptodate() call does not
	 * properly mark all its pages up-to-date when the page is larger:
	 * the extent does not cover the whole page and consequently
	 * check_page_uptodate does not find all the page's extents
	 * up-to-date (the hole beyond sb), and write_extent_buffer then
	 * triggers a WARN_ON.
	 *
	 * Regular short extents go through the mark_extent_buffer_dirty/
	 * writeback cycle, but sb spans only this function.  Add an explicit
	 * SetPageUptodate call to silence the warning eg. on PowerPC 64.
	 */
	if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
		SetPageUptodate(sb->pages[0]);

	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	array_ptr = super_copy->sys_chunk_array;
	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur_offset = 0;

	while (cur_offset < array_size) {
		disk_key = (struct btrfs_disk_key *)array_ptr;
		len = sizeof(*disk_key);
		if (cur_offset + len > array_size)
			goto out_short_read;

		btrfs_disk_key_to_cpu(&key, disk_key);

		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;

		if (key.type != BTRFS_CHUNK_ITEM_KEY) {
			btrfs_err(fs_info,
			    "unexpected item type %u in sys_array at offset %u",
				  (u32)key.type, cur_offset);
			ret = -EIO;
			break;
		}

		chunk = (struct btrfs_chunk *)sb_array_offset;
		/*
		 * At least one btrfs_chunk with one stripe must be present,
		 * exact stripe count check comes afterwards
		 */
		len = btrfs_chunk_item_size(1);
		if (cur_offset + len > array_size)
			goto out_short_read;

		num_stripes = btrfs_chunk_num_stripes(sb, chunk);
		if (!num_stripes) {
			btrfs_err(fs_info,
			"invalid number of stripes %u in sys_array at offset %u",
				  num_stripes, cur_offset);
			ret = -EIO;
			break;
		}

		type = btrfs_chunk_type(sb, chunk);
		if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
			btrfs_err(fs_info,
			"invalid chunk type %llu in sys_array at offset %u",
				  type, cur_offset);
			ret = -EIO;
			break;
		}

		len = btrfs_chunk_item_size(num_stripes);
		if (cur_offset + len > array_size)
			goto out_short_read;

		ret = read_one_chunk(&key, sb, chunk);
		if (ret)
			break;

		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;
	}
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return ret;

out_short_read:
	btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
		  len, cur_offset);
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return -EIO;
}
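/*
 * Editor's note (illustrative): the sys_chunk_array parsed above is a
 * packed sequence of (btrfs_disk_key, btrfs_chunk incl. stripes)
 * records, e.g. for a single SYSTEM chunk with two stripes:
 *
 *	[disk_key][btrfs_chunk + 2 * btrfs_stripe][disk_key][...]
 *
 * which is why the loop first checks against btrfs_chunk_item_size(1)
 * just to be able to read num_stripes safely, then re-checks against
 * the full btrfs_chunk_item_size(num_stripes).
 */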
/*
 * Check if all chunks in the fs are OK for read-write degraded mount
 *
 * If the @failing_dev is specified, it's accounted as missing.
 *
 * Return true if all chunks meet the minimal RW mount requirements.
 * Return false if any chunk doesn't meet the minimal RW mount requirements.
 */
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
			       struct btrfs_device *failing_dev)
{
	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	u64 next_start = 0;
	bool ret = true;

	read_lock(&map_tree->lock);
	em = lookup_extent_mapping(map_tree, 0, (u64)-1);
	read_unlock(&map_tree->lock);
	/* No chunk at all? Return false anyway */
	if (!em) {
		ret = false;
		goto out;
	}
	while (em) {
		struct map_lookup *map;
		int missing = 0;
		int max_tolerated;
		int i;

		map = em->map_lookup;
		max_tolerated =
			btrfs_get_num_tolerated_disk_barrier_failures(
					map->type);
		for (i = 0; i < map->num_stripes; i++) {
			struct btrfs_device *dev = map->stripes[i].dev;

			if (!dev || !dev->bdev ||
			    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
			    dev->last_flush_error)
				missing++;
			else if (failing_dev && failing_dev == dev)
				missing++;
		}
		if (missing > max_tolerated) {
			if (!failing_dev)
				btrfs_warn(fs_info,
	"chunk %llu missing %d devices, max tolerance is %d for writable mount",
					   em->start, missing, max_tolerated);
			free_extent_map(em);
			ret = false;
			goto out;
		}
		next_start = extent_map_end(em);
		free_extent_map(em);

		read_lock(&map_tree->lock);
		em = lookup_extent_mapping(map_tree, next_start,
					   (u64)(-1) - next_start);
		read_unlock(&map_tree->lock);
	}
out:
	return ret;
}

static void readahead_tree_node_children(struct extent_buffer *node)
{
	int i;
	const int nr_items = btrfs_header_nritems(node);

	for (i = 0; i < nr_items; i++)
		btrfs_readahead_node_child(node, i);
}
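/*
 * Editor's worked example (illustrative): on a two-device RAID1
 * filesystem with one device missing, every RAID1 chunk has
 * max_tolerated = 1 and missing = 1, so btrfs_check_rw_degradable()
 * above still returns true and a degraded read-write mount is allowed;
 * a second missing device (or any missing device under a RAID0/single
 * chunk, where max_tolerated = 0) flips the result to false.
 */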
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;
	u64 total_dev = 0;
	u64 last_ra_node = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * uuid_mutex is needed only if we are mounting a sprout FS
	 * otherwise we don't need it.
	 */
	mutex_lock(&uuid_mutex);

	/*
	 * It is possible for mount and umount to race in such a way that
	 * we execute this code path, but open_fs_devices failed to clear
	 * total_rw_bytes. We certainly want it cleared before reading the
	 * device items, so clear it here.
	 */
	fs_info->fs_devices->total_rw_bytes = 0;

	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		struct extent_buffer *node;

		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		/*
		 * The nodes on level 1 are not locked but we don't need to do
		 * that during mount time as nothing else can access the tree
		 */
		node = path->nodes[1];
		if (node) {
			if (last_ra_node != node->start) {
				readahead_tree_node_children(node);
				last_ra_node = node->start;
			}
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;

			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(leaf, dev_item);
			if (ret)
				goto error;
			total_dev++;
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;

			/*
			 * We are only called at mount time, so no need to take
			 * fs_info->chunk_mutex. Plus, to avoid lockdep
			 * warnings, we always lock first fs_info->chunk_mutex
			 * before acquiring any locks on the chunk tree. This
			 * is a requirement for chunk allocation, see the
			 * comment on top of btrfs_chunk_alloc() for details.
			 */
			ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(&found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}

	/*
	 * After loading chunk tree, we've got all device information,
	 * do another round of validation checks.
	 */
	if (total_dev != fs_info->fs_devices->total_devices) {
		btrfs_err(fs_info,
	   "super_num_devices %llu mismatch with num_devices %llu found here",
			  btrfs_super_num_devices(fs_info->super_copy),
			  total_dev);
		ret = -EINVAL;
		goto error;
	}
	if (btrfs_super_total_bytes(fs_info->super_copy) <
	    fs_info->fs_devices->total_rw_bytes) {
		btrfs_err(fs_info,
	"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
			  btrfs_super_total_bytes(fs_info->super_copy),
			  fs_info->fs_devices->total_rw_bytes);
		ret = -EINVAL;
		goto error;
	}
	ret = 0;
error:
	mutex_unlock(&uuid_mutex);

	btrfs_free_path(path);
	return ret;
}

void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	struct btrfs_device *device;

	fs_devices->fs_info = fs_info;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list)
		device->fs_info = fs_info;

	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		list_for_each_entry(device, &seed_devs->devices, dev_list)
			device->fs_info = fs_info;

		seed_devs->fs_info = fs_info;
	}
	mutex_unlock(&fs_devices->device_list_mutex);
}

static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
				 const struct btrfs_dev_stats_item *ptr,
				 int index)
{
	u64 val;

	read_extent_buffer(eb, &val,
			   offsetof(struct btrfs_dev_stats_item, values) +
			   ((unsigned long)ptr) + (index * sizeof(u64)),
			   sizeof(val));
	return val;
}

static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
				      struct btrfs_dev_stats_item *ptr,
				      int index, u64 val)
{
	write_extent_buffer(eb, &val,
			    offsetof(struct btrfs_dev_stats_item, values) +
			    ((unsigned long)ptr) + (index * sizeof(u64)),
			    sizeof(val));
}
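/*
 * Editor's note (illustrative): in the two helpers above, "ptr" is an
 * offset within the extent buffer disguised as a pointer (the usual
 * btrfs_item_ptr() convention), so the bytes accessed for index i are
 *
 *	item offset + offsetof(struct btrfs_dev_stats_item, values)
 *		    + i * sizeof(u64)
 *
 * e.g. index 2 would read 8 bytes at item offset + 16, assuming
 * values[] sits at the start of the on-disk item.
 */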
static int btrfs_device_init_dev_stats(struct btrfs_device *device,
				       struct btrfs_path *path)
{
	struct btrfs_dev_stats_item *ptr;
	struct extent_buffer *eb;
	struct btrfs_key key;
	int item_size;
	int i, ret, slot;

	if (!device->fs_info->dev_root)
		return 0;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;
	ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0);
	if (ret) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			btrfs_dev_stat_set(device, i, 0);
		device->dev_stats_valid = 1;
		btrfs_release_path(path);
		return ret < 0 ? ret : 0;
	}
	slot = path->slots[0];
	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, slot);

	ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item);

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
		if (item_size >= (1 + i) * sizeof(__le64))
			btrfs_dev_stat_set(device, i,
					   btrfs_dev_stats_value(eb, ptr, i));
		else
			btrfs_dev_stat_set(device, i, 0);
	}

	device->dev_stats_valid = 1;
	btrfs_dev_stat_print_on_load(device);
	btrfs_release_path(path);

	return 0;
}

int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		ret = btrfs_device_init_dev_stats(device, path);
		if (ret)
			goto out;
	}
	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		list_for_each_entry(device, &seed_devs->devices, dev_list) {
			ret = btrfs_device_init_dev_stats(device, path);
			if (ret)
				goto out;
		}
	}
out:
	mutex_unlock(&fs_devices->device_list_mutex);

	btrfs_free_path(path);
	return ret;
}

static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn_in_rcu(fs_info,
			"error %d while searching for dev_stats item for device %s",
			ret, rcu_str_deref(device->name));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* Need to delete old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn_in_rcu(fs_info,
				"delete too small dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* Need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn_in_rcu(fs_info,
				"insert dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Called from commit_transaction. Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		stats_cnt = atomic_read(&device->dev_stats_ccnt);
		if (!device->dev_stats_valid || stats_cnt == 0)
			continue;

		/*
		 * There is a LOAD-LOAD control dependency between the value of
		 * dev_stats_ccnt and updating the on-disk values which requires
		 * reading the in-memory counters. Such control dependencies
		 * require explicit read memory barriers.
		 *
		 * This memory barrier pairs with smp_mb__before_atomic in
		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
		 * barrier implied by atomic_xchg in
		 * btrfs_dev_stats_read_and_reset.
		 */
		smp_rmb();

		ret = update_dev_stat_item(trans, device);
		if (!ret)
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);
	btrfs_dev_stat_print_on_error(dev);
}

static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
	if (!dev->dev_stats_valid)
		return;
	btrfs_err_rl_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			   rcu_str_deref(dev->name),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

	btrfs_info_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			  rcu_str_deref(dev->name),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}
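
/*
 * Rough userspace sketch for the handler below (illustration only, error
 * handling omitted; see the BTRFS_IOC_GET_DEV_STATS ioctl definition for
 * the exact ABI):
 *
 *	struct btrfs_ioctl_get_dev_stats gds = {
 *		.devid = 1,
 *		.nr_items = BTRFS_DEV_STAT_VALUES_MAX,
 *		.flags = 0,	// or BTRFS_DEV_STATS_RESET to zero after read
 *	};
 *	ioctl(fs_fd, BTRFS_IOC_GET_DEV_STATS, &gds);
 *
 * On return, gds.values[BTRFS_DEV_STAT_WRITE_ERRS] etc. hold the counters
 * and gds.nr_items is clamped to what the kernel actually filled in.
 */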
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info->fs_devices, stats->devid, NULL, NULL);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_set(dev, i, 0);
		}
		btrfs_info(fs_info, "device stats zeroed by %s (%d)",
			   current->comm, task_pid_nr(current));
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}

/*
 * Update the size and bytes used for each device where it changed. This is
 * delayed since we would otherwise get errors while writing out the
 * superblocks.
 *
 * Must be invoked during transaction commit.
 */
void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
{
	struct btrfs_device *curr, *next;

	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);

	if (list_empty(&trans->dev_update_list))
		return;

	/*
	 * We don't need the device_list_mutex here. This list is owned by the
	 * transaction and the transaction must complete before the device is
	 * released.
	 */
	mutex_lock(&trans->fs_info->chunk_mutex);
	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
				 post_commit_list) {
		list_del_init(&curr->post_commit_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
		curr->commit_bytes_used = curr->bytes_used;
	}
	mutex_unlock(&trans->fs_info->chunk_mutex);
}

/*
 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
 */
int btrfs_bg_type_to_factor(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	return btrfs_raid_array[index].ncopies;
}
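
/*
 * Worked examples, taken straight from btrfs_raid_array at the top of this
 * file: RAID1, RAID10 and DUP keep two copies so the factor is 2, RAID1C3
 * and RAID1C4 give 3 and 4, and SINGLE, RAID0, RAID5 and RAID6 give 1,
 * since ncopies does not count parity stripes (those are tracked in
 * nparity).
 */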

static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
				 u64 chunk_offset, u64 devid,
				 u64 physical_offset, u64 physical_len)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *dev;
	u64 stripe_len;
	bool found = false;
	int ret = 0;
	int i;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
		goto out;
	}

	map = em->map_lookup;
	stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
	if (physical_len != stripe_len) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
			  physical_offset, devid, em->start, physical_len,
			  stripe_len);
		ret = -EUCLEAN;
		goto out;
	}

	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->devid == devid &&
		    map->stripes[i].physical == physical_offset) {
			found = true;
			if (map->verified_stripes >= map->num_stripes) {
				btrfs_err(fs_info,
					"too many dev extents for chunk %llu found",
					  em->start);
				ret = -EUCLEAN;
				goto out;
			}
			map->verified_stripes++;
			break;
		}
	}
	if (!found) {
		btrfs_err(fs_info,
	"dev extent physical offset %llu devid %llu has no corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
	}

	/* Make sure no dev extent is beyond device boundary */
	dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL);
	if (!dev) {
		btrfs_err(fs_info, "failed to find devid %llu", devid);
		ret = -EUCLEAN;
		goto out;
	}

	if (physical_offset + physical_len > dev->disk_total_bytes) {
		btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
			  devid, physical_offset, physical_len,
			  dev->disk_total_bytes);
		ret = -EUCLEAN;
		goto out;
	}

	if (dev->zone_info) {
		u64 zone_size = dev->zone_info->zone_size;

		if (!IS_ALIGNED(physical_offset, zone_size) ||
		    !IS_ALIGNED(physical_len, zone_size)) {
			btrfs_err(fs_info,
"zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
				  devid, physical_offset, physical_len);
			ret = -EUCLEAN;
			goto out;
		}
	}

out:
	free_extent_map(em);
	return ret;
}

static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct rb_node *node;
	int ret = 0;

	read_lock(&em_tree->lock);
	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
		em = rb_entry(node, struct extent_map, rb_node);
		if (em->map_lookup->num_stripes !=
		    em->map_lookup->verified_stripes) {
			btrfs_err(fs_info,
			"chunk %llu has missing dev extent, have %d expect %d",
				  em->start, em->map_lookup->verified_stripes,
				  em->map_lookup->num_stripes);
			ret = -EUCLEAN;
			goto out;
		}
	}
out:
	read_unlock(&em_tree->lock);
	return ret;
}
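
/*
 * Key layout reminder for the walk below: dev extent items are keyed as
 * (devid, BTRFS_DEV_EXTENT_KEY, physical offset) and devids start at 1, so
 * searching from objectid 1 / offset 0 lands at the first dev extent and
 * btrfs_next_item() then visits them in (devid, physical) order.  That
 * ordering is what makes the simple prev_devid/prev_dev_ext_end overlap
 * check sufficient.
 */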

/*
 * Ensure that all dev extents are mapped to correct chunk, otherwise
 * later chunk allocation/free would cause unexpected behavior.
 *
 * NOTE: This will iterate through the whole device tree, which should be of
 * the same size level as the chunk tree.  This slightly increases mount time.
 */
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	u64 prev_devid = 0;
	u64 prev_dev_ext_end = 0;
	int ret = 0;

	/*
	 * We don't have a dev_root because we mounted with ignorebadroots and
	 * failed to load the root, so we want to skip the verification in this
	 * case for sure.
	 *
	 * However if the dev root is fine, but the tree itself is corrupted
	 * we'd still fail to mount.  This verification is only to make sure
	 * writes can happen safely, so instead just bypass this check
	 * completely in the case of IGNOREBADROOTS.
	 */
	if (btrfs_test_opt(fs_info, IGNOREBADROOTS))
		return 0;

	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	while (1) {
		struct extent_buffer *leaf = path->nodes[0];
		struct btrfs_dev_extent *dext;
		int slot = path->slots[0];
		u64 chunk_offset;
		u64 physical_offset;
		u64 physical_len;
		u64 devid;

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			break;
		devid = key.objectid;
		physical_offset = key.offset;

		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
		physical_len = btrfs_dev_extent_length(leaf, dext);

		/* Check if this dev extent overlaps with the previous one */
		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
			btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
				  devid, physical_offset, prev_dev_ext_end);
			ret = -EUCLEAN;
			goto out;
		}

		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
					    physical_offset, physical_len);
		if (ret < 0)
			goto out;
		prev_devid = devid;
		prev_dev_ext_end = physical_offset + physical_len;

		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}

	/* Ensure all chunks have corresponding dev extents */
	ret = verify_chunk_dev_extent_mapping(fs_info);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Check whether the given block group or device is pinned by any inode being
 * used as a swapfile.
 */
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
{
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = fs_info->swapfile_pins.rb_node;
	while (node) {
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (ptr < sp->ptr)
			node = node->rb_left;
		else if (ptr > sp->ptr)
			node = node->rb_right;
		else
			break;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
	return node != NULL;
}

static int relocating_repair_kthread(void *data)
{
	struct btrfs_block_group *cache = (struct btrfs_block_group *)data;
	struct btrfs_fs_info *fs_info = cache->fs_info;
	u64 target;
	int ret = 0;

	target = cache->start;
	btrfs_put_block_group(cache);

	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
		btrfs_info(fs_info,
			   "zoned: skip relocating block group %llu to repair: EBUSY",
			   target);
		return -EBUSY;
	}

	mutex_lock(&fs_info->reclaim_bgs_lock);

	/* Ensure block group still exists */
	cache = btrfs_lookup_block_group(fs_info, target);
	if (!cache)
		goto out;

	if (!cache->relocating_repair)
		goto out;

	ret = btrfs_may_alloc_data_chunk(fs_info, target);
	if (ret < 0)
		goto out;

	btrfs_info(fs_info,
		   "zoned: relocating block group %llu to repair IO failure",
		   target);
	ret = btrfs_relocate_chunk(fs_info, target);

out:
	if (cache)
		btrfs_put_block_group(cache);
	mutex_unlock(&fs_info->reclaim_bgs_lock);
	btrfs_exclop_finish(fs_info);

	return ret;
}
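
/*
 * Lifetime note for the pair of functions around this point:
 * btrfs_repair_one_zone() below hands its block group reference to the
 * kthread above, which records the start offset, drops that reference
 * early, and looks the group up again once it holds the balance exclop.
 * A group that disappears in between is therefore skipped instead of
 * being pinned across the whole relocation.
 */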

int btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
{
	struct btrfs_block_group *cache;

	/* Do not attempt to repair in degraded state */
	if (btrfs_test_opt(fs_info, DEGRADED))
		return 0;

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return 0;

	spin_lock(&cache->lock);
	if (cache->relocating_repair) {
		spin_unlock(&cache->lock);
		btrfs_put_block_group(cache);
		return 0;
	}
	cache->relocating_repair = 1;
	spin_unlock(&cache->lock);

	kthread_run(relocating_repair_kthread, cache,
		    "btrfs-relocating-repair");

	return 0;
}