// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include <linux/namei.h>
#include "misc.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"
#include "discard.h"
#include "zoned.h"

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes	= 2,
		.dev_stripes	= 1,
		.devs_max	= 0,	/* 0 == as many as possible */
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "raid10",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 2,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "raid1",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C3] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 3,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 3,
		.ncopies	= 3,
		.nparity	= 0,
		.raid_name	= "raid1c3",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C3,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C4] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 4,
		.devs_min	= 4,
		.tolerated_failures = 3,
		.devs_increment	= 4,
		.ncopies	= 4,
		.nparity	= 0,
		.raid_name	= "raid1c4",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C4,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes	= 1,
		.dev_stripes	= 2,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "dup",
		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 0,
		.raid_name	= "raid0",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 0,
		.raid_name	= "single",
		.bg_flag	= 0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 1,
		.raid_name	= "raid5",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 2,
		.raid_name	= "raid6",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};

/*
 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
 * can be used as an index to access btrfs_raid_array[].
 */
enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags)
{
	if (flags & BTRFS_BLOCK_GROUP_RAID10)
		return BTRFS_RAID_RAID10;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
		return BTRFS_RAID_RAID1;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1C3)
		return BTRFS_RAID_RAID1C3;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1C4)
		return BTRFS_RAID_RAID1C4;
	else if (flags & BTRFS_BLOCK_GROUP_DUP)
		return BTRFS_RAID_DUP;
	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
		return BTRFS_RAID_RAID0;
	else if (flags & BTRFS_BLOCK_GROUP_RAID5)
		return BTRFS_RAID_RAID5;
	else if (flags & BTRFS_BLOCK_GROUP_RAID6)
		return BTRFS_RAID_RAID6;

	return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
}

const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}

/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including the terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

#define DESCRIBE_FLAG(flag, desc)					\
	do {								\
		if (flags & (flag)) {					\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)			\
				goto out_overflow;			\
			size_bp -= ret;					\
			bp += ret;					\
			flags &= ~(flag);				\
		}							\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide a
	 * sufficiently large buffer.
	 */
out_overflow:;
}

static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_io_context **bioc_ret,
			     int mirror_num, int need_raid_map);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files.
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list in general
 * but in mount context it could be used to exclude list modifications by e.g.
 * the scan ioctl
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, i.e. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * Is not required at mount and close times, because our device list is
 * protected by the uuid_mutex at that point.
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   device_list_mutex
 *     chunk_mutex
 *   balance_mutex
 *
 *
 * Exclusive operations
 * ====================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * The status of exclusive operation is set and cleared atomically.
 * During the course of Paused state, fs_info::exclusive_operation remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * The exclusive status is cleared when the device operation is canceled or
 * completed.
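 *
 * As a purely illustrative sketch (not taken from any specific caller), the
 * "Lock nesting" order above means that code needing several of these locks
 * acquires them outermost-first:
 *
 *	mutex_lock(&uuid_mutex);
 *	mutex_lock(&fs_devices->device_list_mutex);
 *	mutex_lock(&fs_info->chunk_mutex);
 *	...
 *	mutex_unlock(&fs_info->chunk_mutex);
 *	mutex_unlock(&fs_devices->device_list_mutex);
 *	mutex_unlock(&uuid_mutex);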
 */

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:		if not NULL, copy the UUID to fs_devices::fsid
 * @metadata_fsid:	if not NULL, copy the UUID to fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
						 const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	INIT_LIST_HEAD(&fs_devs->seed_list);
	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);

	if (metadata_fsid)
		memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
	else if (fsid)
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);

	return fs_devs;
}

void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	extent_io_tree_release(&device->alloc_state);
	bio_put(device->flush_bio);
	btrfs_destroy_dev_zone_info(device);
	kfree(device);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}

void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}

static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (metadata_fsid) {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
			    && memcmp(metadata_fsid, fs_devices->metadata_uuid,
				      BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		} else {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		}
	}
	return NULL;
}

static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
				struct btrfs_super_block *disk_super)
{

	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle the case of a scanned device having completed its fsid
	 * change but belonging to a fs_devices that was created by first
	 * scanning a device which didn't have its fsid/metadata_uuid
	 * changed at all and still has the CHANGING_FSID_V2 flag set.
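	 *
	 * In code terms (an assumed plain-language restatement of the first
	 * loop below): fsid_change is still set, the fs_devices has identical
	 * fsid and metadata_uuid, and the disk's metadata_uuid matches them.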
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(disk_super->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}
	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by a device that
	 * has an outdated pair of fsid/metadata_uuid and
	 * CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(fs_devices->metadata_uuid,
			   fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
		    memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
}


static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct btrfs_super_block **disk_super)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*disk_super = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*disk_super)) {
		ret = PTR_ERR(*disk_super);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	return ret;
}

static bool device_path_matched(const char *path, struct btrfs_device *device)
{
	int found;

	rcu_read_lock();
	found = strcmp(rcu_str_deref(device->name), path);
	rcu_read_unlock();

	return found == 0;
}

/*
 * Search and remove all stale devices (devices which are not mounted).
 * When both inputs are NULL, it will search and release all stale devices.
 *
 * @path:		Optional. When provided, release only the unmounted
 *			devices matching this path.
 * @skip_device:	Optional. Skip this device when searching for stale
 *			devices.
 *
 * Return:	0 for success or if @path is NULL.
 *		-EBUSY if @path is a mounted device.
 *		-ENOENT if @path does not match any device in the list.
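 *
 * For example, btrfs_forget_devices() below calls this with a NULL path to
 * release every unmounted device known to the module.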
 */
static int btrfs_free_stale_devices(const char *path,
				    struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	if (path)
		ret = -ENOENT;

	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (path && !device->name)
				continue;
			if (path && !device_path_matched(path, device))
				continue;
			if (fs_devices->opened) {
				/* for an already deleted device return 0 */
				if (path && ret != 0)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			ret = 0;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	return ret;
}

/*
 * This is only used on mount, and we are protected from competing things
 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 * fs_devices->device_list_mutex here.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
			struct btrfs_device *device, fmode_t flags,
			void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev, &disk_super);
	if (ret)
		return ret;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_free_page;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_free_page;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_free_page;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = true;
	} else {
		if (bdev_read_only(bdev))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	q = bdev_get_queue(bdev);
	if (!blk_queue_nonrot(q))
		fs_devices->rotating = true;

	device->bdev = bdev;
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	device->mode = flags;

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	btrfs_release_disk_super(disk_super);

	return 0;

error_free_page:
	btrfs_release_disk_super(disk_super);
	blkdev_put(bdev, flags);

	return -EINVAL;
}

/*
 * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
 * being created with a disk that has already completed its fsid change. Such
 * disk can belong to an fs which has its FSID changed or to one which doesn't.
 * Handle both cases here.
 */
static struct btrfs_fs_devices *find_fsid_inprogress(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, NULL);
}


static struct btrfs_fs_devices *find_fsid_changed(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handles the case where the scanned device is part of an fs that had
	 * multiple successful changes of FSID but the currently scanned device
	 * didn't observe it. Meaning our fsid will be different than theirs.
	 * We need to handle two subcases:
	 * 1 - The fs still continues to have different METADATA/FSID uuids.
	 * 2 - The fs is switched back to its original FSID (METADATA/FSID
	 * are equal).
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		/* Changed UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->fsid,
			   BTRFS_FSID_SIZE) != 0)
			return fs_devices;

		/* Unchanged UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}

	return NULL;
}

static struct btrfs_fs_devices *find_fsid_reverted_metadata(
				struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle the case where the scanned device is part of an fs whose last
	 * metadata UUID change reverted it to the original FSID. At the same
	 * time fs_devices was first created by another constituent device
	 * which didn't fully observe the operation. This results in a
	 * btrfs_fs_devices created with metadata/fsid different AND
	 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
	 * fs_devices equal to the FSID of the disk.
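	 *
	 * In short (an assumed restatement of the check below): fsid_change
	 * is set, fsid != metadata_uuid, and metadata_uuid equals the scanned
	 * disk's fsid.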
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    fs_devices->fsid_change)
			return fs_devices;
	}

	return NULL;
}
/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
					BTRFS_SUPER_FLAG_CHANGING_FSID_V2);

	if (fsid_change_in_progress) {
		if (!has_metadata_uuid)
			fs_devices = find_fsid_inprogress(disk_super);
		else
			fs_devices = find_fsid_changed(disk_super);
	} else if (has_metadata_uuid) {
		fs_devices = find_fsid_with_metadata_uuid(disk_super);
	} else {
		fs_devices = find_fsid_reverted_metadata(disk_super);
		if (!fs_devices)
			fs_devices = find_fsid(disk_super->fsid, NULL);
	}


	if (!fs_devices) {
		if (has_metadata_uuid)
			fs_devices = alloc_fs_devices(disk_super->fsid,
						      disk_super->metadata_uuid);
		else
			fs_devices = alloc_fs_devices(disk_super->fsid, NULL);

		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		fs_devices->fsid_change = fsid_change_in_progress;

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		struct btrfs_dev_lookup_args args = {
			.devid = devid,
			.uuid = disk_super->dev_item.uuid,
		};

		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, &args);

		/*
		 * If this disk has been pulled into an fs devices created by
		 * a device which had the CHANGING_FSID_V2 flag then replace the
		 * metadata_uuid/fsid values of the fs_devices.
		 */
		if (fs_devices->fsid_change &&
		    found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
					BTRFS_FSID_SIZE);

			if (has_metadata_uuid)
				memcpy(fs_devices->metadata_uuid,
				       disk_super->metadata_uuid,
				       BTRFS_FSID_SIZE);
			else
				memcpy(fs_devices->metadata_uuid,
				       disk_super->fsid, BTRFS_FSID_SIZE);

			fs_devices->fsid_change = false;
		}
	}

	if (!device) {
		if (fs_devices->opened) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			btrfs_free_device(device);
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_assign_pointer(device->name, name);

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info(
	"BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->label, devid, found_transid, path,
				current->comm, task_pid_nr(current));
		else
			pr_info(
	"BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->fsid, devid, found_transid, path,
				current->comm, task_pid_nr(current));

	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When the FS is already mounted:
		 * 1. If you are here and if the device->name is NULL, that
		 *    means this device was missing at the time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path', that means either
		 *      a. The same device disappeared and reappeared with a
		 *         different name, or
		 *      b. The missing-disk-which-was-replaced has
		 *         reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further, in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transactions when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must never be allowed.
		 */

		/*
		 * For now, we do allow updates to btrfs_fs_device through the
		 * btrfs dev scan cli after the FS has been mounted. We're still
		 * tracking a problem where systems fail to mount by subvolume
		 * id when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is, if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with the same uuid and devid. We keep the one
			 * with the larger generation number or the last-in if
			 * the generations are equal.
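			 *
			 * For example (hypothetical device names): if /dev/sdb
			 * and /dev/sdc both carry this fsid/devid with
			 * generations 10 and 11, the scan keeps the copy with
			 * generation 11.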
			 */
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EEXIST);
		}

		/*
		 * We are going to replace the device path for a given devid,
		 * make sure it's the same device if the device is mounted
		 */
		if (device->bdev) {
			int error;
			dev_t path_dev;

			error = lookup_bdev(path, &path_dev);
			if (error) {
				mutex_unlock(&fs_devices->device_list_mutex);
				return ERR_PTR(error);
			}

			if (device->bdev->bd_dev != path_dev) {
				mutex_unlock(&fs_devices->device_list_mutex);
				/*
				 * device->fs_info may not be reliable here, so
				 * pass in a NULL instead. This avoids a
				 * possible use-after-free when the fs_info and
				 * fs_info->sb are already torn down.
				 */
				btrfs_warn_in_rcu(NULL,
	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
						  path, devid, found_transid,
						  current->comm,
						  task_pid_nr(current));
				return ERR_PTR(-EEXIST);
			}
			btrfs_info_in_rcu(device->fs_info,
	"devid %llu device path %s changed to %s scanned by %s (%d)",
					  devid, rcu_str_deref(device->name),
					  path, current->comm,
					  task_pid_nr(current));
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero the
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with the largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);
	return device;
}

static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	fs_devices = alloc_fs_devices(orig->fsid, NULL);
	if (IS_ERR(fs_devices))
		return fs_devices;

	fs_devices->total_devices = orig->total_devices;

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device)) {
			ret = PTR_ERR(device);
			goto error;
		}

		/*
		 * This is ok to do without RCU read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str,
					GFP_KERNEL);
			if (!name) {
				btrfs_free_device(device);
				ret = -ENOMEM;
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(ret);
}

static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
				      struct btrfs_device **latest_dev)
{
	struct btrfs_device *device, *next;

	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			    !test_bit(BTRFS_DEV_STATE_MISSING,
				      &device->dev_state) &&
			    (!*latest_dev ||
			     device->generation > (*latest_dev)->generation)) {
				*latest_dev = device;
			}
			continue;
		}

		/*
		 * We have already validated the presence of
		 * BTRFS_DEV_REPLACE_DEVID in btrfs_init_dev_replace(), so just
		 * continue.
		 */
		if (device->devid == BTRFS_DEV_REPLACE_DEVID)
			continue;

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}
}

/*
 * After we have read the system tree and know devids belonging to this
 * filesystem, remove the device which does not belong there.
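 * (e.g. a device that was scanned for this fsid earlier but whose devid is
 * no longer present in the filesystem's metadata)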
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_fs_devices *seed_dev;

	mutex_lock(&uuid_mutex);
	__btrfs_free_extra_devids(fs_devices, &latest_dev);

	list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
		__btrfs_free_extra_devids(seed_dev, &latest_dev);

	fs_devices->latest_dev = latest_dev;

	mutex_unlock(&uuid_mutex);
}

static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	blkdev_put(device->bdev, device->mode);
}

static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->devid == BTRFS_DEV_REPLACE_DEVID)
		clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
		fs_devices->missing_devices--;

	btrfs_close_bdev(device);
	if (device->bdev) {
		fs_devices->open_devices--;
		device->bdev = NULL;
	}
	clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	btrfs_destroy_dev_zone_info(device);

	device->fs_info = NULL;
	atomic_set(&device->dev_stats_ccnt, 0);
	extent_io_tree_release(&device->alloc_state);

	/*
	 * Reset the flush error record. We might have a transient flush error
	 * in this mount, and if so we aborted the current transaction and set
	 * the fs to an error state, guaranteeing no super blocks can be further
	 * committed. However that error might be transient and if we unmount the
	 * filesystem and mount it again, we should allow the mount to succeed
	 * (btrfs_check_rw_degradable() should not fail) - if after mounting the
	 * filesystem again we still get flush errors, then we will again abort
	 * any transaction and set the error state, guaranteeing no commits of
	 * unsafe super blocks.
	 */
	device->last_flush_error = 0;

	/* Verify the device is back in a pristine state */
	ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
	ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	ASSERT(list_empty(&device->dev_alloc_list));
	ASSERT(list_empty(&device->post_commit_list));
	ASSERT(atomic_read(&device->reada_in_flight) == 0);
}

static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	lockdep_assert_held(&uuid_mutex);

	if (--fs_devices->opened > 0)
		return;

	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
		btrfs_close_one_device(device);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = false;
	fs_devices->fs_info = NULL;
}

void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	LIST_HEAD(list);
	struct btrfs_fs_devices *tmp;

	mutex_lock(&uuid_mutex);
	close_fs_devices(fs_devices);
	if (!fs_devices->opened)
		list_splice_init(&fs_devices->seed_list, &list);

	list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
		close_fs_devices(fs_devices);
		list_del(&fs_devices->seed_list);
		free_fs_devices(fs_devices);
	}
	mutex_unlock(&uuid_mutex);
}

static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_device *tmp_device;

	flags |= FMODE_EXCL;

	list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
				 dev_list) {
		int ret;

		ret = btrfs_open_one_device(fs_devices, device, flags, holder);
		if (ret == 0 &&
		    (!latest_dev || device->generation > latest_dev->generation)) {
			latest_dev = device;
		} else if (ret == -ENODATA) {
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);
		}
	}
	if (fs_devices->open_devices == 0)
		return -EINVAL;

	fs_devices->opened = 1;
	fs_devices->latest_dev = latest_dev;
	fs_devices->total_rw_bytes = 0;
	fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
	fs_devices->read_policy = BTRFS_READ_POLICY_PID;

	return 0;
}

static int devid_cmp(void *priv, const struct list_head *a,
		     const struct list_head *b)
{
	const struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);
	/*
	 * The device_list_mutex cannot be taken here in case opening the
	 * underlying device takes further locks like open_mutex.
1262 * 1263 * We also don't need the lock here as this is called during mount and 1264 * exclusion is provided by uuid_mutex 1265 */ 1266 1267 if (fs_devices->opened) { 1268 fs_devices->opened++; 1269 ret = 0; 1270 } else { 1271 list_sort(NULL, &fs_devices->devices, devid_cmp); 1272 ret = open_fs_devices(fs_devices, flags, holder); 1273 } 1274 1275 return ret; 1276 } 1277 1278 void btrfs_release_disk_super(struct btrfs_super_block *super) 1279 { 1280 struct page *page = virt_to_page(super); 1281 1282 put_page(page); 1283 } 1284 1285 static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev, 1286 u64 bytenr, u64 bytenr_orig) 1287 { 1288 struct btrfs_super_block *disk_super; 1289 struct page *page; 1290 void *p; 1291 pgoff_t index; 1292 1293 /* make sure our super fits in the device */ 1294 if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode)) 1295 return ERR_PTR(-EINVAL); 1296 1297 /* make sure our super fits in the page */ 1298 if (sizeof(*disk_super) > PAGE_SIZE) 1299 return ERR_PTR(-EINVAL); 1300 1301 /* make sure our super doesn't straddle pages on disk */ 1302 index = bytenr >> PAGE_SHIFT; 1303 if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index) 1304 return ERR_PTR(-EINVAL); 1305 1306 /* pull in the page with our super */ 1307 page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL); 1308 1309 if (IS_ERR(page)) 1310 return ERR_CAST(page); 1311 1312 p = page_address(page); 1313 1314 /* align our pointer to the offset of the super block */ 1315 disk_super = p + offset_in_page(bytenr); 1316 1317 if (btrfs_super_bytenr(disk_super) != bytenr_orig || 1318 btrfs_super_magic(disk_super) != BTRFS_MAGIC) { 1319 btrfs_release_disk_super(p); 1320 return ERR_PTR(-EINVAL); 1321 } 1322 1323 if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1]) 1324 disk_super->label[BTRFS_LABEL_SIZE - 1] = 0; 1325 1326 return disk_super; 1327 } 1328 1329 int btrfs_forget_devices(const char *path) 1330 { 1331 int ret; 1332 1333 mutex_lock(&uuid_mutex); 1334 ret = btrfs_free_stale_devices(strlen(path) ? path : NULL, NULL); 1335 mutex_unlock(&uuid_mutex); 1336 1337 return ret; 1338 } 1339 1340 /* 1341 * Look for a btrfs signature on a device. This may be called out of the mount path 1342 * and we are not allowed to call set_blocksize during the scan. The superblock 1343 * is read via pagecache 1344 */ 1345 struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags, 1346 void *holder) 1347 { 1348 struct btrfs_super_block *disk_super; 1349 bool new_device_added = false; 1350 struct btrfs_device *device = NULL; 1351 struct block_device *bdev; 1352 u64 bytenr, bytenr_orig; 1353 int ret; 1354 1355 lockdep_assert_held(&uuid_mutex); 1356 1357 /* 1358 * we would like to check all the supers, but that would make 1359 * a btrfs mount succeed after a mkfs from a different FS. 
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	flags |= FMODE_EXCL;

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	bytenr_orig = btrfs_sb_offset(0);
	ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr);
	if (ret) {
		device = ERR_PTR(ret);
		goto error_bdev_put;
	}

	disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig);
	if (IS_ERR(disk_super)) {
		device = ERR_CAST(disk_super);
		goto error_bdev_put;
	}

	device = device_list_add(path, disk_super, &new_device_added);
	if (!IS_ERR(device)) {
		if (new_device_added)
			btrfs_free_stale_devices(path, device);
	}

	btrfs_release_disk_super(disk_super);

error_bdev_put:
	blkdev_put(bdev, flags);

	return device;
}

/*
 * Try to find a chunk that intersects the [start, start + len] range and when
 * one such is found, record the end of it in *start
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{
	u64 physical_start, physical_end;

	lockdep_assert_held(&device->fs_info->chunk_mutex);

	if (!find_first_extent_bit(&device->alloc_state, *start,
				   &physical_start, &physical_end,
				   CHUNK_ALLOCATED, NULL)) {

		if (in_range(physical_start, *start, len) ||
		    in_range(*start, physical_start,
			     physical_end - physical_start)) {
			*start = physical_end + 1;
			return true;
		}
	}
	return false;
}

static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
{
	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		/*
		 * We don't want to overwrite the superblock on the drive nor
		 * any area used by the boot loader (grub for example), so we
		 * make sure to start at an offset of at least 1MB.
		 */
		return max_t(u64, start, SZ_1M);
	case BTRFS_CHUNK_ALLOC_ZONED:
		/*
		 * We don't care about the starting region like the regular
		 * allocator, because we anyway use/reserve the first two zones
		 * for superblock logging.
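		 *
		 * E.g. with a hypothetical 256MiB zone_size, a 1MiB start is
		 * rounded up to the next zone boundary at 256MiB by the
		 * ALIGN() below.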
		 */
		return ALIGN(start, device->zone_info->zone_size);
	default:
		BUG();
	}
}

static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
					u64 *hole_start, u64 *hole_size,
					u64 num_bytes)
{
	u64 zone_size = device->zone_info->zone_size;
	u64 pos;
	int ret;
	bool changed = false;

	ASSERT(IS_ALIGNED(*hole_start, zone_size));

	while (*hole_size > 0) {
		pos = btrfs_find_allocatable_zones(device, *hole_start,
						   *hole_start + *hole_size,
						   num_bytes);
		if (pos != *hole_start) {
			*hole_size = *hole_start + *hole_size - pos;
			*hole_start = pos;
			changed = true;
			if (*hole_size < num_bytes)
				break;
		}

		ret = btrfs_ensure_empty_zones(device, pos, num_bytes);

		/* Range is ensured to be empty */
		if (!ret)
			return changed;

		/* Given hole range was invalid (outside of device) */
		if (ret == -ERANGE) {
			*hole_start += *hole_size;
			*hole_size = 0;
			return true;
		}

		*hole_start += zone_size;
		*hole_size -= zone_size;
		changed = true;
	}

	return changed;
}

/**
 * dev_extent_hole_check - check if specified hole is suitable for allocation
 * @device:	the device on which we have the hole
 * @hole_start: starting position of the hole
 * @hole_size:	the size of the hole
 * @num_bytes:	the size of the free space that we need
 *
 * This function may modify @hole_start and @hole_size to reflect the suitable
 * position for allocation. Returns true if the hole position is updated,
 * false otherwise.
 */
static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
				  u64 *hole_size, u64 num_bytes)
{
	bool changed = false;
	u64 hole_end = *hole_start + *hole_size;

	for (;;) {
		/*
		 * Check before we set max_hole_start, otherwise we could end up
		 * sending back this offset anyway.
		 */
		if (contains_pending_extent(device, hole_start, *hole_size)) {
			if (hole_end >= *hole_start)
				*hole_size = hole_end - *hole_start;
			else
				*hole_size = 0;
			changed = true;
		}

		switch (device->fs_devices->chunk_alloc_policy) {
		case BTRFS_CHUNK_ALLOC_REGULAR:
			/* No extra check */
			break;
		case BTRFS_CHUNK_ALLOC_ZONED:
			if (dev_extent_hole_check_zoned(device, hole_start,
							hole_size, num_bytes)) {
				changed = true;
				/*
				 * The changed hole can contain a pending
				 * extent. Loop again to check that.
				 */
				continue;
			}
			break;
		default:
			BUG();
		}

		break;
	}

	return changed;
}

/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * This uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents.
 *
 * @start is used to store the start of the free space if we find one. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 *
 * NOTE: This function will search the *commit* root of the device tree, and
 * does an extra check to ensure dev extents are not double allocated.
 * This makes the function safe to allocate dev extents but may not report
 * correct usable device space, as a device extent freed in the current
 * transaction is not reported as available.
 */
static int find_free_dev_extent_start(struct btrfs_device *device,
				u64 num_bytes, u64 search_start, u64 *start,
				u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	search_start = dev_extent_search_start(device, search_start);

	WARN_ON(device->zone_info &&
		!IS_ALIGNED(num_bytes, device->zone_info->zone_size));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end ||
		test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_backwards(root, &key, path);
	if (ret < 0)
		goto out;

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;
			dev_extent_hole_check(device, &search_start, &hole_size,
					      num_bytes);

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
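	 * (for example after a device shrink has lowered device->total_bytes)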
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;
		if (dev_extent_hole_check(device, &search_start, &hole_size,
					  num_bytes)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(device, num_bytes, 0, start, len);
}

static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_device *device,
			  u64 start, u64 *dev_extent_len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret == 0)
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
out:
	btrfs_free_path(path);
	return ret;
}

static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;

	em_tree = &fs_info->mapping_tree;
	read_lock(&em_tree->lock);
	n = rb_last(&em_tree->map.rb_root);
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
	}
	read_unlock(&em_tree->lock);

	return ret;
}

static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	if (ret == 0) {
		/* Corruption */
		btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
matched"); 1809 ret = -EUCLEAN; 1810 goto error; 1811 } 1812 1813 ret = btrfs_previous_item(fs_info->chunk_root, path, 1814 BTRFS_DEV_ITEMS_OBJECTID, 1815 BTRFS_DEV_ITEM_KEY); 1816 if (ret) { 1817 *devid_ret = 1; 1818 } else { 1819 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 1820 path->slots[0]); 1821 *devid_ret = found_key.offset + 1; 1822 } 1823 ret = 0; 1824 error: 1825 btrfs_free_path(path); 1826 return ret; 1827 } 1828 1829 /* 1830 * the device information is stored in the chunk root 1831 * the btrfs_device struct should be fully filled in 1832 */ 1833 static int btrfs_add_dev_item(struct btrfs_trans_handle *trans, 1834 struct btrfs_device *device) 1835 { 1836 int ret; 1837 struct btrfs_path *path; 1838 struct btrfs_dev_item *dev_item; 1839 struct extent_buffer *leaf; 1840 struct btrfs_key key; 1841 unsigned long ptr; 1842 1843 path = btrfs_alloc_path(); 1844 if (!path) 1845 return -ENOMEM; 1846 1847 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1848 key.type = BTRFS_DEV_ITEM_KEY; 1849 key.offset = device->devid; 1850 1851 btrfs_reserve_chunk_metadata(trans, true); 1852 ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path, 1853 &key, sizeof(*dev_item)); 1854 btrfs_trans_release_chunk_metadata(trans); 1855 if (ret) 1856 goto out; 1857 1858 leaf = path->nodes[0]; 1859 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 1860 1861 btrfs_set_device_id(leaf, dev_item, device->devid); 1862 btrfs_set_device_generation(leaf, dev_item, 0); 1863 btrfs_set_device_type(leaf, dev_item, device->type); 1864 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 1865 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 1866 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 1867 btrfs_set_device_total_bytes(leaf, dev_item, 1868 btrfs_device_get_disk_total_bytes(device)); 1869 btrfs_set_device_bytes_used(leaf, dev_item, 1870 btrfs_device_get_bytes_used(device)); 1871 btrfs_set_device_group(leaf, dev_item, 0); 1872 btrfs_set_device_seek_speed(leaf, dev_item, 0); 1873 btrfs_set_device_bandwidth(leaf, dev_item, 0); 1874 btrfs_set_device_start_offset(leaf, dev_item, 0); 1875 1876 ptr = btrfs_device_uuid(dev_item); 1877 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 1878 ptr = btrfs_device_fsid(dev_item); 1879 write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid, 1880 ptr, BTRFS_FSID_SIZE); 1881 btrfs_mark_buffer_dirty(leaf); 1882 1883 ret = 0; 1884 out: 1885 btrfs_free_path(path); 1886 return ret; 1887 } 1888 1889 /* 1890 * Function to update ctime/mtime for a given device path. 1891 * Mainly used for ctime/mtime based probe like libblkid. 1892 * 1893 * We don't care about errors here, this is just to be kind to userspace. 
1894 */
1895 static void update_dev_time(const char *device_path)
1896 {
1897 struct path path;
1898 struct timespec64 now;
1899 int ret;
1900
1901 ret = kern_path(device_path, LOOKUP_FOLLOW, &path);
1902 if (ret)
1903 return;
1904
1905 now = current_time(d_inode(path.dentry));
1906 inode_update_time(d_inode(path.dentry), &now, S_MTIME | S_CTIME);
1907 path_put(&path);
1908 }
1909
1910 static int btrfs_rm_dev_item(struct btrfs_device *device)
1911 {
1912 struct btrfs_root *root = device->fs_info->chunk_root;
1913 int ret;
1914 struct btrfs_path *path;
1915 struct btrfs_key key;
1916 struct btrfs_trans_handle *trans;
1917
1918 path = btrfs_alloc_path();
1919 if (!path)
1920 return -ENOMEM;
1921
1922 trans = btrfs_start_transaction(root, 0);
1923 if (IS_ERR(trans)) {
1924 btrfs_free_path(path);
1925 return PTR_ERR(trans);
1926 }
1927 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1928 key.type = BTRFS_DEV_ITEM_KEY;
1929 key.offset = device->devid;
1930
1931 btrfs_reserve_chunk_metadata(trans, false);
1932 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1933 btrfs_trans_release_chunk_metadata(trans);
1934 if (ret) {
1935 if (ret > 0)
1936 ret = -ENOENT;
1937 btrfs_abort_transaction(trans, ret);
1938 btrfs_end_transaction(trans);
1939 goto out;
1940 }
1941
1942 ret = btrfs_del_item(trans, root, path);
1943 if (ret) {
1944 btrfs_abort_transaction(trans, ret);
1945 btrfs_end_transaction(trans);
1946 }
1947
1948 out:
1949 btrfs_free_path(path);
1950 if (!ret)
1951 ret = btrfs_commit_transaction(trans);
1952 return ret;
1953 }
1954
1955 /*
1956 * Verify that @num_devices satisfies the RAID profile constraints in the whole
1957 * filesystem. It's up to the caller to adjust that number regarding e.g. device
1958 * replace.
1959 */
1960 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
1961 u64 num_devices)
1962 {
1963 u64 all_avail;
1964 unsigned seq;
1965 int i;
1966
1967 do {
1968 seq = read_seqbegin(&fs_info->profiles_lock);
1969
1970 all_avail = fs_info->avail_data_alloc_bits |
1971 fs_info->avail_system_alloc_bits |
1972 fs_info->avail_metadata_alloc_bits;
1973 } while (read_seqretry(&fs_info->profiles_lock, seq));
1974
1975 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
1976 if (!(all_avail & btrfs_raid_array[i].bg_flag))
1977 continue;
1978
1979 if (num_devices < btrfs_raid_array[i].devs_min)
1980 return btrfs_raid_array[i].mindev_error;
1981 }
1982
1983 return 0;
1984 }
1985
1986 static struct btrfs_device * btrfs_find_next_active_device(
1987 struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
1988 {
1989 struct btrfs_device *next_device;
1990
1991 list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
1992 if (next_device != device &&
1993 !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
1994 && next_device->bdev)
1995 return next_device;
1996 }
1997
1998 return NULL;
1999 }
2000
2001 /*
2002 * Helper function to check if the given device is part of s_bdev / latest_dev
2003 * and replace it with the provided or the next active device. In the context
2004 * where this function is called, there should always be another device (or
2005 * this_dev) which is active.
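 *
 * For example, btrfs_rm_device() and btrfs_destroy_dev_replace_tgtdev()
 * pass a NULL @next_device and let this helper pick any remaining active
 * device, while btrfs_init_new_device() passes the newly added device
 * explicitly when sprouting a seed filesystem.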
2006 */
2007 void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
2008 struct btrfs_device *next_device)
2009 {
2010 struct btrfs_fs_info *fs_info = device->fs_info;
2011
2012 if (!next_device)
2013 next_device = btrfs_find_next_active_device(fs_info->fs_devices,
2014 device);
2015 ASSERT(next_device);
2016
2017 if (fs_info->sb->s_bdev &&
2018 (fs_info->sb->s_bdev == device->bdev))
2019 fs_info->sb->s_bdev = next_device->bdev;
2020
2021 if (fs_info->fs_devices->latest_dev->bdev == device->bdev)
2022 fs_info->fs_devices->latest_dev = next_device;
2023 }
2024
2025 /*
2026 * Return btrfs_fs_devices::num_devices excluding the device that's currently
2027 * being replaced.
2028 */
2029 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
2030 {
2031 u64 num_devices = fs_info->fs_devices->num_devices;
2032
2033 down_read(&fs_info->dev_replace.rwsem);
2034 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
2035 ASSERT(num_devices > 1);
2036 num_devices--;
2037 }
2038 up_read(&fs_info->dev_replace.rwsem);
2039
2040 return num_devices;
2041 }
2042
2043 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
2044 struct block_device *bdev,
2045 const char *device_path)
2046 {
2047 struct btrfs_super_block *disk_super;
2048 int copy_num;
2049
2050 if (!bdev)
2051 return;
2052
2053 for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
2054 struct page *page;
2055 int ret;
2056
2057 disk_super = btrfs_read_dev_one_super(bdev, copy_num);
2058 if (IS_ERR(disk_super))
2059 continue;
2060
2061 if (bdev_is_zoned(bdev)) {
2062 btrfs_reset_sb_log_zones(bdev, copy_num);
2063 continue;
2064 }
2065
2066 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
2067
2068 page = virt_to_page(disk_super);
2069 set_page_dirty(page);
2070 lock_page(page);
2071 /* write_one_page() unlocks the page */
2072 ret = write_one_page(page);
2073 if (ret)
2074 btrfs_warn(fs_info,
2075 "error clearing superblock number %d (%d)",
2076 copy_num, ret);
2077 btrfs_release_disk_super(disk_super);
2078
2079 }
2080
2081 /* Notify udev that device has changed */
2082 btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
2083
2084 /* Update ctime/mtime for device path for libblkid */
2085 update_dev_time(device_path);
2086 }
2087
2088 int btrfs_rm_device(struct btrfs_fs_info *fs_info,
2089 struct btrfs_dev_lookup_args *args,
2090 struct block_device **bdev, fmode_t *mode)
2091 {
2092 struct btrfs_device *device;
2093 struct btrfs_fs_devices *cur_devices;
2094 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2095 u64 num_devices;
2096 int ret = 0;
2097
2098 /*
2099 * The device list in fs_devices is accessed without locks (neither
2100 * uuid_mutex nor device_list_mutex) as it won't change on a mounted
2101 * filesystem and another device rm cannot run.
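 *
 * The list manipulation further below does take device_list_mutex, as
 * that part can race with a concurrent write_all_supers().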
2102 */ 2103 num_devices = btrfs_num_devices(fs_info); 2104 2105 ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1); 2106 if (ret) 2107 goto out; 2108 2109 device = btrfs_find_device(fs_info->fs_devices, args); 2110 if (!device) { 2111 if (args->missing) 2112 ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND; 2113 else 2114 ret = -ENOENT; 2115 goto out; 2116 } 2117 2118 if (btrfs_pinned_by_swapfile(fs_info, device)) { 2119 btrfs_warn_in_rcu(fs_info, 2120 "cannot remove device %s (devid %llu) due to active swapfile", 2121 rcu_str_deref(device->name), device->devid); 2122 ret = -ETXTBSY; 2123 goto out; 2124 } 2125 2126 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 2127 ret = BTRFS_ERROR_DEV_TGT_REPLACE; 2128 goto out; 2129 } 2130 2131 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 2132 fs_info->fs_devices->rw_devices == 1) { 2133 ret = BTRFS_ERROR_DEV_ONLY_WRITABLE; 2134 goto out; 2135 } 2136 2137 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2138 mutex_lock(&fs_info->chunk_mutex); 2139 list_del_init(&device->dev_alloc_list); 2140 device->fs_devices->rw_devices--; 2141 mutex_unlock(&fs_info->chunk_mutex); 2142 } 2143 2144 ret = btrfs_shrink_device(device, 0); 2145 if (!ret) 2146 btrfs_reada_remove_dev(device); 2147 if (ret) 2148 goto error_undo; 2149 2150 /* 2151 * TODO: the superblock still includes this device in its num_devices 2152 * counter although write_all_supers() is not locked out. This 2153 * could give a filesystem state which requires a degraded mount. 2154 */ 2155 ret = btrfs_rm_dev_item(device); 2156 if (ret) 2157 goto error_undo; 2158 2159 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2160 btrfs_scrub_cancel_dev(device); 2161 2162 /* 2163 * the device list mutex makes sure that we don't change 2164 * the device list while someone else is writing out all 2165 * the device supers. Whoever is writing all supers, should 2166 * lock the device list mutex before getting the number of 2167 * devices in the super block (super_copy). Conversely, 2168 * whoever updates the number of devices in the super block 2169 * (super_copy) should hold the device list mutex. 2170 */ 2171 2172 /* 2173 * In normal cases the cur_devices == fs_devices. But in case 2174 * of deleting a seed device, the cur_devices should point to 2175 * its own fs_devices listed under the fs_devices->seed_list. 2176 */ 2177 cur_devices = device->fs_devices; 2178 mutex_lock(&fs_devices->device_list_mutex); 2179 list_del_rcu(&device->dev_list); 2180 2181 cur_devices->num_devices--; 2182 cur_devices->total_devices--; 2183 /* Update total_devices of the parent fs_devices if it's seed */ 2184 if (cur_devices != fs_devices) 2185 fs_devices->total_devices--; 2186 2187 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) 2188 cur_devices->missing_devices--; 2189 2190 btrfs_assign_next_active_device(device, NULL); 2191 2192 if (device->bdev) { 2193 cur_devices->open_devices--; 2194 /* remove sysfs entry */ 2195 btrfs_sysfs_remove_device(device); 2196 } 2197 2198 num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1; 2199 btrfs_set_super_num_devices(fs_info->super_copy, num_devices); 2200 mutex_unlock(&fs_devices->device_list_mutex); 2201 2202 /* 2203 * At this point, the device is zero sized and detached from the 2204 * devices list. All that's left is to zero out the old supers and 2205 * free the device. 
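 * The wipe itself is done by btrfs_scratch_superblocks(), which clears
 * the magic in each super block copy so scanning tools no longer
 * recognize the disk as a btrfs device.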
2206 *
2207 * We cannot call btrfs_close_bdev() here because we're holding the sb
2208 * write lock, and blkdev_put() will pull in the ->open_mutex on the
2209 * block device and its dependencies. Instead just flush the device
2210 * and let the caller do the final blkdev_put.
2211 */
2212 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2213 btrfs_scratch_superblocks(fs_info, device->bdev,
2214 device->name->str);
2215 if (device->bdev) {
2216 sync_blockdev(device->bdev);
2217 invalidate_bdev(device->bdev);
2218 }
2219 }
2220
2221 *bdev = device->bdev;
2222 *mode = device->mode;
2223 synchronize_rcu();
2224 btrfs_free_device(device);
2225
2226 /*
2227 * This can happen if cur_devices is the private seed devices list. We
2228 * cannot call close_fs_devices() here because it expects the uuid_mutex
2229 * to be held, but in fact we don't need that for the private
2230 * seed_devices, we can simply decrement cur_devices->opened and then
2231 * remove it from our list and free the fs_devices.
2232 */
2233 if (cur_devices->num_devices == 0) {
2234 list_del_init(&cur_devices->seed_list);
2235 ASSERT(cur_devices->opened == 1);
2236 cur_devices->opened--;
2237 free_fs_devices(cur_devices);
2238 }
2239
2240 out:
2241 return ret;
2242
2243 error_undo:
2244 btrfs_reada_undo_remove_dev(device);
2245 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2246 mutex_lock(&fs_info->chunk_mutex);
2247 list_add(&device->dev_alloc_list,
2248 &fs_devices->alloc_list);
2249 device->fs_devices->rw_devices++;
2250 mutex_unlock(&fs_info->chunk_mutex);
2251 }
2252 goto out;
2253 }
2254
2255 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
2256 {
2257 struct btrfs_fs_devices *fs_devices;
2258
2259 lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);
2260
2261 /*
2262 * In case of a filesystem with no seed, srcdev->fs_devices will point
2263 * to the fs_devices of fs_info. However, when the device being replaced
2264 * is a seed device, it will point to the seed's local fs_devices. In
2265 * short, srcdev will have its correct fs_devices in both cases.
2266 */
2267 fs_devices = srcdev->fs_devices;
2268
2269 list_del_rcu(&srcdev->dev_list);
2270 list_del(&srcdev->dev_alloc_list);
2271 fs_devices->num_devices--;
2272 if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
2273 fs_devices->missing_devices--;
2274
2275 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
2276 fs_devices->rw_devices--;
2277
2278 if (srcdev->bdev)
2279 fs_devices->open_devices--;
2280 }
2281
2282 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
2283 {
2284 struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2285
2286 mutex_lock(&uuid_mutex);
2287
2288 btrfs_close_bdev(srcdev);
2289 synchronize_rcu();
2290 btrfs_free_device(srcdev);
2291
2292 /* If there are no devices left we'd rather delete the fs_devices. */
2293 if (!fs_devices->num_devices) {
2294 /*
2295 * On a mounted FS, num_devices can't be zero unless it's a
2296 * seed. In case of a seed device being replaced, the replace
2297 * target is added to the sprout FS, so there will be no more
2298 * devices left under the seed FS.
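 * E.g. when the only device of a seed FS is replaced, the new device
 * lands in the sprout's fs_devices, the seed's own fs_devices drops to
 * num_devices == 0 and can be deleted here.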
2299 */
2300 ASSERT(fs_devices->seeding);
2301
2302 list_del_init(&fs_devices->seed_list);
2303 close_fs_devices(fs_devices);
2304 free_fs_devices(fs_devices);
2305 }
2306 mutex_unlock(&uuid_mutex);
2307 }
2308
2309 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
2310 {
2311 struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;
2312
2313 mutex_lock(&fs_devices->device_list_mutex);
2314
2315 btrfs_sysfs_remove_device(tgtdev);
2316
2317 if (tgtdev->bdev)
2318 fs_devices->open_devices--;
2319
2320 fs_devices->num_devices--;
2321
2322 btrfs_assign_next_active_device(tgtdev, NULL);
2323
2324 list_del_rcu(&tgtdev->dev_list);
2325
2326 mutex_unlock(&fs_devices->device_list_mutex);
2327
2328 btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev,
2329 tgtdev->name->str);
2330
2331 btrfs_close_bdev(tgtdev);
2332 synchronize_rcu();
2333 btrfs_free_device(tgtdev);
2334 }
2335
2336 /**
2337 * btrfs_get_dev_args_from_path - populate args from device at path
2338 *
2339 * @fs_info: the filesystem
2340 * @args: the args to populate
2341 * @path: the path to the device
2342 *
2343 * This will read the super block of the device at @path and populate @args with
2344 * the devid, fsid, and uuid. This is meant to be used for ioctls that need to
2345 * look up a device to operate on, but need to do it before we take any locks.
2346 * This properly handles the special case of "missing" that a user may pass in,
2347 * and does some basic sanity checks. The caller must make sure that @path is
2348 * properly NUL terminated before calling in, and must call
2349 * btrfs_put_dev_args_from_path() in order to free up the temporary fsid and
2350 * uuid buffers.
2351 *
2352 * Return: 0 for success, -errno for failure
2353 */
2354 int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
2355 struct btrfs_dev_lookup_args *args,
2356 const char *path)
2357 {
2358 struct btrfs_super_block *disk_super;
2359 struct block_device *bdev;
2360 int ret;
2361
2362 if (!path || !path[0])
2363 return -EINVAL;
2364 if (!strcmp(path, "missing")) {
2365 args->missing = true;
2366 return 0;
2367 }
2368
2369 args->uuid = kzalloc(BTRFS_UUID_SIZE, GFP_KERNEL);
2370 args->fsid = kzalloc(BTRFS_FSID_SIZE, GFP_KERNEL);
2371 if (!args->uuid || !args->fsid) {
2372 btrfs_put_dev_args_from_path(args);
2373 return -ENOMEM;
2374 }
2375
2376 ret = btrfs_get_bdev_and_sb(path, FMODE_READ, fs_info->bdev_holder, 0,
2377 &bdev, &disk_super);
2378 if (ret) {
2379 /* Don't leak the uuid/fsid buffers allocated above. */
 btrfs_put_dev_args_from_path(args);
 return ret;
 }
2380 args->devid = btrfs_stack_device_id(&disk_super->dev_item);
2381 memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE);
2382 if (btrfs_fs_incompat(fs_info, METADATA_UUID))
2383 memcpy(args->fsid, disk_super->metadata_uuid, BTRFS_FSID_SIZE);
2384 else
2385 memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
2386 btrfs_release_disk_super(disk_super);
2387 blkdev_put(bdev, FMODE_READ);
2388 return 0;
2389 }
2390
2391 /*
2392 * Only use this jointly with btrfs_get_dev_args_from_path() because we will
2393 * allocate our ->uuid and ->fsid pointers, everybody else uses local variables
2394 * that don't need to be freed.
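 *
 * A typical pairing, as done by btrfs_find_device_by_devspec() below:
 *
 *	BTRFS_DEV_LOOKUP_ARGS(args);
 *
 *	ret = btrfs_get_dev_args_from_path(fs_info, &args, device_path);
 *	if (ret)
 *		return ERR_PTR(ret);
 *	device = btrfs_find_device(fs_info->fs_devices, &args);
 *	btrfs_put_dev_args_from_path(&args);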
2395 */
2396 void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args)
2397 {
2398 kfree(args->uuid);
2399 kfree(args->fsid);
2400 args->uuid = NULL;
2401 args->fsid = NULL;
2402 }
2403
2404 struct btrfs_device *btrfs_find_device_by_devspec(
2405 struct btrfs_fs_info *fs_info, u64 devid,
2406 const char *device_path)
2407 {
2408 BTRFS_DEV_LOOKUP_ARGS(args);
2409 struct btrfs_device *device;
2410 int ret;
2411
2412 if (devid) {
2413 args.devid = devid;
2414 device = btrfs_find_device(fs_info->fs_devices, &args);
2415 if (!device)
2416 return ERR_PTR(-ENOENT);
2417 return device;
2418 }
2419
2420 ret = btrfs_get_dev_args_from_path(fs_info, &args, device_path);
2421 if (ret)
2422 return ERR_PTR(ret);
2423 device = btrfs_find_device(fs_info->fs_devices, &args);
2424 btrfs_put_dev_args_from_path(&args);
2425 if (!device)
2426 return ERR_PTR(-ENOENT);
2427 return device;
2428 }
2429
2430 /*
2431 * Does all the dirty work required for changing the filesystem's UUID.
2432 */
2433 static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
2434 {
2435 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2436 struct btrfs_fs_devices *old_devices;
2437 struct btrfs_fs_devices *seed_devices;
2438 struct btrfs_super_block *disk_super = fs_info->super_copy;
2439 struct btrfs_device *device;
2440 u64 super_flags;
2441
2442 lockdep_assert_held(&uuid_mutex);
2443 if (!fs_devices->seeding)
2444 return -EINVAL;
2445
2446 /*
2447 * Private copy of the seed devices, anchored at
2448 * fs_info->fs_devices->seed_list
2449 */
2450 seed_devices = alloc_fs_devices(NULL, NULL);
2451 if (IS_ERR(seed_devices))
2452 return PTR_ERR(seed_devices);
2453
2454 /*
2455 * It's necessary to retain a copy of the original seed fs_devices in
2456 * fs_uuids so that filesystems which have been seeded can successfully
2457 * reference the seed device from open_seed_devices. This also supports
2458 * multiple seed filesystems.
2459 */
2460 old_devices = clone_fs_devices(fs_devices);
2461 if (IS_ERR(old_devices)) {
2462 kfree(seed_devices);
2463 return PTR_ERR(old_devices);
2464 }
2465
2466 list_add(&old_devices->fs_list, &fs_uuids);
2467
2468 memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2469 seed_devices->opened = 1;
2470 INIT_LIST_HEAD(&seed_devices->devices);
2471 INIT_LIST_HEAD(&seed_devices->alloc_list);
2472 mutex_init(&seed_devices->device_list_mutex);
2473
2474 mutex_lock(&fs_devices->device_list_mutex);
2475 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2476 synchronize_rcu);
2477 list_for_each_entry(device, &seed_devices->devices, dev_list)
2478 device->fs_devices = seed_devices;
2479
2480 fs_devices->seeding = false;
2481 fs_devices->num_devices = 0;
2482 fs_devices->open_devices = 0;
2483 fs_devices->missing_devices = 0;
2484 fs_devices->rotating = false;
2485 list_add(&seed_devices->seed_list, &fs_devices->seed_list);
2486
2487 generate_random_uuid(fs_devices->fsid);
2488 memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
2489 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2490 mutex_unlock(&fs_devices->device_list_mutex);
2491
2492 super_flags = btrfs_super_flags(disk_super) &
2493 ~BTRFS_SUPER_FLAG_SEEDING;
2494 btrfs_set_super_flags(disk_super, super_flags);
2495
2496 return 0;
2497 }
2498
2499 /*
2500 * Store the expected generation for seed devices in device items.
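 * This walks all DEV_ITEMs in the chunk tree and, for every device that
 * still belongs to a seeding fs_devices, stores device->generation in
 * the item via btrfs_set_device_generation().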
2501 */ 2502 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans) 2503 { 2504 BTRFS_DEV_LOOKUP_ARGS(args); 2505 struct btrfs_fs_info *fs_info = trans->fs_info; 2506 struct btrfs_root *root = fs_info->chunk_root; 2507 struct btrfs_path *path; 2508 struct extent_buffer *leaf; 2509 struct btrfs_dev_item *dev_item; 2510 struct btrfs_device *device; 2511 struct btrfs_key key; 2512 u8 fs_uuid[BTRFS_FSID_SIZE]; 2513 u8 dev_uuid[BTRFS_UUID_SIZE]; 2514 int ret; 2515 2516 path = btrfs_alloc_path(); 2517 if (!path) 2518 return -ENOMEM; 2519 2520 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2521 key.offset = 0; 2522 key.type = BTRFS_DEV_ITEM_KEY; 2523 2524 while (1) { 2525 btrfs_reserve_chunk_metadata(trans, false); 2526 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2527 btrfs_trans_release_chunk_metadata(trans); 2528 if (ret < 0) 2529 goto error; 2530 2531 leaf = path->nodes[0]; 2532 next_slot: 2533 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 2534 ret = btrfs_next_leaf(root, path); 2535 if (ret > 0) 2536 break; 2537 if (ret < 0) 2538 goto error; 2539 leaf = path->nodes[0]; 2540 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2541 btrfs_release_path(path); 2542 continue; 2543 } 2544 2545 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2546 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID || 2547 key.type != BTRFS_DEV_ITEM_KEY) 2548 break; 2549 2550 dev_item = btrfs_item_ptr(leaf, path->slots[0], 2551 struct btrfs_dev_item); 2552 args.devid = btrfs_device_id(leaf, dev_item); 2553 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 2554 BTRFS_UUID_SIZE); 2555 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 2556 BTRFS_FSID_SIZE); 2557 args.uuid = dev_uuid; 2558 args.fsid = fs_uuid; 2559 device = btrfs_find_device(fs_info->fs_devices, &args); 2560 BUG_ON(!device); /* Logic error */ 2561 2562 if (device->fs_devices->seeding) { 2563 btrfs_set_device_generation(leaf, dev_item, 2564 device->generation); 2565 btrfs_mark_buffer_dirty(leaf); 2566 } 2567 2568 path->slots[0]++; 2569 goto next_slot; 2570 } 2571 ret = 0; 2572 error: 2573 btrfs_free_path(path); 2574 return ret; 2575 } 2576 2577 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path) 2578 { 2579 struct btrfs_root *root = fs_info->dev_root; 2580 struct request_queue *q; 2581 struct btrfs_trans_handle *trans; 2582 struct btrfs_device *device; 2583 struct block_device *bdev; 2584 struct super_block *sb = fs_info->sb; 2585 struct rcu_string *name; 2586 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2587 u64 orig_super_total_bytes; 2588 u64 orig_super_num_devices; 2589 int seeding_dev = 0; 2590 int ret = 0; 2591 bool locked = false; 2592 2593 if (sb_rdonly(sb) && !fs_devices->seeding) 2594 return -EROFS; 2595 2596 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, 2597 fs_info->bdev_holder); 2598 if (IS_ERR(bdev)) 2599 return PTR_ERR(bdev); 2600 2601 if (!btrfs_check_device_zone_type(fs_info, bdev)) { 2602 ret = -EINVAL; 2603 goto error; 2604 } 2605 2606 if (fs_devices->seeding) { 2607 seeding_dev = 1; 2608 down_write(&sb->s_umount); 2609 mutex_lock(&uuid_mutex); 2610 locked = true; 2611 } 2612 2613 sync_blockdev(bdev); 2614 2615 rcu_read_lock(); 2616 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) { 2617 if (device->bdev == bdev) { 2618 ret = -EEXIST; 2619 rcu_read_unlock(); 2620 goto error; 2621 } 2622 } 2623 rcu_read_unlock(); 2624 2625 device = btrfs_alloc_device(fs_info, NULL, NULL); 2626 if (IS_ERR(device)) { 2627 /* we can 
safely leave the fs_devices entry around */ 2628 ret = PTR_ERR(device); 2629 goto error; 2630 } 2631 2632 name = rcu_string_strdup(device_path, GFP_KERNEL); 2633 if (!name) { 2634 ret = -ENOMEM; 2635 goto error_free_device; 2636 } 2637 rcu_assign_pointer(device->name, name); 2638 2639 device->fs_info = fs_info; 2640 device->bdev = bdev; 2641 2642 ret = btrfs_get_dev_zone_info(device); 2643 if (ret) 2644 goto error_free_device; 2645 2646 trans = btrfs_start_transaction(root, 0); 2647 if (IS_ERR(trans)) { 2648 ret = PTR_ERR(trans); 2649 goto error_free_zone; 2650 } 2651 2652 q = bdev_get_queue(bdev); 2653 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 2654 device->generation = trans->transid; 2655 device->io_width = fs_info->sectorsize; 2656 device->io_align = fs_info->sectorsize; 2657 device->sector_size = fs_info->sectorsize; 2658 device->total_bytes = round_down(i_size_read(bdev->bd_inode), 2659 fs_info->sectorsize); 2660 device->disk_total_bytes = device->total_bytes; 2661 device->commit_total_bytes = device->total_bytes; 2662 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2663 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 2664 device->mode = FMODE_EXCL; 2665 device->dev_stats_valid = 1; 2666 set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE); 2667 2668 if (seeding_dev) { 2669 btrfs_clear_sb_rdonly(sb); 2670 ret = btrfs_prepare_sprout(fs_info); 2671 if (ret) { 2672 btrfs_abort_transaction(trans, ret); 2673 goto error_trans; 2674 } 2675 btrfs_assign_next_active_device(fs_info->fs_devices->latest_dev, 2676 device); 2677 } 2678 2679 device->fs_devices = fs_devices; 2680 2681 mutex_lock(&fs_devices->device_list_mutex); 2682 mutex_lock(&fs_info->chunk_mutex); 2683 list_add_rcu(&device->dev_list, &fs_devices->devices); 2684 list_add(&device->dev_alloc_list, &fs_devices->alloc_list); 2685 fs_devices->num_devices++; 2686 fs_devices->open_devices++; 2687 fs_devices->rw_devices++; 2688 fs_devices->total_devices++; 2689 fs_devices->total_rw_bytes += device->total_bytes; 2690 2691 atomic64_add(device->total_bytes, &fs_info->free_chunk_space); 2692 2693 if (!blk_queue_nonrot(q)) 2694 fs_devices->rotating = true; 2695 2696 orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy); 2697 btrfs_set_super_total_bytes(fs_info->super_copy, 2698 round_down(orig_super_total_bytes + device->total_bytes, 2699 fs_info->sectorsize)); 2700 2701 orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy); 2702 btrfs_set_super_num_devices(fs_info->super_copy, 2703 orig_super_num_devices + 1); 2704 2705 /* 2706 * we've got more storage, clear any full flags on the space 2707 * infos 2708 */ 2709 btrfs_clear_space_info_full(fs_info); 2710 2711 mutex_unlock(&fs_info->chunk_mutex); 2712 2713 /* Add sysfs device entry */ 2714 btrfs_sysfs_add_device(device); 2715 2716 mutex_unlock(&fs_devices->device_list_mutex); 2717 2718 if (seeding_dev) { 2719 mutex_lock(&fs_info->chunk_mutex); 2720 ret = init_first_rw_device(trans); 2721 mutex_unlock(&fs_info->chunk_mutex); 2722 if (ret) { 2723 btrfs_abort_transaction(trans, ret); 2724 goto error_sysfs; 2725 } 2726 } 2727 2728 ret = btrfs_add_dev_item(trans, device); 2729 if (ret) { 2730 btrfs_abort_transaction(trans, ret); 2731 goto error_sysfs; 2732 } 2733 2734 if (seeding_dev) { 2735 ret = btrfs_finish_sprout(trans); 2736 if (ret) { 2737 btrfs_abort_transaction(trans, ret); 2738 goto error_sysfs; 2739 } 2740 2741 /* 2742 * fs_devices now represents the newly sprouted filesystem and 2743 * its fsid has been changed by 
btrfs_prepare_sprout().
2744 */
2745 btrfs_sysfs_update_sprout_fsid(fs_devices);
2746 }
2747
2748 ret = btrfs_commit_transaction(trans);
2749
2750 if (seeding_dev) {
2751 mutex_unlock(&uuid_mutex);
2752 up_write(&sb->s_umount);
2753 locked = false;
2754
2755 if (ret) /* transaction commit */
2756 return ret;
2757
2758 ret = btrfs_relocate_sys_chunks(fs_info);
2759 if (ret < 0)
2760 btrfs_handle_fs_error(fs_info, ret,
2761 "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2762 trans = btrfs_attach_transaction(root);
2763 if (IS_ERR(trans)) {
2764 if (PTR_ERR(trans) == -ENOENT)
2765 return 0;
2766 ret = PTR_ERR(trans);
2767 trans = NULL;
2768 goto error_sysfs;
2769 }
2770 ret = btrfs_commit_transaction(trans);
2771 }
2772
2773 /*
2774 * Now that we have written a new super block to this device, check all
2775 * other fs_devices lists to see whether device_path alienates any other
2776 * scanned device.
2777 * We can ignore the return value as it typically returns -EINVAL and
2778 * only succeeds if the device was an alien.
2779 */
2780 btrfs_forget_devices(device_path);
2781
2782 /* Update ctime/mtime for blkid or udev */
2783 update_dev_time(device_path);
2784
2785 return ret;
2786
2787 error_sysfs:
2788 btrfs_sysfs_remove_device(device);
2789 mutex_lock(&fs_info->fs_devices->device_list_mutex);
2790 mutex_lock(&fs_info->chunk_mutex);
2791 list_del_rcu(&device->dev_list);
2792 list_del(&device->dev_alloc_list);
2793 fs_info->fs_devices->num_devices--;
2794 fs_info->fs_devices->open_devices--;
2795 fs_info->fs_devices->rw_devices--;
2796 fs_info->fs_devices->total_devices--;
2797 fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
2798 atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
2799 btrfs_set_super_total_bytes(fs_info->super_copy,
2800 orig_super_total_bytes);
2801 btrfs_set_super_num_devices(fs_info->super_copy,
2802 orig_super_num_devices);
2803 mutex_unlock(&fs_info->chunk_mutex);
2804 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2805 error_trans:
2806 if (seeding_dev)
2807 btrfs_set_sb_rdonly(sb);
2808 if (trans)
2809 btrfs_end_transaction(trans);
2810 error_free_zone:
2811 btrfs_destroy_dev_zone_info(device);
2812 error_free_device:
2813 btrfs_free_device(device);
2814 error:
2815 blkdev_put(bdev, FMODE_EXCL);
2816 if (locked) {
2817 mutex_unlock(&uuid_mutex);
2818 up_write(&sb->s_umount);
2819 }
2820 return ret;
2821 }
2822
2823 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2824 struct btrfs_device *device)
2825 {
2826 int ret;
2827 struct btrfs_path *path;
2828 struct btrfs_root *root = device->fs_info->chunk_root;
2829 struct btrfs_dev_item *dev_item;
2830 struct extent_buffer *leaf;
2831 struct btrfs_key key;
2832
2833 path = btrfs_alloc_path();
2834 if (!path)
2835 return -ENOMEM;
2836
2837 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2838 key.type = BTRFS_DEV_ITEM_KEY;
2839 key.offset = device->devid;
2840
2841 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2842 if (ret < 0)
2843 goto out;
2844
2845 if (ret > 0) {
2846 ret = -ENOENT;
2847 goto out;
2848 }
2849
2850 leaf = path->nodes[0];
2851 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2852
2853 btrfs_set_device_id(leaf, dev_item, device->devid);
2854 btrfs_set_device_type(leaf, dev_item, device->type);
2855 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2856 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2857 btrfs_set_device_sector_size(leaf, dev_item,
device->sector_size); 2858 btrfs_set_device_total_bytes(leaf, dev_item, 2859 btrfs_device_get_disk_total_bytes(device)); 2860 btrfs_set_device_bytes_used(leaf, dev_item, 2861 btrfs_device_get_bytes_used(device)); 2862 btrfs_mark_buffer_dirty(leaf); 2863 2864 out: 2865 btrfs_free_path(path); 2866 return ret; 2867 } 2868 2869 int btrfs_grow_device(struct btrfs_trans_handle *trans, 2870 struct btrfs_device *device, u64 new_size) 2871 { 2872 struct btrfs_fs_info *fs_info = device->fs_info; 2873 struct btrfs_super_block *super_copy = fs_info->super_copy; 2874 u64 old_total; 2875 u64 diff; 2876 int ret; 2877 2878 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 2879 return -EACCES; 2880 2881 new_size = round_down(new_size, fs_info->sectorsize); 2882 2883 mutex_lock(&fs_info->chunk_mutex); 2884 old_total = btrfs_super_total_bytes(super_copy); 2885 diff = round_down(new_size - device->total_bytes, fs_info->sectorsize); 2886 2887 if (new_size <= device->total_bytes || 2888 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 2889 mutex_unlock(&fs_info->chunk_mutex); 2890 return -EINVAL; 2891 } 2892 2893 btrfs_set_super_total_bytes(super_copy, 2894 round_down(old_total + diff, fs_info->sectorsize)); 2895 device->fs_devices->total_rw_bytes += diff; 2896 2897 btrfs_device_set_total_bytes(device, new_size); 2898 btrfs_device_set_disk_total_bytes(device, new_size); 2899 btrfs_clear_space_info_full(device->fs_info); 2900 if (list_empty(&device->post_commit_list)) 2901 list_add_tail(&device->post_commit_list, 2902 &trans->transaction->dev_update_list); 2903 mutex_unlock(&fs_info->chunk_mutex); 2904 2905 btrfs_reserve_chunk_metadata(trans, false); 2906 ret = btrfs_update_device(trans, device); 2907 btrfs_trans_release_chunk_metadata(trans); 2908 2909 return ret; 2910 } 2911 2912 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 2913 { 2914 struct btrfs_fs_info *fs_info = trans->fs_info; 2915 struct btrfs_root *root = fs_info->chunk_root; 2916 int ret; 2917 struct btrfs_path *path; 2918 struct btrfs_key key; 2919 2920 path = btrfs_alloc_path(); 2921 if (!path) 2922 return -ENOMEM; 2923 2924 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2925 key.offset = chunk_offset; 2926 key.type = BTRFS_CHUNK_ITEM_KEY; 2927 2928 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 2929 if (ret < 0) 2930 goto out; 2931 else if (ret > 0) { /* Logic error or corruption */ 2932 btrfs_handle_fs_error(fs_info, -ENOENT, 2933 "Failed lookup while freeing chunk."); 2934 ret = -ENOENT; 2935 goto out; 2936 } 2937 2938 ret = btrfs_del_item(trans, root, path); 2939 if (ret < 0) 2940 btrfs_handle_fs_error(fs_info, ret, 2941 "Failed to delete chunk item."); 2942 out: 2943 btrfs_free_path(path); 2944 return ret; 2945 } 2946 2947 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 2948 { 2949 struct btrfs_super_block *super_copy = fs_info->super_copy; 2950 struct btrfs_disk_key *disk_key; 2951 struct btrfs_chunk *chunk; 2952 u8 *ptr; 2953 int ret = 0; 2954 u32 num_stripes; 2955 u32 array_size; 2956 u32 len = 0; 2957 u32 cur; 2958 struct btrfs_key key; 2959 2960 lockdep_assert_held(&fs_info->chunk_mutex); 2961 array_size = btrfs_super_sys_array_size(super_copy); 2962 2963 ptr = super_copy->sys_chunk_array; 2964 cur = 0; 2965 2966 while (cur < array_size) { 2967 disk_key = (struct btrfs_disk_key *)ptr; 2968 btrfs_disk_key_to_cpu(&key, disk_key); 2969 2970 len = sizeof(*disk_key); 2971 2972 if (key.type == BTRFS_CHUNK_ITEM_KEY) { 2973 chunk = (struct 
btrfs_chunk *)(ptr + len);
2974 num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2975 len += btrfs_chunk_item_size(num_stripes);
2976 } else {
2977 ret = -EIO;
2978 break;
2979 }
2980 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
2981 key.offset == chunk_offset) {
2982 memmove(ptr, ptr + len, array_size - (cur + len));
2983 array_size -= len;
2984 btrfs_set_super_sys_array_size(super_copy, array_size);
2985 } else {
2986 ptr += len;
2987 cur += len;
2988 }
2989 }
2990 return ret;
2991 }
2992
2993 /*
2994 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
 * @fs_info: The filesystem.
2995 * @logical: Logical block offset in bytes.
2996 * @length: Length of extent in bytes.
2997 *
2998 * Return: Chunk mapping or ERR_PTR.
2999 */
3000 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
3001 u64 logical, u64 length)
3002 {
3003 struct extent_map_tree *em_tree;
3004 struct extent_map *em;
3005
3006 em_tree = &fs_info->mapping_tree;
3007 read_lock(&em_tree->lock);
3008 em = lookup_extent_mapping(em_tree, logical, length);
3009 read_unlock(&em_tree->lock);
3010
3011 if (!em) {
3012 btrfs_crit(fs_info, "unable to find logical %llu length %llu",
3013 logical, length);
3014 return ERR_PTR(-EINVAL);
3015 }
3016
3017 if (em->start > logical || em->start + em->len < logical) {
3018 btrfs_crit(fs_info,
3019 "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
3020 logical, logical + length, em->start, em->start + em->len);
3021 free_extent_map(em);
3022 return ERR_PTR(-EINVAL);
3023 }
3024
3025 /* Callers are responsible for dropping em's ref. */
3026 return em;
3027 }
3028
3029 static int remove_chunk_item(struct btrfs_trans_handle *trans,
3030 struct map_lookup *map, u64 chunk_offset)
3031 {
3032 int i;
3033
3034 /*
3035 * Removing chunk items and updating the device items in the chunk btree
3036 * requires holding the chunk_mutex.
3037 * See the comment at btrfs_chunk_alloc() for the details.
3038 */
3039 lockdep_assert_held(&trans->fs_info->chunk_mutex);
3040
3041 for (i = 0; i < map->num_stripes; i++) {
3042 int ret;
3043
3044 ret = btrfs_update_device(trans, map->stripes[i].dev);
3045 if (ret)
3046 return ret;
3047 }
3048
3049 return btrfs_free_chunk(trans, chunk_offset);
3050 }
3051
3052 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
3053 {
3054 struct btrfs_fs_info *fs_info = trans->fs_info;
3055 struct extent_map *em;
3056 struct map_lookup *map;
3057 u64 dev_extent_len = 0;
3058 int i, ret = 0;
3059 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
3060
3061 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
3062 if (IS_ERR(em)) {
3063 /*
3064 * This is a logic error, but we don't want to just rely on the
3065 * user having built with ASSERT enabled, so if ASSERT doesn't
3066 * do anything we still error out.
3067 */
3068 ASSERT(0);
3069 return PTR_ERR(em);
3070 }
3071 map = em->map_lookup;
3072
3073 /*
3074 * First delete the device extent items from the devices btree.
3075 * We take the device_list_mutex to avoid racing with the finishing phase
3076 * of a device replace operation. See the comment below before acquiring
3077 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex
3078 * because that can result in a deadlock when deleting the device extent
3079 * items from the devices btree - COWing an extent buffer from the btree
3080 * may result in allocating a new metadata chunk, which would attempt to
3081 * lock again fs_info->chunk_mutex.
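 *
 * Roughly, the chain we must avoid looks like:
 *
 *	btrfs_free_dev_extent()		(chunk_mutex held)
 *	  btrfs_search_slot()		-> COWs a devices btree block
 *	    ...				-> needs a new metadata chunk
 *	      btrfs_chunk_alloc()	-> locks chunk_mutex again: deadlock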
3082 */
3083 mutex_lock(&fs_devices->device_list_mutex);
3084 for (i = 0; i < map->num_stripes; i++) {
3085 struct btrfs_device *device = map->stripes[i].dev;
3086 ret = btrfs_free_dev_extent(trans, device,
3087 map->stripes[i].physical,
3088 &dev_extent_len);
3089 if (ret) {
3090 mutex_unlock(&fs_devices->device_list_mutex);
3091 btrfs_abort_transaction(trans, ret);
3092 goto out;
3093 }
3094
3095 if (device->bytes_used > 0) {
3096 mutex_lock(&fs_info->chunk_mutex);
3097 btrfs_device_set_bytes_used(device,
3098 device->bytes_used - dev_extent_len);
3099 atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
3100 btrfs_clear_space_info_full(fs_info);
3101 mutex_unlock(&fs_info->chunk_mutex);
3102 }
3103 }
3104 mutex_unlock(&fs_devices->device_list_mutex);
3105
3106 /*
3107 * We acquire fs_info->chunk_mutex for 2 reasons:
3108 *
3109 * 1) Just like with the first phase of the chunk allocation, we must
3110 * reserve system space, do all chunk btree updates and deletions, and
3111 * update the system chunk array in the superblock while holding this
3112 * mutex. This is for similar reasons as explained in the comment at
3113 * the top of btrfs_chunk_alloc();
3114 *
3115 * 2) Prevent races with the final phase of a device replace operation
3116 * that replaces the device object associated with the map's stripes,
3117 * because the device object's id can change at any time during that
3118 * final phase of the device replace operation
3119 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
3120 * replaced device and then see it with an ID of
3121 * BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating
3122 * the device item, which does not exist in the chunk btree.
3123 * The finishing phase of device replace acquires both the
3124 * device_list_mutex and the chunk_mutex, in that order, so we are
3125 * safe by just acquiring the chunk_mutex.
3126 */
3127 trans->removing_chunk = true;
3128 mutex_lock(&fs_info->chunk_mutex);
3129
3130 check_system_chunk(trans, map->type);
3131
3132 ret = remove_chunk_item(trans, map, chunk_offset);
3133 /*
3134 * Normally we should not get -ENOSPC since we reserved space before
3135 * through the call to check_system_chunk().
3136 *
3137 * Despite our system space_info having enough free space, we may not
3138 * be able to allocate extents from its block groups, because all have
3139 * an incompatible profile, which will force us to allocate a new system
3140 * block group with the right profile, or right after we called
3141 * check_system_chunk() above, a scrub turned the only system block group
3142 * with enough free space into RO mode.
3143 * This is explained in more detail at do_chunk_alloc().
3144 *
3145 * So if we get -ENOSPC, allocate a new system chunk and retry once.
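 * The retry below does exactly that: btrfs_create_chunk() makes a new
 * system block group, btrfs_chunk_alloc_add_chunk_item() inserts its
 * chunk item, and then remove_chunk_item() is attempted one more time.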
3146 */ 3147 if (ret == -ENOSPC) { 3148 const u64 sys_flags = btrfs_system_alloc_profile(fs_info); 3149 struct btrfs_block_group *sys_bg; 3150 3151 sys_bg = btrfs_create_chunk(trans, sys_flags); 3152 if (IS_ERR(sys_bg)) { 3153 ret = PTR_ERR(sys_bg); 3154 btrfs_abort_transaction(trans, ret); 3155 goto out; 3156 } 3157 3158 ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg); 3159 if (ret) { 3160 btrfs_abort_transaction(trans, ret); 3161 goto out; 3162 } 3163 3164 ret = remove_chunk_item(trans, map, chunk_offset); 3165 if (ret) { 3166 btrfs_abort_transaction(trans, ret); 3167 goto out; 3168 } 3169 } else if (ret) { 3170 btrfs_abort_transaction(trans, ret); 3171 goto out; 3172 } 3173 3174 trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len); 3175 3176 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 3177 ret = btrfs_del_sys_chunk(fs_info, chunk_offset); 3178 if (ret) { 3179 btrfs_abort_transaction(trans, ret); 3180 goto out; 3181 } 3182 } 3183 3184 mutex_unlock(&fs_info->chunk_mutex); 3185 trans->removing_chunk = false; 3186 3187 /* 3188 * We are done with chunk btree updates and deletions, so release the 3189 * system space we previously reserved (with check_system_chunk()). 3190 */ 3191 btrfs_trans_release_chunk_metadata(trans); 3192 3193 ret = btrfs_remove_block_group(trans, chunk_offset, em); 3194 if (ret) { 3195 btrfs_abort_transaction(trans, ret); 3196 goto out; 3197 } 3198 3199 out: 3200 if (trans->removing_chunk) { 3201 mutex_unlock(&fs_info->chunk_mutex); 3202 trans->removing_chunk = false; 3203 } 3204 /* once for us */ 3205 free_extent_map(em); 3206 return ret; 3207 } 3208 3209 int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 3210 { 3211 struct btrfs_root *root = fs_info->chunk_root; 3212 struct btrfs_trans_handle *trans; 3213 struct btrfs_block_group *block_group; 3214 u64 length; 3215 int ret; 3216 3217 /* 3218 * Prevent races with automatic removal of unused block groups. 3219 * After we relocate and before we remove the chunk with offset 3220 * chunk_offset, automatic removal of the block group can kick in, 3221 * resulting in a failure when calling btrfs_remove_chunk() below. 3222 * 3223 * Make sure to acquire this mutex before doing a tree search (dev 3224 * or chunk trees) to find chunks. Otherwise the cleaner kthread might 3225 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after 3226 * we release the path used to search the chunk/dev tree and before 3227 * the current task acquires this mutex and calls us. 3228 */ 3229 lockdep_assert_held(&fs_info->reclaim_bgs_lock); 3230 3231 /* step one, relocate all the extents inside this chunk */ 3232 btrfs_scrub_pause(fs_info); 3233 ret = btrfs_relocate_block_group(fs_info, chunk_offset); 3234 btrfs_scrub_continue(fs_info); 3235 if (ret) 3236 return ret; 3237 3238 block_group = btrfs_lookup_block_group(fs_info, chunk_offset); 3239 if (!block_group) 3240 return -ENOENT; 3241 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); 3242 length = block_group->length; 3243 btrfs_put_block_group(block_group); 3244 3245 /* 3246 * On a zoned file system, discard the whole block group, this will 3247 * trigger a REQ_OP_ZONE_RESET operation on the device zone. If 3248 * resetting the zone fails, don't treat it as a fatal problem from the 3249 * filesystem's point of view. 
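 * A failed zone reset is therefore only logged via btrfs_info() below
 * and the relocation carries on.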
3250 */ 3251 if (btrfs_is_zoned(fs_info)) { 3252 ret = btrfs_discard_extent(fs_info, chunk_offset, length, NULL); 3253 if (ret) 3254 btrfs_info(fs_info, 3255 "failed to reset zone %llu after relocation", 3256 chunk_offset); 3257 } 3258 3259 trans = btrfs_start_trans_remove_block_group(root->fs_info, 3260 chunk_offset); 3261 if (IS_ERR(trans)) { 3262 ret = PTR_ERR(trans); 3263 btrfs_handle_fs_error(root->fs_info, ret, NULL); 3264 return ret; 3265 } 3266 3267 /* 3268 * step two, delete the device extents and the 3269 * chunk tree entries 3270 */ 3271 ret = btrfs_remove_chunk(trans, chunk_offset); 3272 btrfs_end_transaction(trans); 3273 return ret; 3274 } 3275 3276 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info) 3277 { 3278 struct btrfs_root *chunk_root = fs_info->chunk_root; 3279 struct btrfs_path *path; 3280 struct extent_buffer *leaf; 3281 struct btrfs_chunk *chunk; 3282 struct btrfs_key key; 3283 struct btrfs_key found_key; 3284 u64 chunk_type; 3285 bool retried = false; 3286 int failed = 0; 3287 int ret; 3288 3289 path = btrfs_alloc_path(); 3290 if (!path) 3291 return -ENOMEM; 3292 3293 again: 3294 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3295 key.offset = (u64)-1; 3296 key.type = BTRFS_CHUNK_ITEM_KEY; 3297 3298 while (1) { 3299 mutex_lock(&fs_info->reclaim_bgs_lock); 3300 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3301 if (ret < 0) { 3302 mutex_unlock(&fs_info->reclaim_bgs_lock); 3303 goto error; 3304 } 3305 BUG_ON(ret == 0); /* Corruption */ 3306 3307 ret = btrfs_previous_item(chunk_root, path, key.objectid, 3308 key.type); 3309 if (ret) 3310 mutex_unlock(&fs_info->reclaim_bgs_lock); 3311 if (ret < 0) 3312 goto error; 3313 if (ret > 0) 3314 break; 3315 3316 leaf = path->nodes[0]; 3317 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3318 3319 chunk = btrfs_item_ptr(leaf, path->slots[0], 3320 struct btrfs_chunk); 3321 chunk_type = btrfs_chunk_type(leaf, chunk); 3322 btrfs_release_path(path); 3323 3324 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { 3325 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3326 if (ret == -ENOSPC) 3327 failed++; 3328 else 3329 BUG_ON(ret); 3330 } 3331 mutex_unlock(&fs_info->reclaim_bgs_lock); 3332 3333 if (found_key.offset == 0) 3334 break; 3335 key.offset = found_key.offset - 1; 3336 } 3337 ret = 0; 3338 if (failed && !retried) { 3339 failed = 0; 3340 retried = true; 3341 goto again; 3342 } else if (WARN_ON(failed && retried)) { 3343 ret = -ENOSPC; 3344 } 3345 error: 3346 btrfs_free_path(path); 3347 return ret; 3348 } 3349 3350 /* 3351 * return 1 : allocate a data chunk successfully, 3352 * return <0: errors during allocating a data chunk, 3353 * return 0 : no need to allocate a data chunk. 
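 *
 * The reason for pre-allocating: when the chunk being relocated is the
 * last data chunk, relocating it without another data chunk in place
 * could lose the data RAID profile, so an empty data chunk is created
 * in advance (see also the comment in __btrfs_balance()).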
3354 */ 3355 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info, 3356 u64 chunk_offset) 3357 { 3358 struct btrfs_block_group *cache; 3359 u64 bytes_used; 3360 u64 chunk_type; 3361 3362 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3363 ASSERT(cache); 3364 chunk_type = cache->flags; 3365 btrfs_put_block_group(cache); 3366 3367 if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA)) 3368 return 0; 3369 3370 spin_lock(&fs_info->data_sinfo->lock); 3371 bytes_used = fs_info->data_sinfo->bytes_used; 3372 spin_unlock(&fs_info->data_sinfo->lock); 3373 3374 if (!bytes_used) { 3375 struct btrfs_trans_handle *trans; 3376 int ret; 3377 3378 trans = btrfs_join_transaction(fs_info->tree_root); 3379 if (IS_ERR(trans)) 3380 return PTR_ERR(trans); 3381 3382 ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA); 3383 btrfs_end_transaction(trans); 3384 if (ret < 0) 3385 return ret; 3386 return 1; 3387 } 3388 3389 return 0; 3390 } 3391 3392 static int insert_balance_item(struct btrfs_fs_info *fs_info, 3393 struct btrfs_balance_control *bctl) 3394 { 3395 struct btrfs_root *root = fs_info->tree_root; 3396 struct btrfs_trans_handle *trans; 3397 struct btrfs_balance_item *item; 3398 struct btrfs_disk_balance_args disk_bargs; 3399 struct btrfs_path *path; 3400 struct extent_buffer *leaf; 3401 struct btrfs_key key; 3402 int ret, err; 3403 3404 path = btrfs_alloc_path(); 3405 if (!path) 3406 return -ENOMEM; 3407 3408 trans = btrfs_start_transaction(root, 0); 3409 if (IS_ERR(trans)) { 3410 btrfs_free_path(path); 3411 return PTR_ERR(trans); 3412 } 3413 3414 key.objectid = BTRFS_BALANCE_OBJECTID; 3415 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3416 key.offset = 0; 3417 3418 ret = btrfs_insert_empty_item(trans, root, path, &key, 3419 sizeof(*item)); 3420 if (ret) 3421 goto out; 3422 3423 leaf = path->nodes[0]; 3424 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 3425 3426 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item)); 3427 3428 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data); 3429 btrfs_set_balance_data(leaf, item, &disk_bargs); 3430 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta); 3431 btrfs_set_balance_meta(leaf, item, &disk_bargs); 3432 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys); 3433 btrfs_set_balance_sys(leaf, item, &disk_bargs); 3434 3435 btrfs_set_balance_flags(leaf, item, bctl->flags); 3436 3437 btrfs_mark_buffer_dirty(leaf); 3438 out: 3439 btrfs_free_path(path); 3440 err = btrfs_commit_transaction(trans); 3441 if (err && !ret) 3442 ret = err; 3443 return ret; 3444 } 3445 3446 static int del_balance_item(struct btrfs_fs_info *fs_info) 3447 { 3448 struct btrfs_root *root = fs_info->tree_root; 3449 struct btrfs_trans_handle *trans; 3450 struct btrfs_path *path; 3451 struct btrfs_key key; 3452 int ret, err; 3453 3454 path = btrfs_alloc_path(); 3455 if (!path) 3456 return -ENOMEM; 3457 3458 trans = btrfs_start_transaction_fallback_global_rsv(root, 0); 3459 if (IS_ERR(trans)) { 3460 btrfs_free_path(path); 3461 return PTR_ERR(trans); 3462 } 3463 3464 key.objectid = BTRFS_BALANCE_OBJECTID; 3465 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3466 key.offset = 0; 3467 3468 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 3469 if (ret < 0) 3470 goto out; 3471 if (ret > 0) { 3472 ret = -ENOENT; 3473 goto out; 3474 } 3475 3476 ret = btrfs_del_item(trans, root, path); 3477 out: 3478 btrfs_free_path(path); 3479 err = btrfs_commit_transaction(trans); 3480 if (err && !ret) 3481 ret = err; 3482 return ret; 3483 } 3484 3485 /* 3486 * This is a 
heuristic used to reduce the number of chunks balanced on
3487 * resume after balance was interrupted.
3488 */
3489 static void update_balance_args(struct btrfs_balance_control *bctl)
3490 {
3491 /*
3492 * Turn on soft mode for chunk types that were being converted.
3493 */
3494 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3495 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3496 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3497 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3498 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3499 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3500
3501 /*
3502 * Turn on the usage filter if it is not already used. The idea is
3503 * that chunks that we have already balanced should be
3504 * reasonably full. Don't do it for chunks that are being
3505 * converted - that will keep us from relocating unconverted
3506 * (albeit full) chunks.
3507 */
3508 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3509 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3510 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3511 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3512 bctl->data.usage = 90;
3513 }
3514 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3515 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3516 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3517 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3518 bctl->sys.usage = 90;
3519 }
3520 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3521 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3522 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3523 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3524 bctl->meta.usage = 90;
3525 }
3526 }
3527
3528 /*
3529 * Clear the balance status in fs_info and delete the balance item from disk.
3530 */
3531 static void reset_balance_state(struct btrfs_fs_info *fs_info)
3532 {
3533 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3534 int ret;
3535
3536 BUG_ON(!fs_info->balance_ctl);
3537
3538 spin_lock(&fs_info->balance_lock);
3539 fs_info->balance_ctl = NULL;
3540 spin_unlock(&fs_info->balance_lock);
3541
3542 kfree(bctl);
3543 ret = del_balance_item(fs_info);
3544 if (ret)
3545 btrfs_handle_fs_error(fs_info, ret, NULL);
3546 }
3547
3548 /*
3549 * Balance filters. Return 1 if chunk should be filtered out
3550 * (should not be balanced).
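 *
 * Each filter checks one aspect of the chunk against the user supplied
 * btrfs_balance_args; e.g. chunk_profiles_filter() below keeps a chunk
 * only if its extended profile bit is set in bargs->profiles.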
3551 */ 3552 static int chunk_profiles_filter(u64 chunk_type, 3553 struct btrfs_balance_args *bargs) 3554 { 3555 chunk_type = chunk_to_extended(chunk_type) & 3556 BTRFS_EXTENDED_PROFILE_MASK; 3557 3558 if (bargs->profiles & chunk_type) 3559 return 0; 3560 3561 return 1; 3562 } 3563 3564 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, 3565 struct btrfs_balance_args *bargs) 3566 { 3567 struct btrfs_block_group *cache; 3568 u64 chunk_used; 3569 u64 user_thresh_min; 3570 u64 user_thresh_max; 3571 int ret = 1; 3572 3573 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3574 chunk_used = cache->used; 3575 3576 if (bargs->usage_min == 0) 3577 user_thresh_min = 0; 3578 else 3579 user_thresh_min = div_factor_fine(cache->length, 3580 bargs->usage_min); 3581 3582 if (bargs->usage_max == 0) 3583 user_thresh_max = 1; 3584 else if (bargs->usage_max > 100) 3585 user_thresh_max = cache->length; 3586 else 3587 user_thresh_max = div_factor_fine(cache->length, 3588 bargs->usage_max); 3589 3590 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max) 3591 ret = 0; 3592 3593 btrfs_put_block_group(cache); 3594 return ret; 3595 } 3596 3597 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, 3598 u64 chunk_offset, struct btrfs_balance_args *bargs) 3599 { 3600 struct btrfs_block_group *cache; 3601 u64 chunk_used, user_thresh; 3602 int ret = 1; 3603 3604 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3605 chunk_used = cache->used; 3606 3607 if (bargs->usage_min == 0) 3608 user_thresh = 1; 3609 else if (bargs->usage > 100) 3610 user_thresh = cache->length; 3611 else 3612 user_thresh = div_factor_fine(cache->length, bargs->usage); 3613 3614 if (chunk_used < user_thresh) 3615 ret = 0; 3616 3617 btrfs_put_block_group(cache); 3618 return ret; 3619 } 3620 3621 static int chunk_devid_filter(struct extent_buffer *leaf, 3622 struct btrfs_chunk *chunk, 3623 struct btrfs_balance_args *bargs) 3624 { 3625 struct btrfs_stripe *stripe; 3626 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3627 int i; 3628 3629 for (i = 0; i < num_stripes; i++) { 3630 stripe = btrfs_stripe_nr(chunk, i); 3631 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid) 3632 return 0; 3633 } 3634 3635 return 1; 3636 } 3637 3638 static u64 calc_data_stripes(u64 type, int num_stripes) 3639 { 3640 const int index = btrfs_bg_flags_to_raid_index(type); 3641 const int ncopies = btrfs_raid_array[index].ncopies; 3642 const int nparity = btrfs_raid_array[index].nparity; 3643 3644 return (num_stripes - nparity) / ncopies; 3645 } 3646 3647 /* [pstart, pend) */ 3648 static int chunk_drange_filter(struct extent_buffer *leaf, 3649 struct btrfs_chunk *chunk, 3650 struct btrfs_balance_args *bargs) 3651 { 3652 struct btrfs_stripe *stripe; 3653 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3654 u64 stripe_offset; 3655 u64 stripe_length; 3656 u64 type; 3657 int factor; 3658 int i; 3659 3660 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID)) 3661 return 0; 3662 3663 type = btrfs_chunk_type(leaf, chunk); 3664 factor = calc_data_stripes(type, num_stripes); 3665 3666 for (i = 0; i < num_stripes; i++) { 3667 stripe = btrfs_stripe_nr(chunk, i); 3668 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid) 3669 continue; 3670 3671 stripe_offset = btrfs_stripe_offset(leaf, stripe); 3672 stripe_length = btrfs_chunk_length(leaf, chunk); 3673 stripe_length = div_u64(stripe_length, factor); 3674 3675 if (stripe_offset < bargs->pend && 3676 stripe_offset + stripe_length > bargs->pstart) 3677 return 0; 
3678 } 3679 3680 return 1; 3681 } 3682 3683 /* [vstart, vend) */ 3684 static int chunk_vrange_filter(struct extent_buffer *leaf, 3685 struct btrfs_chunk *chunk, 3686 u64 chunk_offset, 3687 struct btrfs_balance_args *bargs) 3688 { 3689 if (chunk_offset < bargs->vend && 3690 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart) 3691 /* at least part of the chunk is inside this vrange */ 3692 return 0; 3693 3694 return 1; 3695 } 3696 3697 static int chunk_stripes_range_filter(struct extent_buffer *leaf, 3698 struct btrfs_chunk *chunk, 3699 struct btrfs_balance_args *bargs) 3700 { 3701 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3702 3703 if (bargs->stripes_min <= num_stripes 3704 && num_stripes <= bargs->stripes_max) 3705 return 0; 3706 3707 return 1; 3708 } 3709 3710 static int chunk_soft_convert_filter(u64 chunk_type, 3711 struct btrfs_balance_args *bargs) 3712 { 3713 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 3714 return 0; 3715 3716 chunk_type = chunk_to_extended(chunk_type) & 3717 BTRFS_EXTENDED_PROFILE_MASK; 3718 3719 if (bargs->target == chunk_type) 3720 return 1; 3721 3722 return 0; 3723 } 3724 3725 static int should_balance_chunk(struct extent_buffer *leaf, 3726 struct btrfs_chunk *chunk, u64 chunk_offset) 3727 { 3728 struct btrfs_fs_info *fs_info = leaf->fs_info; 3729 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3730 struct btrfs_balance_args *bargs = NULL; 3731 u64 chunk_type = btrfs_chunk_type(leaf, chunk); 3732 3733 /* type filter */ 3734 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) & 3735 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) { 3736 return 0; 3737 } 3738 3739 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3740 bargs = &bctl->data; 3741 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3742 bargs = &bctl->sys; 3743 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3744 bargs = &bctl->meta; 3745 3746 /* profiles filter */ 3747 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) && 3748 chunk_profiles_filter(chunk_type, bargs)) { 3749 return 0; 3750 } 3751 3752 /* usage filter */ 3753 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) && 3754 chunk_usage_filter(fs_info, chunk_offset, bargs)) { 3755 return 0; 3756 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3757 chunk_usage_range_filter(fs_info, chunk_offset, bargs)) { 3758 return 0; 3759 } 3760 3761 /* devid filter */ 3762 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) && 3763 chunk_devid_filter(leaf, chunk, bargs)) { 3764 return 0; 3765 } 3766 3767 /* drange filter, makes sense only with devid filter */ 3768 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) && 3769 chunk_drange_filter(leaf, chunk, bargs)) { 3770 return 0; 3771 } 3772 3773 /* vrange filter */ 3774 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) && 3775 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) { 3776 return 0; 3777 } 3778 3779 /* stripes filter */ 3780 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) && 3781 chunk_stripes_range_filter(leaf, chunk, bargs)) { 3782 return 0; 3783 } 3784 3785 /* soft profile changing mode */ 3786 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) && 3787 chunk_soft_convert_filter(chunk_type, bargs)) { 3788 return 0; 3789 } 3790 3791 /* 3792 * limited by count, must be the last filter 3793 */ 3794 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) { 3795 if (bargs->limit == 0) 3796 return 0; 3797 else 3798 bargs->limit--; 3799 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) { 3800 /* 3801 * Same logic as the 'limit' filter; the minimum cannot be 3802 * determined here 
because we do not have the global information
3803 * about the count of all chunks that satisfy the filters.
3804 */
3805 if (bargs->limit_max == 0)
3806 return 0;
3807 else
3808 bargs->limit_max--;
3809 }
3810
3811 return 1;
3812 }
3813
3814 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3815 {
3816 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3817 struct btrfs_root *chunk_root = fs_info->chunk_root;
3818 u64 chunk_type;
3819 struct btrfs_chunk *chunk;
3820 struct btrfs_path *path = NULL;
3821 struct btrfs_key key;
3822 struct btrfs_key found_key;
3823 struct extent_buffer *leaf;
3824 int slot;
3825 int ret;
3826 int enospc_errors = 0;
3827 bool counting = true;
3828 /* The single value limit and min/max limits use the same bytes in the btrfs_balance_args union */
3829 u64 limit_data = bctl->data.limit;
3830 u64 limit_meta = bctl->meta.limit;
3831 u64 limit_sys = bctl->sys.limit;
3832 u32 count_data = 0;
3833 u32 count_meta = 0;
3834 u32 count_sys = 0;
3835 int chunk_reserved = 0;
3836
3837 path = btrfs_alloc_path();
3838 if (!path) {
3839 ret = -ENOMEM;
3840 goto error;
3841 }
3842
3843 /* zero out stat counters */
3844 spin_lock(&fs_info->balance_lock);
3845 memset(&bctl->stat, 0, sizeof(bctl->stat));
3846 spin_unlock(&fs_info->balance_lock);
3847 again:
3848 if (!counting) {
3849 /*
3850 * The single value limit and min/max limits use the same bytes
3851 * in the btrfs_balance_args union, so restore the single value
3852 * limits that the counting pass may have decremented */
3853 bctl->data.limit = limit_data;
3854 bctl->meta.limit = limit_meta;
3855 bctl->sys.limit = limit_sys;
3856 }
3857 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3858 key.offset = (u64)-1;
3859 key.type = BTRFS_CHUNK_ITEM_KEY;
3860
3861 while (1) {
3862 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3863 atomic_read(&fs_info->balance_cancel_req)) {
3864 ret = -ECANCELED;
3865 goto error;
3866 }
3867
3868 mutex_lock(&fs_info->reclaim_bgs_lock);
3869 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3870 if (ret < 0) {
3871 mutex_unlock(&fs_info->reclaim_bgs_lock);
3872 goto error;
3873 }
3874
3875 /*
3876 * this shouldn't happen, it means the last relocate
3877 * failed
3878 */
3879 if (ret == 0)
3880 BUG(); /* FIXME break ?
*/
3881
3882 ret = btrfs_previous_item(chunk_root, path, 0,
3883 BTRFS_CHUNK_ITEM_KEY);
3884 if (ret) {
3885 mutex_unlock(&fs_info->reclaim_bgs_lock);
3886 ret = 0;
3887 break;
3888 }
3889
3890 leaf = path->nodes[0];
3891 slot = path->slots[0];
3892 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3893
3894 if (found_key.objectid != key.objectid) {
3895 mutex_unlock(&fs_info->reclaim_bgs_lock);
3896 break;
3897 }
3898
3899 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3900 chunk_type = btrfs_chunk_type(leaf, chunk);
3901
3902 if (!counting) {
3903 spin_lock(&fs_info->balance_lock);
3904 bctl->stat.considered++;
3905 spin_unlock(&fs_info->balance_lock);
3906 }
3907
3908 ret = should_balance_chunk(leaf, chunk, found_key.offset);
3909
3910 btrfs_release_path(path);
3911 if (!ret) {
3912 mutex_unlock(&fs_info->reclaim_bgs_lock);
3913 goto loop;
3914 }
3915
3916 if (counting) {
3917 mutex_unlock(&fs_info->reclaim_bgs_lock);
3918 spin_lock(&fs_info->balance_lock);
3919 bctl->stat.expected++;
3920 spin_unlock(&fs_info->balance_lock);
3921
3922 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3923 count_data++;
3924 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3925 count_sys++;
3926 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3927 count_meta++;
3928
3929 goto loop;
3930 }
3931
3932 /*
3933 * Apply limit_min filter, no need to check if the LIMITS
3934 * filter is used, limit_min is 0 by default
3935 */
3936 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3937 count_data < bctl->data.limit_min)
3938 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
3939 count_meta < bctl->meta.limit_min)
3940 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
3941 count_sys < bctl->sys.limit_min)) {
3942 mutex_unlock(&fs_info->reclaim_bgs_lock);
3943 goto loop;
3944 }
3945
3946 if (!chunk_reserved) {
3947 /*
3948 * We may be relocating the only data chunk we have,
3949 * which could potentially end up losing the data's
3950 * raid profile, so let's allocate an empty one in
3951 * advance.
3952 */
3953 ret = btrfs_may_alloc_data_chunk(fs_info,
3954 found_key.offset);
3955 if (ret < 0) {
3956 mutex_unlock(&fs_info->reclaim_bgs_lock);
3957 goto error;
3958 } else if (ret == 1) {
3959 chunk_reserved = 1;
3960 }
3961 }
3962
3963 ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3964 mutex_unlock(&fs_info->reclaim_bgs_lock);
3965 if (ret == -ENOSPC) {
3966 enospc_errors++;
3967 } else if (ret == -ETXTBSY) {
3968 btrfs_info(fs_info,
3969 "skipping relocation of block group %llu due to active swapfile",
3970 found_key.offset);
3971 ret = 0;
3972 } else if (ret) {
3973 goto error;
3974 } else {
3975 spin_lock(&fs_info->balance_lock);
3976 bctl->stat.completed++;
3977 spin_unlock(&fs_info->balance_lock);
3978 }
3979 loop:
3980 if (found_key.offset == 0)
3981 break;
3982 key.offset = found_key.offset - 1;
3983 }
3984
3985 if (counting) {
3986 btrfs_release_path(path);
3987 counting = false;
3988 goto again;
3989 }
3990 error:
3991 btrfs_free_path(path);
3992 if (enospc_errors) {
3993 btrfs_info(fs_info, "%d enospc errors during balance",
3994 enospc_errors);
3995 if (!ret)
3996 ret = -ENOSPC;
3997 }
3998
3999 return ret;
4000 }
4001
4002 /**
4003 * alloc_profile_is_valid - see if a given profile is valid and reduced
4004 * @flags: profile to validate
4005 * @extended: if true @flags is treated as an extended profile
4006 */
4007 static int alloc_profile_is_valid(u64 flags, int extended)
4008 {
4009 u64 mask = (extended ?
BTRFS_EXTENDED_PROFILE_MASK : 4010 BTRFS_BLOCK_GROUP_PROFILE_MASK); 4011 4012 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK; 4013 4014 /* 1) check that all other bits are zeroed */ 4015 if (flags & ~mask) 4016 return 0; 4017 4018 /* 2) see if profile is reduced */ 4019 if (flags == 0) 4020 return !extended; /* "0" is valid for usual profiles */ 4021 4022 return has_single_bit_set(flags); 4023 } 4024 4025 static inline int balance_need_close(struct btrfs_fs_info *fs_info) 4026 { 4027 /* cancel requested || normal exit path */ 4028 return atomic_read(&fs_info->balance_cancel_req) || 4029 (atomic_read(&fs_info->balance_pause_req) == 0 && 4030 atomic_read(&fs_info->balance_cancel_req) == 0); 4031 } 4032 4033 /* 4034 * Validate target profile against allowed profiles and return true if it's OK. 4035 * Otherwise print the error message and return false. 4036 */ 4037 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info, 4038 const struct btrfs_balance_args *bargs, 4039 u64 allowed, const char *type) 4040 { 4041 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 4042 return true; 4043 4044 if (fs_info->sectorsize < PAGE_SIZE && 4045 bargs->target & BTRFS_BLOCK_GROUP_RAID56_MASK) { 4046 btrfs_err(fs_info, 4047 "RAID56 is not yet supported for sectorsize %u with page size %lu", 4048 fs_info->sectorsize, PAGE_SIZE); 4049 return false; 4050 } 4051 /* Profile is valid and does not have bits outside of the allowed set */ 4052 if (alloc_profile_is_valid(bargs->target, 1) && 4053 (bargs->target & ~allowed) == 0) 4054 return true; 4055 4056 btrfs_err(fs_info, "balance: invalid convert %s profile %s", 4057 type, btrfs_bg_type_to_raid_name(bargs->target)); 4058 return false; 4059 } 4060 4061 /* 4062 * Fill @buf with textual description of balance filter flags @bargs, up to 4063 * @size_buf including the terminating null. The output may be trimmed if it 4064 * does not fit into the provided buffer. 
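 *
 * For example, a data balance started with convert, soft and usage filters
 * would render here as "convert=raid1,soft,usage=50" (illustrative values);
 * the trailing comma is overwritten with the terminating null below.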
4065 */ 4066 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf, 4067 u32 size_buf) 4068 { 4069 int ret; 4070 u32 size_bp = size_buf; 4071 char *bp = buf; 4072 u64 flags = bargs->flags; 4073 char tmp_buf[128] = {'\0'}; 4074 4075 if (!flags) 4076 return; 4077 4078 #define CHECK_APPEND_NOARG(a) \ 4079 do { \ 4080 ret = snprintf(bp, size_bp, (a)); \ 4081 if (ret < 0 || ret >= size_bp) \ 4082 goto out_overflow; \ 4083 size_bp -= ret; \ 4084 bp += ret; \ 4085 } while (0) 4086 4087 #define CHECK_APPEND_1ARG(a, v1) \ 4088 do { \ 4089 ret = snprintf(bp, size_bp, (a), (v1)); \ 4090 if (ret < 0 || ret >= size_bp) \ 4091 goto out_overflow; \ 4092 size_bp -= ret; \ 4093 bp += ret; \ 4094 } while (0) 4095 4096 #define CHECK_APPEND_2ARG(a, v1, v2) \ 4097 do { \ 4098 ret = snprintf(bp, size_bp, (a), (v1), (v2)); \ 4099 if (ret < 0 || ret >= size_bp) \ 4100 goto out_overflow; \ 4101 size_bp -= ret; \ 4102 bp += ret; \ 4103 } while (0) 4104 4105 if (flags & BTRFS_BALANCE_ARGS_CONVERT) 4106 CHECK_APPEND_1ARG("convert=%s,", 4107 btrfs_bg_type_to_raid_name(bargs->target)); 4108 4109 if (flags & BTRFS_BALANCE_ARGS_SOFT) 4110 CHECK_APPEND_NOARG("soft,"); 4111 4112 if (flags & BTRFS_BALANCE_ARGS_PROFILES) { 4113 btrfs_describe_block_groups(bargs->profiles, tmp_buf, 4114 sizeof(tmp_buf)); 4115 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf); 4116 } 4117 4118 if (flags & BTRFS_BALANCE_ARGS_USAGE) 4119 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage); 4120 4121 if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) 4122 CHECK_APPEND_2ARG("usage=%u..%u,", 4123 bargs->usage_min, bargs->usage_max); 4124 4125 if (flags & BTRFS_BALANCE_ARGS_DEVID) 4126 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid); 4127 4128 if (flags & BTRFS_BALANCE_ARGS_DRANGE) 4129 CHECK_APPEND_2ARG("drange=%llu..%llu,", 4130 bargs->pstart, bargs->pend); 4131 4132 if (flags & BTRFS_BALANCE_ARGS_VRANGE) 4133 CHECK_APPEND_2ARG("vrange=%llu..%llu,", 4134 bargs->vstart, bargs->vend); 4135 4136 if (flags & BTRFS_BALANCE_ARGS_LIMIT) 4137 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit); 4138 4139 if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE) 4140 CHECK_APPEND_2ARG("limit=%u..%u,", 4141 bargs->limit_min, bargs->limit_max); 4142 4143 if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) 4144 CHECK_APPEND_2ARG("stripes=%u..%u,", 4145 bargs->stripes_min, bargs->stripes_max); 4146 4147 #undef CHECK_APPEND_2ARG 4148 #undef CHECK_APPEND_1ARG 4149 #undef CHECK_APPEND_NOARG 4150 4151 out_overflow: 4152 4153 if (size_bp < size_buf) 4154 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */ 4155 else 4156 buf[0] = '\0'; 4157 } 4158 4159 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info) 4160 { 4161 u32 size_buf = 1024; 4162 char tmp_buf[192] = {'\0'}; 4163 char *buf; 4164 char *bp; 4165 u32 size_bp = size_buf; 4166 int ret; 4167 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 4168 4169 buf = kzalloc(size_buf, GFP_KERNEL); 4170 if (!buf) 4171 return; 4172 4173 bp = buf; 4174 4175 #define CHECK_APPEND_1ARG(a, v1) \ 4176 do { \ 4177 ret = snprintf(bp, size_bp, (a), (v1)); \ 4178 if (ret < 0 || ret >= size_bp) \ 4179 goto out_overflow; \ 4180 size_bp -= ret; \ 4181 bp += ret; \ 4182 } while (0) 4183 4184 if (bctl->flags & BTRFS_BALANCE_FORCE) 4185 CHECK_APPEND_1ARG("%s", "-f "); 4186 4187 if (bctl->flags & BTRFS_BALANCE_DATA) { 4188 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf)); 4189 CHECK_APPEND_1ARG("-d%s ", tmp_buf); 4190 } 4191 4192 if (bctl->flags & BTRFS_BALANCE_METADATA) { 4193 describe_balance_args(&bctl->meta, 
tmp_buf, sizeof(tmp_buf));
4194 CHECK_APPEND_1ARG("-m%s ", tmp_buf);
4195 }
4196
4197 if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
4198 describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
4199 CHECK_APPEND_1ARG("-s%s ", tmp_buf);
4200 }
4201
4202 #undef CHECK_APPEND_1ARG
4203
4204 out_overflow:
4205
4206 if (size_bp < size_buf)
4207 buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
4208 btrfs_info(fs_info, "balance: %s %s",
4209 (bctl->flags & BTRFS_BALANCE_RESUME) ?
4210 "resume" : "start", buf);
4211
4212 kfree(buf);
4213 }
4214
4215 /*
4216 * Should be called with balance mutex held
4217 */
4218 int btrfs_balance(struct btrfs_fs_info *fs_info,
4219 struct btrfs_balance_control *bctl,
4220 struct btrfs_ioctl_balance_args *bargs)
4221 {
4222 u64 meta_target, data_target;
4223 u64 allowed;
4224 int mixed = 0;
4225 int ret;
4226 u64 num_devices;
4227 unsigned seq;
4228 bool reducing_redundancy;
4229 int i;
4230
4231 if (btrfs_fs_closing(fs_info) ||
4232 atomic_read(&fs_info->balance_pause_req) ||
4233 btrfs_should_cancel_balance(fs_info)) {
4234 ret = -EINVAL;
4235 goto out;
4236 }
4237
4238 allowed = btrfs_super_incompat_flags(fs_info->super_copy);
4239 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
4240 mixed = 1;
4241
4242 /*
4243 * In case of mixed groups both data and meta should be picked,
4244 * and identical options should be given for both of them.
4245 */
4246 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
4247 if (mixed && (bctl->flags & allowed)) {
4248 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
4249 !(bctl->flags & BTRFS_BALANCE_METADATA) ||
4250 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
4251 btrfs_err(fs_info,
4252 "balance: mixed groups data and metadata options must be the same");
4253 ret = -EINVAL;
4254 goto out;
4255 }
4256 }
4257
4258 /*
4259 * rw_devices will not change at the moment, device add/delete/replace
4260 * are exclusive
4261 */
4262 num_devices = fs_info->fs_devices->rw_devices;
4263
4264 /*
4265 * SINGLE profile on-disk has no profile bit, but in-memory we have a
4266 * special bit for it, to make it easier to distinguish. Thus we need
4267 * to set it manually, or balance would refuse the profile.
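 *
 * For illustration, with the btrfs_raid_array values above: on a filesystem
 * with num_devices == 2 the loop below builds an allowed mask of
 * single|dup|raid0|raid1|raid10|raid5, while raid1c3, raid1c4 and raid6 are
 * excluded because they need at least 3 devices.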
4268 */
4269 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
4270 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++)
4271 if (num_devices >= btrfs_raid_array[i].devs_min)
4272 allowed |= btrfs_raid_array[i].bg_flag;
4273
4274 if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") ||
4275 !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") ||
4276 !validate_convert_profile(fs_info, &bctl->sys, allowed, "system")) {
4277 ret = -EINVAL;
4278 goto out;
4279 }
4280
4281 /*
4282 * Allow reducing metadata or system integrity only if force is set for
4283 * profiles with redundancy (copies, parity)
4284 */
4285 allowed = 0;
4286 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) {
4287 if (btrfs_raid_array[i].ncopies >= 2 ||
4288 btrfs_raid_array[i].tolerated_failures >= 1)
4289 allowed |= btrfs_raid_array[i].bg_flag;
4290 }
4291 do {
4292 seq = read_seqbegin(&fs_info->profiles_lock);
4293
4294 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4295 (fs_info->avail_system_alloc_bits & allowed) &&
4296 !(bctl->sys.target & allowed)) ||
4297 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4298 (fs_info->avail_metadata_alloc_bits & allowed) &&
4299 !(bctl->meta.target & allowed)))
4300 reducing_redundancy = true;
4301 else
4302 reducing_redundancy = false;
4303
4304 /* if we're not converting, the target field is uninitialized */
4305 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4306 bctl->meta.target : fs_info->avail_metadata_alloc_bits;
4307 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4308 bctl->data.target : fs_info->avail_data_alloc_bits;
4309 } while (read_seqretry(&fs_info->profiles_lock, seq));
4310
4311 if (reducing_redundancy) {
4312 if (bctl->flags & BTRFS_BALANCE_FORCE) {
4313 btrfs_info(fs_info,
4314 "balance: force reducing metadata redundancy");
4315 } else {
4316 btrfs_err(fs_info,
4317 "balance: reduces metadata redundancy, use --force if you want this");
4318 ret = -EINVAL;
4319 goto out;
4320 }
4321 }
4322
4323 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
4324 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
4325 btrfs_warn(fs_info,
4326 "balance: metadata profile %s has lower redundancy than data profile %s",
4327 btrfs_bg_type_to_raid_name(meta_target),
4328 btrfs_bg_type_to_raid_name(data_target));
4329 }
4330
4331 ret = insert_balance_item(fs_info, bctl);
4332 if (ret && ret != -EEXIST)
4333 goto out;
4334
4335 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
4336 BUG_ON(ret == -EEXIST);
4337 BUG_ON(fs_info->balance_ctl);
4338 spin_lock(&fs_info->balance_lock);
4339 fs_info->balance_ctl = bctl;
4340 spin_unlock(&fs_info->balance_lock);
4341 } else {
4342 BUG_ON(ret != -EEXIST);
4343 spin_lock(&fs_info->balance_lock);
4344 update_balance_args(bctl);
4345 spin_unlock(&fs_info->balance_lock);
4346 }
4347
4348 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4349 set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4350 describe_balance_start_or_resume(fs_info);
4351 mutex_unlock(&fs_info->balance_mutex);
4352
4353 ret = __btrfs_balance(fs_info);
4354
4355 mutex_lock(&fs_info->balance_mutex);
4356 if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req))
4357 btrfs_info(fs_info, "balance: paused");
4358 /*
4359 * Balance can be canceled by:
4360 *
4361 * - Regular cancel request
4362 * Then ret == -ECANCELED and balance_cancel_req > 0
4363 *
4364 * - Fatal signal to "btrfs" process
4365 * Either the signal caught by wait_reserve_ticket() and callers
4366 * got
-EINTR, or caught by btrfs_should_cancel_balance() and
4367 * got -ECANCELED.
4368 * Either way, in this case balance_cancel_req = 0, and
4369 * ret == -EINTR or ret == -ECANCELED.
4370 *
4371 * So here we only check the return value to catch canceled balance.
4372 */
4373 else if (ret == -ECANCELED || ret == -EINTR)
4374 btrfs_info(fs_info, "balance: canceled");
4375 else
4376 btrfs_info(fs_info, "balance: ended with status: %d", ret);
4377
4378 clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4379
4380 if (bargs) {
4381 memset(bargs, 0, sizeof(*bargs));
4382 btrfs_update_ioctl_balance_args(fs_info, bargs);
4383 }
4384
4385 if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
4386 balance_need_close(fs_info)) {
4387 reset_balance_state(fs_info);
4388 btrfs_exclop_finish(fs_info);
4389 }
4390
4391 wake_up(&fs_info->balance_wait_q);
4392
4393 return ret;
4394 out:
4395 if (bctl->flags & BTRFS_BALANCE_RESUME)
4396 reset_balance_state(fs_info);
4397 else
4398 kfree(bctl);
4399 btrfs_exclop_finish(fs_info);
4400
4401 return ret;
4402 }
4403
4404 static int balance_kthread(void *data)
4405 {
4406 struct btrfs_fs_info *fs_info = data;
4407 int ret = 0;
4408
4409 mutex_lock(&fs_info->balance_mutex);
4410 if (fs_info->balance_ctl)
4411 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
4412 mutex_unlock(&fs_info->balance_mutex);
4413
4414 return ret;
4415 }
4416
4417 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
4418 {
4419 struct task_struct *tsk;
4420
4421 mutex_lock(&fs_info->balance_mutex);
4422 if (!fs_info->balance_ctl) {
4423 mutex_unlock(&fs_info->balance_mutex);
4424 return 0;
4425 }
4426 mutex_unlock(&fs_info->balance_mutex);
4427
4428 if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
4429 btrfs_info(fs_info, "balance: resume skipped");
4430 return 0;
4431 }
4432
4433 /*
4434 * A ro->rw remount sequence should continue with the paused balance
4435 * regardless of who paused it (currently either the system or the
4436 * user), so set the resume flag.
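 *
 * For example (assumed behavior of the mount path): a filesystem remounted
 * read-write with a pending balance item resumes it via the kthread spawned
 * below, unless it was mounted with -o skip_balance, which the SKIP_BALANCE
 * check above implements.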
4437 */
4438 spin_lock(&fs_info->balance_lock);
4439 fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
4440 spin_unlock(&fs_info->balance_lock);
4441
4442 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
4443 return PTR_ERR_OR_ZERO(tsk);
4444 }
4445
4446 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
4447 {
4448 struct btrfs_balance_control *bctl;
4449 struct btrfs_balance_item *item;
4450 struct btrfs_disk_balance_args disk_bargs;
4451 struct btrfs_path *path;
4452 struct extent_buffer *leaf;
4453 struct btrfs_key key;
4454 int ret;
4455
4456 path = btrfs_alloc_path();
4457 if (!path)
4458 return -ENOMEM;
4459
4460 key.objectid = BTRFS_BALANCE_OBJECTID;
4461 key.type = BTRFS_TEMPORARY_ITEM_KEY;
4462 key.offset = 0;
4463
4464 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4465 if (ret < 0)
4466 goto out;
4467 if (ret > 0) { /* ret = -ENOENT; */
4468 ret = 0;
4469 goto out;
4470 }
4471
4472 bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
4473 if (!bctl) {
4474 ret = -ENOMEM;
4475 goto out;
4476 }
4477
4478 leaf = path->nodes[0];
4479 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
4480
4481 bctl->flags = btrfs_balance_flags(leaf, item);
4482 bctl->flags |= BTRFS_BALANCE_RESUME;
4483
4484 btrfs_balance_data(leaf, item, &disk_bargs);
4485 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
4486 btrfs_balance_meta(leaf, item, &disk_bargs);
4487 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
4488 btrfs_balance_sys(leaf, item, &disk_bargs);
4489 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
4490
4491 /*
4492 * This should never happen, as the paused balance state is recovered
4493 * during mount without any chance for other exclusive ops to collide.
4494 *
4495 * This gives the exclusive op status to balance and keeps it in a
4496 * paused state until user intervention (cancel or umount). If the
4497 * ownership cannot be assigned, show a message but do not fail. The
4498 * balance is in a paused state and must have fs_info::balance_ctl
4499 * properly set up.
4500 */ 4501 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) 4502 btrfs_warn(fs_info, 4503 "balance: cannot set exclusive op status, resume manually"); 4504 4505 btrfs_release_path(path); 4506 4507 mutex_lock(&fs_info->balance_mutex); 4508 BUG_ON(fs_info->balance_ctl); 4509 spin_lock(&fs_info->balance_lock); 4510 fs_info->balance_ctl = bctl; 4511 spin_unlock(&fs_info->balance_lock); 4512 mutex_unlock(&fs_info->balance_mutex); 4513 out: 4514 btrfs_free_path(path); 4515 return ret; 4516 } 4517 4518 int btrfs_pause_balance(struct btrfs_fs_info *fs_info) 4519 { 4520 int ret = 0; 4521 4522 mutex_lock(&fs_info->balance_mutex); 4523 if (!fs_info->balance_ctl) { 4524 mutex_unlock(&fs_info->balance_mutex); 4525 return -ENOTCONN; 4526 } 4527 4528 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4529 atomic_inc(&fs_info->balance_pause_req); 4530 mutex_unlock(&fs_info->balance_mutex); 4531 4532 wait_event(fs_info->balance_wait_q, 4533 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4534 4535 mutex_lock(&fs_info->balance_mutex); 4536 /* we are good with balance_ctl ripped off from under us */ 4537 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4538 atomic_dec(&fs_info->balance_pause_req); 4539 } else { 4540 ret = -ENOTCONN; 4541 } 4542 4543 mutex_unlock(&fs_info->balance_mutex); 4544 return ret; 4545 } 4546 4547 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) 4548 { 4549 mutex_lock(&fs_info->balance_mutex); 4550 if (!fs_info->balance_ctl) { 4551 mutex_unlock(&fs_info->balance_mutex); 4552 return -ENOTCONN; 4553 } 4554 4555 /* 4556 * A paused balance with the item stored on disk can be resumed at 4557 * mount time if the mount is read-write. Otherwise it's still paused 4558 * and we must not allow cancelling as it deletes the item. 4559 */ 4560 if (sb_rdonly(fs_info->sb)) { 4561 mutex_unlock(&fs_info->balance_mutex); 4562 return -EROFS; 4563 } 4564 4565 atomic_inc(&fs_info->balance_cancel_req); 4566 /* 4567 * if we are running just wait and return, balance item is 4568 * deleted in btrfs_balance in this case 4569 */ 4570 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4571 mutex_unlock(&fs_info->balance_mutex); 4572 wait_event(fs_info->balance_wait_q, 4573 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4574 mutex_lock(&fs_info->balance_mutex); 4575 } else { 4576 mutex_unlock(&fs_info->balance_mutex); 4577 /* 4578 * Lock released to allow other waiters to continue, we'll 4579 * reexamine the status again. 
4580 */ 4581 mutex_lock(&fs_info->balance_mutex); 4582 4583 if (fs_info->balance_ctl) { 4584 reset_balance_state(fs_info); 4585 btrfs_exclop_finish(fs_info); 4586 btrfs_info(fs_info, "balance: canceled"); 4587 } 4588 } 4589 4590 BUG_ON(fs_info->balance_ctl || 4591 test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4592 atomic_dec(&fs_info->balance_cancel_req); 4593 mutex_unlock(&fs_info->balance_mutex); 4594 return 0; 4595 } 4596 4597 int btrfs_uuid_scan_kthread(void *data) 4598 { 4599 struct btrfs_fs_info *fs_info = data; 4600 struct btrfs_root *root = fs_info->tree_root; 4601 struct btrfs_key key; 4602 struct btrfs_path *path = NULL; 4603 int ret = 0; 4604 struct extent_buffer *eb; 4605 int slot; 4606 struct btrfs_root_item root_item; 4607 u32 item_size; 4608 struct btrfs_trans_handle *trans = NULL; 4609 bool closing = false; 4610 4611 path = btrfs_alloc_path(); 4612 if (!path) { 4613 ret = -ENOMEM; 4614 goto out; 4615 } 4616 4617 key.objectid = 0; 4618 key.type = BTRFS_ROOT_ITEM_KEY; 4619 key.offset = 0; 4620 4621 while (1) { 4622 if (btrfs_fs_closing(fs_info)) { 4623 closing = true; 4624 break; 4625 } 4626 ret = btrfs_search_forward(root, &key, path, 4627 BTRFS_OLDEST_GENERATION); 4628 if (ret) { 4629 if (ret > 0) 4630 ret = 0; 4631 break; 4632 } 4633 4634 if (key.type != BTRFS_ROOT_ITEM_KEY || 4635 (key.objectid < BTRFS_FIRST_FREE_OBJECTID && 4636 key.objectid != BTRFS_FS_TREE_OBJECTID) || 4637 key.objectid > BTRFS_LAST_FREE_OBJECTID) 4638 goto skip; 4639 4640 eb = path->nodes[0]; 4641 slot = path->slots[0]; 4642 item_size = btrfs_item_size_nr(eb, slot); 4643 if (item_size < sizeof(root_item)) 4644 goto skip; 4645 4646 read_extent_buffer(eb, &root_item, 4647 btrfs_item_ptr_offset(eb, slot), 4648 (int)sizeof(root_item)); 4649 if (btrfs_root_refs(&root_item) == 0) 4650 goto skip; 4651 4652 if (!btrfs_is_empty_uuid(root_item.uuid) || 4653 !btrfs_is_empty_uuid(root_item.received_uuid)) { 4654 if (trans) 4655 goto update_tree; 4656 4657 btrfs_release_path(path); 4658 /* 4659 * 1 - subvol uuid item 4660 * 1 - received_subvol uuid item 4661 */ 4662 trans = btrfs_start_transaction(fs_info->uuid_root, 2); 4663 if (IS_ERR(trans)) { 4664 ret = PTR_ERR(trans); 4665 break; 4666 } 4667 continue; 4668 } else { 4669 goto skip; 4670 } 4671 update_tree: 4672 btrfs_release_path(path); 4673 if (!btrfs_is_empty_uuid(root_item.uuid)) { 4674 ret = btrfs_uuid_tree_add(trans, root_item.uuid, 4675 BTRFS_UUID_KEY_SUBVOL, 4676 key.objectid); 4677 if (ret < 0) { 4678 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4679 ret); 4680 break; 4681 } 4682 } 4683 4684 if (!btrfs_is_empty_uuid(root_item.received_uuid)) { 4685 ret = btrfs_uuid_tree_add(trans, 4686 root_item.received_uuid, 4687 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4688 key.objectid); 4689 if (ret < 0) { 4690 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4691 ret); 4692 break; 4693 } 4694 } 4695 4696 skip: 4697 btrfs_release_path(path); 4698 if (trans) { 4699 ret = btrfs_end_transaction(trans); 4700 trans = NULL; 4701 if (ret) 4702 break; 4703 } 4704 4705 if (key.offset < (u64)-1) { 4706 key.offset++; 4707 } else if (key.type < BTRFS_ROOT_ITEM_KEY) { 4708 key.offset = 0; 4709 key.type = BTRFS_ROOT_ITEM_KEY; 4710 } else if (key.objectid < (u64)-1) { 4711 key.offset = 0; 4712 key.type = BTRFS_ROOT_ITEM_KEY; 4713 key.objectid++; 4714 } else { 4715 break; 4716 } 4717 cond_resched(); 4718 } 4719 4720 out: 4721 btrfs_free_path(path); 4722 if (trans && !IS_ERR(trans)) 4723 btrfs_end_transaction(trans); 4724 if (ret) 4725 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread 
failed %d", ret); 4726 else if (!closing) 4727 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); 4728 up(&fs_info->uuid_tree_rescan_sem); 4729 return 0; 4730 } 4731 4732 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info) 4733 { 4734 struct btrfs_trans_handle *trans; 4735 struct btrfs_root *tree_root = fs_info->tree_root; 4736 struct btrfs_root *uuid_root; 4737 struct task_struct *task; 4738 int ret; 4739 4740 /* 4741 * 1 - root node 4742 * 1 - root item 4743 */ 4744 trans = btrfs_start_transaction(tree_root, 2); 4745 if (IS_ERR(trans)) 4746 return PTR_ERR(trans); 4747 4748 uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID); 4749 if (IS_ERR(uuid_root)) { 4750 ret = PTR_ERR(uuid_root); 4751 btrfs_abort_transaction(trans, ret); 4752 btrfs_end_transaction(trans); 4753 return ret; 4754 } 4755 4756 fs_info->uuid_root = uuid_root; 4757 4758 ret = btrfs_commit_transaction(trans); 4759 if (ret) 4760 return ret; 4761 4762 down(&fs_info->uuid_tree_rescan_sem); 4763 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid"); 4764 if (IS_ERR(task)) { 4765 /* fs_info->update_uuid_tree_gen remains 0 in all error case */ 4766 btrfs_warn(fs_info, "failed to start uuid_scan task"); 4767 up(&fs_info->uuid_tree_rescan_sem); 4768 return PTR_ERR(task); 4769 } 4770 4771 return 0; 4772 } 4773 4774 /* 4775 * shrinking a device means finding all of the device extents past 4776 * the new size, and then following the back refs to the chunks. 4777 * The chunk relocation code actually frees the device extent 4778 */ 4779 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) 4780 { 4781 struct btrfs_fs_info *fs_info = device->fs_info; 4782 struct btrfs_root *root = fs_info->dev_root; 4783 struct btrfs_trans_handle *trans; 4784 struct btrfs_dev_extent *dev_extent = NULL; 4785 struct btrfs_path *path; 4786 u64 length; 4787 u64 chunk_offset; 4788 int ret; 4789 int slot; 4790 int failed = 0; 4791 bool retried = false; 4792 struct extent_buffer *l; 4793 struct btrfs_key key; 4794 struct btrfs_super_block *super_copy = fs_info->super_copy; 4795 u64 old_total = btrfs_super_total_bytes(super_copy); 4796 u64 old_size = btrfs_device_get_total_bytes(device); 4797 u64 diff; 4798 u64 start; 4799 4800 new_size = round_down(new_size, fs_info->sectorsize); 4801 start = new_size; 4802 diff = round_down(old_size - new_size, fs_info->sectorsize); 4803 4804 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 4805 return -EINVAL; 4806 4807 path = btrfs_alloc_path(); 4808 if (!path) 4809 return -ENOMEM; 4810 4811 path->reada = READA_BACK; 4812 4813 trans = btrfs_start_transaction(root, 0); 4814 if (IS_ERR(trans)) { 4815 btrfs_free_path(path); 4816 return PTR_ERR(trans); 4817 } 4818 4819 mutex_lock(&fs_info->chunk_mutex); 4820 4821 btrfs_device_set_total_bytes(device, new_size); 4822 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 4823 device->fs_devices->total_rw_bytes -= diff; 4824 atomic64_sub(diff, &fs_info->free_chunk_space); 4825 } 4826 4827 /* 4828 * Once the device's size has been set to the new size, ensure all 4829 * in-memory chunks are synced to disk so that the loop below sees them 4830 * and relocates them accordingly. 
4831 */
4832 if (contains_pending_extent(device, &start, diff)) {
4833 mutex_unlock(&fs_info->chunk_mutex);
4834 ret = btrfs_commit_transaction(trans);
4835 if (ret)
4836 goto done;
4837 } else {
4838 mutex_unlock(&fs_info->chunk_mutex);
4839 btrfs_end_transaction(trans);
4840 }
4841
4842 again:
4843 key.objectid = device->devid;
4844 key.offset = (u64)-1;
4845 key.type = BTRFS_DEV_EXTENT_KEY;
4846
4847 do {
4848 mutex_lock(&fs_info->reclaim_bgs_lock);
4849 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4850 if (ret < 0) {
4851 mutex_unlock(&fs_info->reclaim_bgs_lock);
4852 goto done;
4853 }
4854
4855 ret = btrfs_previous_item(root, path, 0, key.type);
4856 if (ret) {
4857 mutex_unlock(&fs_info->reclaim_bgs_lock);
4858 if (ret < 0)
4859 goto done;
4860 ret = 0;
4861 btrfs_release_path(path);
4862 break;
4863 }
4864
4865 l = path->nodes[0];
4866 slot = path->slots[0];
4867 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4868
4869 if (key.objectid != device->devid) {
4870 mutex_unlock(&fs_info->reclaim_bgs_lock);
4871 btrfs_release_path(path);
4872 break;
4873 }
4874
4875 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4876 length = btrfs_dev_extent_length(l, dev_extent);
4877
4878 if (key.offset + length <= new_size) {
4879 mutex_unlock(&fs_info->reclaim_bgs_lock);
4880 btrfs_release_path(path);
4881 break;
4882 }
4883
4884 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4885 btrfs_release_path(path);
4886
4887 /*
4888 * We may be relocating the only data chunk we have,
4889 * which could potentially end up losing the data's
4890 * raid profile, so let's allocate an empty one in
4891 * advance.
4892 */
4893 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
4894 if (ret < 0) {
4895 mutex_unlock(&fs_info->reclaim_bgs_lock);
4896 goto done;
4897 }
4898
4899 ret = btrfs_relocate_chunk(fs_info, chunk_offset);
4900 mutex_unlock(&fs_info->reclaim_bgs_lock);
4901 if (ret == -ENOSPC) {
4902 failed++;
4903 } else if (ret) {
4904 if (ret == -ETXTBSY) {
4905 btrfs_warn(fs_info,
4906 "could not shrink block group %llu due to active swapfile",
4907 chunk_offset);
4908 }
4909 goto done;
4910 }
4911 } while (key.offset-- > 0);
4912
4913 if (failed && !retried) {
4914 failed = 0;
4915 retried = true;
4916 goto again;
4917 } else if (failed && retried) {
4918 ret = -ENOSPC;
4919 goto done;
4920 }
4921
4922 /* Shrinking succeeded, else we would be at "done". */
4923 trans = btrfs_start_transaction(root, 0);
4924 if (IS_ERR(trans)) {
4925 ret = PTR_ERR(trans);
4926 goto done;
4927 }
4928
4929 mutex_lock(&fs_info->chunk_mutex);
4930 /* Clear all state bits beyond the shrunk device size */
4931 clear_extent_bits(&device->alloc_state, new_size, (u64)-1,
4932 CHUNK_STATE_MASK);
4933
4934 btrfs_device_set_disk_total_bytes(device, new_size);
4935 if (list_empty(&device->post_commit_list))
4936 list_add_tail(&device->post_commit_list,
4937 &trans->transaction->dev_update_list);
4938
4939 WARN_ON(diff > old_total);
4940 btrfs_set_super_total_bytes(super_copy,
4941 round_down(old_total - diff, fs_info->sectorsize));
4942 mutex_unlock(&fs_info->chunk_mutex);
4943
4944 btrfs_reserve_chunk_metadata(trans, false);
4945 /* Now btrfs_update_device() will change the on-disk size.
*/ 4946 ret = btrfs_update_device(trans, device); 4947 btrfs_trans_release_chunk_metadata(trans); 4948 if (ret < 0) { 4949 btrfs_abort_transaction(trans, ret); 4950 btrfs_end_transaction(trans); 4951 } else { 4952 ret = btrfs_commit_transaction(trans); 4953 } 4954 done: 4955 btrfs_free_path(path); 4956 if (ret) { 4957 mutex_lock(&fs_info->chunk_mutex); 4958 btrfs_device_set_total_bytes(device, old_size); 4959 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 4960 device->fs_devices->total_rw_bytes += diff; 4961 atomic64_add(diff, &fs_info->free_chunk_space); 4962 mutex_unlock(&fs_info->chunk_mutex); 4963 } 4964 return ret; 4965 } 4966 4967 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info, 4968 struct btrfs_key *key, 4969 struct btrfs_chunk *chunk, int item_size) 4970 { 4971 struct btrfs_super_block *super_copy = fs_info->super_copy; 4972 struct btrfs_disk_key disk_key; 4973 u32 array_size; 4974 u8 *ptr; 4975 4976 lockdep_assert_held(&fs_info->chunk_mutex); 4977 4978 array_size = btrfs_super_sys_array_size(super_copy); 4979 if (array_size + item_size + sizeof(disk_key) 4980 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) 4981 return -EFBIG; 4982 4983 ptr = super_copy->sys_chunk_array + array_size; 4984 btrfs_cpu_key_to_disk(&disk_key, key); 4985 memcpy(ptr, &disk_key, sizeof(disk_key)); 4986 ptr += sizeof(disk_key); 4987 memcpy(ptr, chunk, item_size); 4988 item_size += sizeof(disk_key); 4989 btrfs_set_super_sys_array_size(super_copy, array_size + item_size); 4990 4991 return 0; 4992 } 4993 4994 /* 4995 * sort the devices in descending order by max_avail, total_avail 4996 */ 4997 static int btrfs_cmp_device_info(const void *a, const void *b) 4998 { 4999 const struct btrfs_device_info *di_a = a; 5000 const struct btrfs_device_info *di_b = b; 5001 5002 if (di_a->max_avail > di_b->max_avail) 5003 return -1; 5004 if (di_a->max_avail < di_b->max_avail) 5005 return 1; 5006 if (di_a->total_avail > di_b->total_avail) 5007 return -1; 5008 if (di_a->total_avail < di_b->total_avail) 5009 return 1; 5010 return 0; 5011 } 5012 5013 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type) 5014 { 5015 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK)) 5016 return; 5017 5018 btrfs_set_fs_incompat(info, RAID56); 5019 } 5020 5021 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type) 5022 { 5023 if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4))) 5024 return; 5025 5026 btrfs_set_fs_incompat(info, RAID1C34); 5027 } 5028 5029 /* 5030 * Structure used internally for btrfs_create_chunk() function. 5031 * Wraps needed parameters. 
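 *
 * For illustration, with values from btrfs_raid_array and the regular
 * allocation policy below: a raid1 metadata chunk on a filesystem smaller
 * than 50G gets dev_stripes = 1, ncopies = 2, nparity = 0 and a
 * max_stripe_size of 256M.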
5032 */ 5033 struct alloc_chunk_ctl { 5034 u64 start; 5035 u64 type; 5036 /* Total number of stripes to allocate */ 5037 int num_stripes; 5038 /* sub_stripes info for map */ 5039 int sub_stripes; 5040 /* Stripes per device */ 5041 int dev_stripes; 5042 /* Maximum number of devices to use */ 5043 int devs_max; 5044 /* Minimum number of devices to use */ 5045 int devs_min; 5046 /* ndevs has to be a multiple of this */ 5047 int devs_increment; 5048 /* Number of copies */ 5049 int ncopies; 5050 /* Number of stripes worth of bytes to store parity information */ 5051 int nparity; 5052 u64 max_stripe_size; 5053 u64 max_chunk_size; 5054 u64 dev_extent_min; 5055 u64 stripe_size; 5056 u64 chunk_size; 5057 int ndevs; 5058 }; 5059 5060 static void init_alloc_chunk_ctl_policy_regular( 5061 struct btrfs_fs_devices *fs_devices, 5062 struct alloc_chunk_ctl *ctl) 5063 { 5064 u64 type = ctl->type; 5065 5066 if (type & BTRFS_BLOCK_GROUP_DATA) { 5067 ctl->max_stripe_size = SZ_1G; 5068 ctl->max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE; 5069 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 5070 /* For larger filesystems, use larger metadata chunks */ 5071 if (fs_devices->total_rw_bytes > 50ULL * SZ_1G) 5072 ctl->max_stripe_size = SZ_1G; 5073 else 5074 ctl->max_stripe_size = SZ_256M; 5075 ctl->max_chunk_size = ctl->max_stripe_size; 5076 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 5077 ctl->max_stripe_size = SZ_32M; 5078 ctl->max_chunk_size = 2 * ctl->max_stripe_size; 5079 ctl->devs_max = min_t(int, ctl->devs_max, 5080 BTRFS_MAX_DEVS_SYS_CHUNK); 5081 } else { 5082 BUG(); 5083 } 5084 5085 /* We don't want a chunk larger than 10% of writable space */ 5086 ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1), 5087 ctl->max_chunk_size); 5088 ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes; 5089 } 5090 5091 static void init_alloc_chunk_ctl_policy_zoned( 5092 struct btrfs_fs_devices *fs_devices, 5093 struct alloc_chunk_ctl *ctl) 5094 { 5095 u64 zone_size = fs_devices->fs_info->zone_size; 5096 u64 limit; 5097 int min_num_stripes = ctl->devs_min * ctl->dev_stripes; 5098 int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies; 5099 u64 min_chunk_size = min_data_stripes * zone_size; 5100 u64 type = ctl->type; 5101 5102 ctl->max_stripe_size = zone_size; 5103 if (type & BTRFS_BLOCK_GROUP_DATA) { 5104 ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE, 5105 zone_size); 5106 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 5107 ctl->max_chunk_size = ctl->max_stripe_size; 5108 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 5109 ctl->max_chunk_size = 2 * ctl->max_stripe_size; 5110 ctl->devs_max = min_t(int, ctl->devs_max, 5111 BTRFS_MAX_DEVS_SYS_CHUNK); 5112 } else { 5113 BUG(); 5114 } 5115 5116 /* We don't want a chunk larger than 10% of writable space */ 5117 limit = max(round_down(div_factor(fs_devices->total_rw_bytes, 1), 5118 zone_size), 5119 min_chunk_size); 5120 ctl->max_chunk_size = min(limit, ctl->max_chunk_size); 5121 ctl->dev_extent_min = zone_size * ctl->dev_stripes; 5122 } 5123 5124 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices, 5125 struct alloc_chunk_ctl *ctl) 5126 { 5127 int index = btrfs_bg_flags_to_raid_index(ctl->type); 5128 5129 ctl->sub_stripes = btrfs_raid_array[index].sub_stripes; 5130 ctl->dev_stripes = btrfs_raid_array[index].dev_stripes; 5131 ctl->devs_max = btrfs_raid_array[index].devs_max; 5132 if (!ctl->devs_max) 5133 ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info); 5134 ctl->devs_min = btrfs_raid_array[index].devs_min; 5135 
ctl->devs_increment = btrfs_raid_array[index].devs_increment; 5136 ctl->ncopies = btrfs_raid_array[index].ncopies; 5137 ctl->nparity = btrfs_raid_array[index].nparity; 5138 ctl->ndevs = 0; 5139 5140 switch (fs_devices->chunk_alloc_policy) { 5141 case BTRFS_CHUNK_ALLOC_REGULAR: 5142 init_alloc_chunk_ctl_policy_regular(fs_devices, ctl); 5143 break; 5144 case BTRFS_CHUNK_ALLOC_ZONED: 5145 init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl); 5146 break; 5147 default: 5148 BUG(); 5149 } 5150 } 5151 5152 static int gather_device_info(struct btrfs_fs_devices *fs_devices, 5153 struct alloc_chunk_ctl *ctl, 5154 struct btrfs_device_info *devices_info) 5155 { 5156 struct btrfs_fs_info *info = fs_devices->fs_info; 5157 struct btrfs_device *device; 5158 u64 total_avail; 5159 u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes; 5160 int ret; 5161 int ndevs = 0; 5162 u64 max_avail; 5163 u64 dev_offset; 5164 5165 /* 5166 * in the first pass through the devices list, we gather information 5167 * about the available holes on each device. 5168 */ 5169 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { 5170 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 5171 WARN(1, KERN_ERR 5172 "BTRFS: read-only device in alloc_list\n"); 5173 continue; 5174 } 5175 5176 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 5177 &device->dev_state) || 5178 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 5179 continue; 5180 5181 if (device->total_bytes > device->bytes_used) 5182 total_avail = device->total_bytes - device->bytes_used; 5183 else 5184 total_avail = 0; 5185 5186 /* If there is no space on this device, skip it. */ 5187 if (total_avail < ctl->dev_extent_min) 5188 continue; 5189 5190 ret = find_free_dev_extent(device, dev_extent_want, &dev_offset, 5191 &max_avail); 5192 if (ret && ret != -ENOSPC) 5193 return ret; 5194 5195 if (ret == 0) 5196 max_avail = dev_extent_want; 5197 5198 if (max_avail < ctl->dev_extent_min) { 5199 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5200 btrfs_debug(info, 5201 "%s: devid %llu has no free space, have=%llu want=%llu", 5202 __func__, device->devid, max_avail, 5203 ctl->dev_extent_min); 5204 continue; 5205 } 5206 5207 if (ndevs == fs_devices->rw_devices) { 5208 WARN(1, "%s: found more than %llu devices\n", 5209 __func__, fs_devices->rw_devices); 5210 break; 5211 } 5212 devices_info[ndevs].dev_offset = dev_offset; 5213 devices_info[ndevs].max_avail = max_avail; 5214 devices_info[ndevs].total_avail = total_avail; 5215 devices_info[ndevs].dev = device; 5216 ++ndevs; 5217 } 5218 ctl->ndevs = ndevs; 5219 5220 /* 5221 * now sort the devices by hole size / available space 5222 */ 5223 sort(devices_info, ndevs, sizeof(struct btrfs_device_info), 5224 btrfs_cmp_device_info, NULL); 5225 5226 return 0; 5227 } 5228 5229 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl, 5230 struct btrfs_device_info *devices_info) 5231 { 5232 /* Number of stripes that count for block group size */ 5233 int data_stripes; 5234 5235 /* 5236 * The primary goal is to maximize the number of stripes, so use as 5237 * many devices as possible, even if the stripes are not maximum sized. 5238 * 5239 * The DUP profile stores more than one stripe per device, the 5240 * max_avail is the total size so we have to adjust. 
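 *
 * A hypothetical example: with DUP (dev_stripes == 2) and 10G of max_avail
 * on the most constrained of the chosen devices (the sort above is
 * descending, so it is the last array entry), stripe_size starts out as 5G
 * before the max_chunk_size cap below shrinks it.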
5241 */
5242 ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail,
5243 ctl->dev_stripes);
5244 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5245
5246 /* This will have to be fixed for RAID1 and RAID10 over more drives */
5247 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5248
5249 /*
5250 * Use the number of data stripes to figure out how big this chunk is
5251 * really going to be in terms of logical address space, and compare
5252 * that answer with the max chunk size. If it's higher, we try to
5253 * reduce stripe_size.
5254 */
5255 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5256 /*
5257 * Reduce stripe_size, round it up to a 16MB boundary again and
5258 * then use it, unless it ends up being even bigger than the
5259 * previous value we had already.
5260 */
5261 ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size,
5262 data_stripes), SZ_16M),
5263 ctl->stripe_size);
5264 }
5265
5266 /* Align to BTRFS_STRIPE_LEN */
5267 ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
5268 ctl->chunk_size = ctl->stripe_size * data_stripes;
5269
5270 return 0;
5271 }
5272
5273 static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl,
5274 struct btrfs_device_info *devices_info)
5275 {
5276 u64 zone_size = devices_info[0].dev->zone_info->zone_size;
5277 /* Number of stripes that count for block group size */
5278 int data_stripes;
5279
5280 /*
5281 * It should hold because:
5282 * dev_extent_min == dev_extent_want == zone_size * dev_stripes
5283 */
5284 ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min);
5285
5286 ctl->stripe_size = zone_size;
5287 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5288 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5289
5290 /* stripe_size is fixed in zoned filesystems. Reduce ndevs instead. */
5291 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5292 ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies,
5293 ctl->stripe_size) + ctl->nparity,
5294 ctl->dev_stripes);
5295 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5296 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5297 ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size);
5298 }
5299
5300 ctl->chunk_size = ctl->stripe_size * data_stripes;
5301
5302 return 0;
5303 }
5304
5305 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
5306 struct alloc_chunk_ctl *ctl,
5307 struct btrfs_device_info *devices_info)
5308 {
5309 struct btrfs_fs_info *info = fs_devices->fs_info;
5310
5311 /*
5312 * Round down to number of usable stripes, devs_increment can be any
5313 * number so we can't use round_down() that requires power of 2, while
5314 * rounddown is safe.
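 *
 * For example, raid10 has devs_increment == 2 in btrfs_raid_array, so 5
 * usable devices are rounded down to 4 here.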
5315 */ 5316 ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment); 5317 5318 if (ctl->ndevs < ctl->devs_min) { 5319 if (btrfs_test_opt(info, ENOSPC_DEBUG)) { 5320 btrfs_debug(info, 5321 "%s: not enough devices with free space: have=%d minimum required=%d", 5322 __func__, ctl->ndevs, ctl->devs_min); 5323 } 5324 return -ENOSPC; 5325 } 5326 5327 ctl->ndevs = min(ctl->ndevs, ctl->devs_max); 5328 5329 switch (fs_devices->chunk_alloc_policy) { 5330 case BTRFS_CHUNK_ALLOC_REGULAR: 5331 return decide_stripe_size_regular(ctl, devices_info); 5332 case BTRFS_CHUNK_ALLOC_ZONED: 5333 return decide_stripe_size_zoned(ctl, devices_info); 5334 default: 5335 BUG(); 5336 } 5337 } 5338 5339 static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans, 5340 struct alloc_chunk_ctl *ctl, 5341 struct btrfs_device_info *devices_info) 5342 { 5343 struct btrfs_fs_info *info = trans->fs_info; 5344 struct map_lookup *map = NULL; 5345 struct extent_map_tree *em_tree; 5346 struct btrfs_block_group *block_group; 5347 struct extent_map *em; 5348 u64 start = ctl->start; 5349 u64 type = ctl->type; 5350 int ret; 5351 int i; 5352 int j; 5353 5354 map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS); 5355 if (!map) 5356 return ERR_PTR(-ENOMEM); 5357 map->num_stripes = ctl->num_stripes; 5358 5359 for (i = 0; i < ctl->ndevs; ++i) { 5360 for (j = 0; j < ctl->dev_stripes; ++j) { 5361 int s = i * ctl->dev_stripes + j; 5362 map->stripes[s].dev = devices_info[i].dev; 5363 map->stripes[s].physical = devices_info[i].dev_offset + 5364 j * ctl->stripe_size; 5365 } 5366 } 5367 map->stripe_len = BTRFS_STRIPE_LEN; 5368 map->io_align = BTRFS_STRIPE_LEN; 5369 map->io_width = BTRFS_STRIPE_LEN; 5370 map->type = type; 5371 map->sub_stripes = ctl->sub_stripes; 5372 5373 trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size); 5374 5375 em = alloc_extent_map(); 5376 if (!em) { 5377 kfree(map); 5378 return ERR_PTR(-ENOMEM); 5379 } 5380 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 5381 em->map_lookup = map; 5382 em->start = start; 5383 em->len = ctl->chunk_size; 5384 em->block_start = 0; 5385 em->block_len = em->len; 5386 em->orig_block_len = ctl->stripe_size; 5387 5388 em_tree = &info->mapping_tree; 5389 write_lock(&em_tree->lock); 5390 ret = add_extent_mapping(em_tree, em, 0); 5391 if (ret) { 5392 write_unlock(&em_tree->lock); 5393 free_extent_map(em); 5394 return ERR_PTR(ret); 5395 } 5396 write_unlock(&em_tree->lock); 5397 5398 block_group = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size); 5399 if (IS_ERR(block_group)) 5400 goto error_del_extent; 5401 5402 for (i = 0; i < map->num_stripes; i++) { 5403 struct btrfs_device *dev = map->stripes[i].dev; 5404 5405 btrfs_device_set_bytes_used(dev, 5406 dev->bytes_used + ctl->stripe_size); 5407 if (list_empty(&dev->post_commit_list)) 5408 list_add_tail(&dev->post_commit_list, 5409 &trans->transaction->dev_update_list); 5410 } 5411 5412 atomic64_sub(ctl->stripe_size * map->num_stripes, 5413 &info->free_chunk_space); 5414 5415 free_extent_map(em); 5416 check_raid56_incompat_flag(info, type); 5417 check_raid1c34_incompat_flag(info, type); 5418 5419 return block_group; 5420 5421 error_del_extent: 5422 write_lock(&em_tree->lock); 5423 remove_extent_mapping(em_tree, em); 5424 write_unlock(&em_tree->lock); 5425 5426 /* One for our allocation */ 5427 free_extent_map(em); 5428 /* One for the tree reference */ 5429 free_extent_map(em); 5430 5431 return block_group; 5432 } 5433 5434 struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans, 5435 u64 
type)
5436 {
5437 struct btrfs_fs_info *info = trans->fs_info;
5438 struct btrfs_fs_devices *fs_devices = info->fs_devices;
5439 struct btrfs_device_info *devices_info = NULL;
5440 struct alloc_chunk_ctl ctl;
5441 struct btrfs_block_group *block_group;
5442 int ret;
5443
5444 lockdep_assert_held(&info->chunk_mutex);
5445
5446 if (!alloc_profile_is_valid(type, 0)) {
5447 ASSERT(0);
5448 return ERR_PTR(-EINVAL);
5449 }
5450
5451 if (list_empty(&fs_devices->alloc_list)) {
5452 if (btrfs_test_opt(info, ENOSPC_DEBUG))
5453 btrfs_debug(info, "%s: no writable device", __func__);
5454 return ERR_PTR(-ENOSPC);
5455 }
5456
5457 if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
5458 btrfs_err(info, "invalid chunk type 0x%llx requested", type);
5459 ASSERT(0);
5460 return ERR_PTR(-EINVAL);
5461 }
5462
5463 ctl.start = find_next_chunk(info);
5464 ctl.type = type;
5465 init_alloc_chunk_ctl(fs_devices, &ctl);
5466
5467 devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
5468 GFP_NOFS);
5469 if (!devices_info)
5470 return ERR_PTR(-ENOMEM);
5471
5472 ret = gather_device_info(fs_devices, &ctl, devices_info);
5473 if (ret < 0) {
5474 block_group = ERR_PTR(ret);
5475 goto out;
5476 }
5477
5478 ret = decide_stripe_size(fs_devices, &ctl, devices_info);
5479 if (ret < 0) {
5480 block_group = ERR_PTR(ret);
5481 goto out;
5482 }
5483
5484 block_group = create_chunk(trans, &ctl, devices_info);
5485
5486 out:
5487 kfree(devices_info);
5488 return block_group;
5489 }
5490
5491 /*
5492 * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to
5493 * phase 1 of chunk allocation. It belongs to phase 2 only when allocating
5494 * system chunks.
5495 *
5496 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
5497 * phases.
5498 */
5499 int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
5500 struct btrfs_block_group *bg)
5501 {
5502 struct btrfs_fs_info *fs_info = trans->fs_info;
5503 struct btrfs_root *extent_root = fs_info->extent_root;
5504 struct btrfs_root *chunk_root = fs_info->chunk_root;
5505 struct btrfs_key key;
5506 struct btrfs_chunk *chunk;
5507 struct btrfs_stripe *stripe;
5508 struct extent_map *em;
5509 struct map_lookup *map;
5510 size_t item_size;
5511 int i;
5512 int ret;
5513
5514 /*
5515 * We take the chunk_mutex for 2 reasons:
5516 *
5517 * 1) Updates and insertions in the chunk btree must be done while holding
5518 * the chunk_mutex, as well as updating the system chunk array in the
5519 * superblock. See the comment on top of btrfs_chunk_alloc() for the
5520 * details;
5521 *
5522 * 2) To prevent races with the final phase of a device replace operation
5523 * that replaces the device object associated with the map's stripes,
5524 * because the device object's id can change at any time during that
5525 * final phase of the device replace operation
5526 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
5527 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
5528 * which would cause a failure when updating the device item, which does
5529 * not exist, or persisting a stripe of the chunk item with such ID.
5530 * Here we can't use the device_list_mutex because our caller already
5531 * has locked the chunk_mutex, and the final phase of device replace
5532 * acquires both mutexes - first the device_list_mutex and then the
5533 * chunk_mutex. Using any of those two mutexes protects us from a
5534 * concurrent device replace.
5535 */ 5536 lockdep_assert_held(&fs_info->chunk_mutex); 5537 5538 em = btrfs_get_chunk_map(fs_info, bg->start, bg->length); 5539 if (IS_ERR(em)) { 5540 ret = PTR_ERR(em); 5541 btrfs_abort_transaction(trans, ret); 5542 return ret; 5543 } 5544 5545 map = em->map_lookup; 5546 item_size = btrfs_chunk_item_size(map->num_stripes); 5547 5548 chunk = kzalloc(item_size, GFP_NOFS); 5549 if (!chunk) { 5550 ret = -ENOMEM; 5551 btrfs_abort_transaction(trans, ret); 5552 goto out; 5553 } 5554 5555 for (i = 0; i < map->num_stripes; i++) { 5556 struct btrfs_device *device = map->stripes[i].dev; 5557 5558 ret = btrfs_update_device(trans, device); 5559 if (ret) 5560 goto out; 5561 } 5562 5563 stripe = &chunk->stripe; 5564 for (i = 0; i < map->num_stripes; i++) { 5565 struct btrfs_device *device = map->stripes[i].dev; 5566 const u64 dev_offset = map->stripes[i].physical; 5567 5568 btrfs_set_stack_stripe_devid(stripe, device->devid); 5569 btrfs_set_stack_stripe_offset(stripe, dev_offset); 5570 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE); 5571 stripe++; 5572 } 5573 5574 btrfs_set_stack_chunk_length(chunk, bg->length); 5575 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid); 5576 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len); 5577 btrfs_set_stack_chunk_type(chunk, map->type); 5578 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes); 5579 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len); 5580 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len); 5581 btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize); 5582 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes); 5583 5584 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 5585 key.type = BTRFS_CHUNK_ITEM_KEY; 5586 key.offset = bg->start; 5587 5588 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); 5589 if (ret) 5590 goto out; 5591 5592 bg->chunk_item_inserted = 1; 5593 5594 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 5595 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size); 5596 if (ret) 5597 goto out; 5598 } 5599 5600 out: 5601 kfree(chunk); 5602 free_extent_map(em); 5603 return ret; 5604 } 5605 5606 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans) 5607 { 5608 struct btrfs_fs_info *fs_info = trans->fs_info; 5609 u64 alloc_profile; 5610 struct btrfs_block_group *meta_bg; 5611 struct btrfs_block_group *sys_bg; 5612 5613 /* 5614 * When adding a new device for sprouting, the seed device is read-only 5615 * so we must first allocate a metadata and a system chunk. But before 5616 * adding the block group items to the extent, device and chunk btrees, 5617 * we must first: 5618 * 5619 * 1) Create both chunks without doing any changes to the btrees, as 5620 * otherwise we would get -ENOSPC since the block groups from the 5621 * seed device are read-only; 5622 * 5623 * 2) Add the device item for the new sprout device - finishing the setup 5624 * of a new block group requires updating the device item in the chunk 5625 * btree, so it must exist when we attempt to do it. The previous step 5626 * ensures this does not fail with -ENOSPC. 5627 * 5628 * After that we can add the block group items to their btrees: 5629 * update existing device item in the chunk btree, add a new block group 5630 * item to the extent btree, add a new chunk item to the chunk btree and 5631 * finally add the new device extent items to the devices btree. 
5632 */ 5633 5634 alloc_profile = btrfs_metadata_alloc_profile(fs_info); 5635 meta_bg = btrfs_create_chunk(trans, alloc_profile); 5636 if (IS_ERR(meta_bg)) 5637 return PTR_ERR(meta_bg); 5638 5639 alloc_profile = btrfs_system_alloc_profile(fs_info); 5640 sys_bg = btrfs_create_chunk(trans, alloc_profile); 5641 if (IS_ERR(sys_bg)) 5642 return PTR_ERR(sys_bg); 5643 5644 return 0; 5645 } 5646 5647 static inline int btrfs_chunk_max_errors(struct map_lookup *map) 5648 { 5649 const int index = btrfs_bg_flags_to_raid_index(map->type); 5650 5651 return btrfs_raid_array[index].tolerated_failures; 5652 } 5653 5654 bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset) 5655 { 5656 struct extent_map *em; 5657 struct map_lookup *map; 5658 int miss_ndevs = 0; 5659 int i; 5660 bool ret = true; 5661 5662 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 5663 if (IS_ERR(em)) 5664 return false; 5665 5666 map = em->map_lookup; 5667 for (i = 0; i < map->num_stripes; i++) { 5668 if (test_bit(BTRFS_DEV_STATE_MISSING, 5669 &map->stripes[i].dev->dev_state)) { 5670 miss_ndevs++; 5671 continue; 5672 } 5673 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, 5674 &map->stripes[i].dev->dev_state)) { 5675 ret = false; 5676 goto end; 5677 } 5678 } 5679 5680 /* 5681 * If the number of missing devices is larger than max errors, we can 5682 * not write the data into that chunk successfully. 5683 */ 5684 if (miss_ndevs > btrfs_chunk_max_errors(map)) 5685 ret = false; 5686 end: 5687 free_extent_map(em); 5688 return ret; 5689 } 5690 5691 void btrfs_mapping_tree_free(struct extent_map_tree *tree) 5692 { 5693 struct extent_map *em; 5694 5695 while (1) { 5696 write_lock(&tree->lock); 5697 em = lookup_extent_mapping(tree, 0, (u64)-1); 5698 if (em) 5699 remove_extent_mapping(tree, em); 5700 write_unlock(&tree->lock); 5701 if (!em) 5702 break; 5703 /* once for us */ 5704 free_extent_map(em); 5705 /* once for the tree */ 5706 free_extent_map(em); 5707 } 5708 } 5709 5710 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5711 { 5712 struct extent_map *em; 5713 struct map_lookup *map; 5714 int ret; 5715 5716 em = btrfs_get_chunk_map(fs_info, logical, len); 5717 if (IS_ERR(em)) 5718 /* 5719 * We could return errors for these cases, but that could get 5720 * ugly and we'd probably do the same thing which is just not do 5721 * anything else and exit, so return 1 so the callers don't try 5722 * to use other copies. 5723 */ 5724 return 1; 5725 5726 map = em->map_lookup; 5727 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK)) 5728 ret = map->num_stripes; 5729 else if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5730 ret = map->sub_stripes; 5731 else if (map->type & BTRFS_BLOCK_GROUP_RAID5) 5732 ret = 2; 5733 else if (map->type & BTRFS_BLOCK_GROUP_RAID6) 5734 /* 5735 * There could be two corrupted data stripes, we need 5736 * to loop retry in order to rebuild the correct data. 5737 * 5738 * Fail a stripe at a time on every retry except the 5739 * stripe under reconstruction. 
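 *
 * For example, a six-stripe RAID6 chunk reports 6 copies here, so the read
 * path can retry up to six times, failing a different stripe on each pass,
 * even though only two devices may actually fail at once.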
unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
				    u64 logical)
{
	struct extent_map *em;
	struct map_lookup *map;
	unsigned long len = fs_info->sectorsize;

	em = btrfs_get_chunk_map(fs_info, logical, len);

	if (!WARN_ON(IS_ERR(em))) {
		map = em->map_lookup;
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			len = map->stripe_len * nr_data_stripes(map);
		free_extent_map(em);
	}
	return len;
}

int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	int ret = 0;

	em = btrfs_get_chunk_map(fs_info, logical, len);

	if (!WARN_ON(IS_ERR(em))) {
		map = em->map_lookup;
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			ret = 1;
		free_extent_map(em);
	}
	return ret;
}

static int find_live_mirror(struct btrfs_fs_info *fs_info,
			    struct map_lookup *map, int first,
			    int dev_replace_is_ongoing)
{
	int i;
	int num_stripes;
	int preferred_mirror;
	int tolerance;
	struct btrfs_device *srcdev;

	ASSERT((map->type &
		 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));

	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		num_stripes = map->sub_stripes;
	else
		num_stripes = map->num_stripes;

	switch (fs_info->fs_devices->read_policy) {
	default:
		/* Shouldn't happen, just warn and use pid instead of failing */
		btrfs_warn_rl(fs_info,
			      "unknown read_policy type %u, reset to pid",
			      fs_info->fs_devices->read_policy);
		fs_info->fs_devices->read_policy = BTRFS_READ_POLICY_PID;
		fallthrough;
	case BTRFS_READ_POLICY_PID:
		preferred_mirror = first + (current->pid % num_stripes);
		break;
	}

	if (dev_replace_is_ongoing &&
	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
	    BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
		srcdev = fs_info->dev_replace.srcdev;
	else
		srcdev = NULL;

	/*
	 * Try to avoid the drive that is the source drive for a dev-replace
	 * procedure; only choose it if no other non-missing mirror is
	 * available.
	 */
	for (tolerance = 0; tolerance < 2; tolerance++) {
		if (map->stripes[preferred_mirror].dev->bdev &&
		    (tolerance || map->stripes[preferred_mirror].dev != srcdev))
			return preferred_mirror;
		for (i = first; i < first + num_stripes; i++) {
			if (map->stripes[i].dev->bdev &&
			    (tolerance || map->stripes[i].dev != srcdev))
				return i;
		}
	}

	/*
	 * We couldn't find a mirror that doesn't fail. Just return something
	 * and let the I/O error handling code clean up eventually.
	 */
	return preferred_mirror;
}
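/*
 * A worked example of the PID read policy above (numbers made up): on RAID1
 * there are two stripes, so num_stripes == 2 and first == 0, and a task with
 * pid 4321 gets preferred_mirror = 0 + (4321 % 2) = 1. Different processes
 * thus naturally spread their reads across the copies. For RAID10 only the
 * sub_stripes copies of one stripe set are considered.
 */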
/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
static void sort_parity_stripes(struct btrfs_io_context *bioc, int num_stripes)
{
	int i;
	int again = 1;

	while (again) {
		again = 0;
		for (i = 0; i < num_stripes - 1; i++) {
			/* Swap if parity is on a smaller index */
			if (bioc->raid_map[i] > bioc->raid_map[i + 1]) {
				swap(bioc->stripes[i], bioc->stripes[i + 1]);
				swap(bioc->raid_map[i], bioc->raid_map[i + 1]);
				again = 1;
			}
		}
	}
}

static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info,
						       int total_stripes,
						       int real_stripes)
{
	struct btrfs_io_context *bioc = kzalloc(
		 /* The size of btrfs_io_context */
		sizeof(struct btrfs_io_context) +
		/* Plus the variable array for the stripes */
		sizeof(struct btrfs_io_stripe) * (total_stripes) +
		/* Plus the variable array for the tgt dev */
		sizeof(int) * (real_stripes) +
		/*
		 * Plus the raid_map, which includes both the tgt dev
		 * and the stripes.
		 */
		sizeof(u64) * (total_stripes),
		GFP_NOFS|__GFP_NOFAIL);

	atomic_set(&bioc->error, 0);
	refcount_set(&bioc->refs, 1);

	bioc->fs_info = fs_info;
	bioc->tgtdev_map = (int *)(bioc->stripes + total_stripes);
	bioc->raid_map = (u64 *)(bioc->tgtdev_map + real_stripes);

	return bioc;
}

void btrfs_get_bioc(struct btrfs_io_context *bioc)
{
	WARN_ON(!refcount_read(&bioc->refs));
	refcount_inc(&bioc->refs);
}

void btrfs_put_bioc(struct btrfs_io_context *bioc)
{
	if (!bioc)
		return;
	if (refcount_dec_and_test(&bioc->refs))
		kfree(bioc);
}

/*
 * Can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE?
 *
 * Please note that discard won't be sent to the target device of a device
 * replace.
 */
static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
					 u64 logical, u64 *length_ret,
					 struct btrfs_io_context **bioc_ret)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_io_context *bioc;
	u64 length = *length_ret;
	u64 offset;
	u64 stripe_nr;
	u64 stripe_nr_end;
	u64 stripe_end_offset;
	u64 stripe_cnt;
	u64 stripe_len;
	u64 stripe_offset;
	u64 num_stripes;
	u32 stripe_index;
	u32 factor = 0;
	u32 sub_stripes = 0;
	u64 stripes_per_dev = 0;
	u32 remaining_stripes = 0;
	u32 last_stripe = 0;
	int ret = 0;
	int i;

	/*
	 * Discard always returns a bioc.
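	 * On success, *bioc_ret is set and the caller is expected to release
	 * it with btrfs_put_bioc() when done, mirroring how the other
	 * mapping helpers in this file hand out io contexts.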
*/ 5942 ASSERT(bioc_ret); 5943 5944 em = btrfs_get_chunk_map(fs_info, logical, length); 5945 if (IS_ERR(em)) 5946 return PTR_ERR(em); 5947 5948 map = em->map_lookup; 5949 /* we don't discard raid56 yet */ 5950 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 5951 ret = -EOPNOTSUPP; 5952 goto out; 5953 } 5954 5955 offset = logical - em->start; 5956 length = min_t(u64, em->start + em->len - logical, length); 5957 *length_ret = length; 5958 5959 stripe_len = map->stripe_len; 5960 /* 5961 * stripe_nr counts the total number of stripes we have to stride 5962 * to get to this block 5963 */ 5964 stripe_nr = div64_u64(offset, stripe_len); 5965 5966 /* stripe_offset is the offset of this block in its stripe */ 5967 stripe_offset = offset - stripe_nr * stripe_len; 5968 5969 stripe_nr_end = round_up(offset + length, map->stripe_len); 5970 stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len); 5971 stripe_cnt = stripe_nr_end - stripe_nr; 5972 stripe_end_offset = stripe_nr_end * map->stripe_len - 5973 (offset + length); 5974 /* 5975 * after this, stripe_nr is the number of stripes on this 5976 * device we have to walk to find the data, and stripe_index is 5977 * the number of our device in the stripe array 5978 */ 5979 num_stripes = 1; 5980 stripe_index = 0; 5981 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 5982 BTRFS_BLOCK_GROUP_RAID10)) { 5983 if (map->type & BTRFS_BLOCK_GROUP_RAID0) 5984 sub_stripes = 1; 5985 else 5986 sub_stripes = map->sub_stripes; 5987 5988 factor = map->num_stripes / sub_stripes; 5989 num_stripes = min_t(u64, map->num_stripes, 5990 sub_stripes * stripe_cnt); 5991 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 5992 stripe_index *= sub_stripes; 5993 stripes_per_dev = div_u64_rem(stripe_cnt, factor, 5994 &remaining_stripes); 5995 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe); 5996 last_stripe *= sub_stripes; 5997 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK | 5998 BTRFS_BLOCK_GROUP_DUP)) { 5999 num_stripes = map->num_stripes; 6000 } else { 6001 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6002 &stripe_index); 6003 } 6004 6005 bioc = alloc_btrfs_io_context(fs_info, num_stripes, 0); 6006 if (!bioc) { 6007 ret = -ENOMEM; 6008 goto out; 6009 } 6010 6011 for (i = 0; i < num_stripes; i++) { 6012 bioc->stripes[i].physical = 6013 map->stripes[stripe_index].physical + 6014 stripe_offset + stripe_nr * map->stripe_len; 6015 bioc->stripes[i].dev = map->stripes[stripe_index].dev; 6016 6017 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 6018 BTRFS_BLOCK_GROUP_RAID10)) { 6019 bioc->stripes[i].length = stripes_per_dev * 6020 map->stripe_len; 6021 6022 if (i / sub_stripes < remaining_stripes) 6023 bioc->stripes[i].length += map->stripe_len; 6024 6025 /* 6026 * Special for the first stripe and 6027 * the last stripe: 6028 * 6029 * |-------|...|-------| 6030 * |----------| 6031 * off end_off 6032 */ 6033 if (i < sub_stripes) 6034 bioc->stripes[i].length -= stripe_offset; 6035 6036 if (stripe_index >= last_stripe && 6037 stripe_index <= (last_stripe + 6038 sub_stripes - 1)) 6039 bioc->stripes[i].length -= stripe_end_offset; 6040 6041 if (i == sub_stripes - 1) 6042 stripe_offset = 0; 6043 } else { 6044 bioc->stripes[i].length = length; 6045 } 6046 6047 stripe_index++; 6048 if (stripe_index == map->num_stripes) { 6049 stripe_index = 0; 6050 stripe_nr++; 6051 } 6052 } 6053 6054 *bioc_ret = bioc; 6055 bioc->map_type = map->type; 6056 bioc->num_stripes = num_stripes; 6057 out: 6058 free_extent_map(em); 6059 return ret; 6060 } 6061 6062 /* 6063 * In dev-replace case, for 
repair case (that's the only case where the mirror is selected explicitly
 * when calling btrfs_map_block), blocks left of the left cursor can also be
 * read from the target drive.
 *
 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
 * array of stripes.
 * For READ, it also needs to be supported using the same mirror number.
 *
 * If the requested block is not left of the left cursor, EIO is returned.
 * This can happen because btrfs_num_copies() returns one more in the
 * dev-replace case.
 */
static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
					 u64 logical, u64 length,
					 u64 srcdev_devid, int *mirror_num,
					 u64 *physical)
{
	struct btrfs_io_context *bioc = NULL;
	int num_stripes;
	int index_srcdev = 0;
	int found = 0;
	u64 physical_of_found = 0;
	int i;
	int ret = 0;

	ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
				logical, &length, &bioc, 0, 0);
	if (ret) {
		ASSERT(bioc == NULL);
		return ret;
	}

	num_stripes = bioc->num_stripes;
	if (*mirror_num > num_stripes) {
		/*
		 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
		 * which means that the requested area is not left of the
		 * left cursor.
		 */
		btrfs_put_bioc(bioc);
		return -EIO;
	}

	/*
	 * Process the rest of the function using the mirror_num of the
	 * source drive. Therefore look it up first. At the end, patch the
	 * device pointer to the one of the target drive.
	 */
	for (i = 0; i < num_stripes; i++) {
		if (bioc->stripes[i].dev->devid != srcdev_devid)
			continue;

		/*
		 * In case of DUP, in order to keep it simple, only add the
		 * mirror with the lowest physical address.
		 */
		if (found &&
		    physical_of_found <= bioc->stripes[i].physical)
			continue;

		index_srcdev = i;
		found = 1;
		physical_of_found = bioc->stripes[i].physical;
	}

	btrfs_put_bioc(bioc);

	ASSERT(found);
	if (!found)
		return -EIO;

	*mirror_num = index_srcdev + 1;
	*physical = physical_of_found;
	return ret;
}

static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical)
{
	struct btrfs_block_group *cache;
	bool ret;

	/* A non-zoned filesystem does not use the "to_copy" flag */
	if (!btrfs_is_zoned(fs_info))
		return false;

	cache = btrfs_lookup_block_group(fs_info, logical);

	spin_lock(&cache->lock);
	ret = cache->to_copy;
	spin_unlock(&cache->lock);

	btrfs_put_block_group(cache);
	return ret;
}

static void handle_ops_on_dev_replace(enum btrfs_map_op op,
				      struct btrfs_io_context **bioc_ret,
				      struct btrfs_dev_replace *dev_replace,
				      u64 logical,
				      int *num_stripes_ret, int *max_errors_ret)
{
	struct btrfs_io_context *bioc = *bioc_ret;
	u64 srcdev_devid = dev_replace->srcdev->devid;
	int tgtdev_indexes = 0;
	int num_stripes = *num_stripes_ret;
	int max_errors = *max_errors_ret;
	int i;

	if (op == BTRFS_MAP_WRITE) {
		int index_where_to_add;

		/*
		 * A block group which has "to_copy" set will eventually be
		 * copied by the dev-replace process. We can avoid cloning
		 * the IO here.
6177 */ 6178 if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical)) 6179 return; 6180 6181 /* 6182 * duplicate the write operations while the dev replace 6183 * procedure is running. Since the copying of the old disk to 6184 * the new disk takes place at run time while the filesystem is 6185 * mounted writable, the regular write operations to the old 6186 * disk have to be duplicated to go to the new disk as well. 6187 * 6188 * Note that device->missing is handled by the caller, and that 6189 * the write to the old disk is already set up in the stripes 6190 * array. 6191 */ 6192 index_where_to_add = num_stripes; 6193 for (i = 0; i < num_stripes; i++) { 6194 if (bioc->stripes[i].dev->devid == srcdev_devid) { 6195 /* write to new disk, too */ 6196 struct btrfs_io_stripe *new = 6197 bioc->stripes + index_where_to_add; 6198 struct btrfs_io_stripe *old = 6199 bioc->stripes + i; 6200 6201 new->physical = old->physical; 6202 new->length = old->length; 6203 new->dev = dev_replace->tgtdev; 6204 bioc->tgtdev_map[i] = index_where_to_add; 6205 index_where_to_add++; 6206 max_errors++; 6207 tgtdev_indexes++; 6208 } 6209 } 6210 num_stripes = index_where_to_add; 6211 } else if (op == BTRFS_MAP_GET_READ_MIRRORS) { 6212 int index_srcdev = 0; 6213 int found = 0; 6214 u64 physical_of_found = 0; 6215 6216 /* 6217 * During the dev-replace procedure, the target drive can also 6218 * be used to read data in case it is needed to repair a corrupt 6219 * block elsewhere. This is possible if the requested area is 6220 * left of the left cursor. In this area, the target drive is a 6221 * full copy of the source drive. 6222 */ 6223 for (i = 0; i < num_stripes; i++) { 6224 if (bioc->stripes[i].dev->devid == srcdev_devid) { 6225 /* 6226 * In case of DUP, in order to keep it simple, 6227 * only add the mirror with the lowest physical 6228 * address 6229 */ 6230 if (found && 6231 physical_of_found <= bioc->stripes[i].physical) 6232 continue; 6233 index_srcdev = i; 6234 found = 1; 6235 physical_of_found = bioc->stripes[i].physical; 6236 } 6237 } 6238 if (found) { 6239 struct btrfs_io_stripe *tgtdev_stripe = 6240 bioc->stripes + num_stripes; 6241 6242 tgtdev_stripe->physical = physical_of_found; 6243 tgtdev_stripe->length = 6244 bioc->stripes[index_srcdev].length; 6245 tgtdev_stripe->dev = dev_replace->tgtdev; 6246 bioc->tgtdev_map[index_srcdev] = num_stripes; 6247 6248 tgtdev_indexes++; 6249 num_stripes++; 6250 } 6251 } 6252 6253 *num_stripes_ret = num_stripes; 6254 *max_errors_ret = max_errors; 6255 bioc->num_tgtdevs = tgtdev_indexes; 6256 *bioc_ret = bioc; 6257 } 6258 6259 static bool need_full_stripe(enum btrfs_map_op op) 6260 { 6261 return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS); 6262 } 6263 6264 /* 6265 * Calculate the geometry of a particular (address, len) tuple. This 6266 * information is used to calculate how big a particular bio can get before it 6267 * straddles a stripe. 6268 * 6269 * @fs_info: the filesystem 6270 * @em: mapping containing the logical extent 6271 * @op: type of operation - write or read 6272 * @logical: address that we want to figure out the geometry of 6273 * @io_geom: pointer used to return values 6274 * 6275 * Returns < 0 in case a chunk for the given logical address cannot be found, 6276 * usually shouldn't happen unless @logical is corrupted, 0 otherwise. 
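 *
 * A worked example (illustrative, assuming the common 64K stripe_len): for
 * a non-RAID56 profile, a logical address 200K into the chunk gives
 * stripe_nr = 3 and stripe_offset = 8K, so at most 56K can be added to the
 * bio before it would straddle into the next stripe.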
6277 */ 6278 int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *em, 6279 enum btrfs_map_op op, u64 logical, 6280 struct btrfs_io_geometry *io_geom) 6281 { 6282 struct map_lookup *map; 6283 u64 len; 6284 u64 offset; 6285 u64 stripe_offset; 6286 u64 stripe_nr; 6287 u64 stripe_len; 6288 u64 raid56_full_stripe_start = (u64)-1; 6289 int data_stripes; 6290 6291 ASSERT(op != BTRFS_MAP_DISCARD); 6292 6293 map = em->map_lookup; 6294 /* Offset of this logical address in the chunk */ 6295 offset = logical - em->start; 6296 /* Len of a stripe in a chunk */ 6297 stripe_len = map->stripe_len; 6298 /* Stripe where this block falls in */ 6299 stripe_nr = div64_u64(offset, stripe_len); 6300 /* Offset of stripe in the chunk */ 6301 stripe_offset = stripe_nr * stripe_len; 6302 if (offset < stripe_offset) { 6303 btrfs_crit(fs_info, 6304 "stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu", 6305 stripe_offset, offset, em->start, logical, stripe_len); 6306 return -EINVAL; 6307 } 6308 6309 /* stripe_offset is the offset of this block in its stripe */ 6310 stripe_offset = offset - stripe_offset; 6311 data_stripes = nr_data_stripes(map); 6312 6313 if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { 6314 u64 max_len = stripe_len - stripe_offset; 6315 6316 /* 6317 * In case of raid56, we need to know the stripe aligned start 6318 */ 6319 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6320 unsigned long full_stripe_len = stripe_len * data_stripes; 6321 raid56_full_stripe_start = offset; 6322 6323 /* 6324 * Allow a write of a full stripe, but make sure we 6325 * don't allow straddling of stripes 6326 */ 6327 raid56_full_stripe_start = div64_u64(raid56_full_stripe_start, 6328 full_stripe_len); 6329 raid56_full_stripe_start *= full_stripe_len; 6330 6331 /* 6332 * For writes to RAID[56], allow a full stripeset across 6333 * all disks. For other RAID types and for RAID[56] 6334 * reads, just allow a single stripe (on a single disk). 
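		 *
		 * Illustrative numbers (assumed, not from the original
		 * comment): RAID5 over 3 devices has data_stripes = 2, so
		 * with a 64K stripe_len the full stripe is 128K. A write
		 * aligned to raid56_full_stripe_start may thus span up to
		 * 128K, while a read is still capped at the 64K stripe
		 * boundary.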
6335 */ 6336 if (op == BTRFS_MAP_WRITE) { 6337 max_len = stripe_len * data_stripes - 6338 (offset - raid56_full_stripe_start); 6339 } 6340 } 6341 len = min_t(u64, em->len - offset, max_len); 6342 } else { 6343 len = em->len - offset; 6344 } 6345 6346 io_geom->len = len; 6347 io_geom->offset = offset; 6348 io_geom->stripe_len = stripe_len; 6349 io_geom->stripe_nr = stripe_nr; 6350 io_geom->stripe_offset = stripe_offset; 6351 io_geom->raid56_stripe_offset = raid56_full_stripe_start; 6352 6353 return 0; 6354 } 6355 6356 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, 6357 enum btrfs_map_op op, 6358 u64 logical, u64 *length, 6359 struct btrfs_io_context **bioc_ret, 6360 int mirror_num, int need_raid_map) 6361 { 6362 struct extent_map *em; 6363 struct map_lookup *map; 6364 u64 stripe_offset; 6365 u64 stripe_nr; 6366 u64 stripe_len; 6367 u32 stripe_index; 6368 int data_stripes; 6369 int i; 6370 int ret = 0; 6371 int num_stripes; 6372 int max_errors = 0; 6373 int tgtdev_indexes = 0; 6374 struct btrfs_io_context *bioc = NULL; 6375 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; 6376 int dev_replace_is_ongoing = 0; 6377 int num_alloc_stripes; 6378 int patch_the_first_stripe_for_dev_replace = 0; 6379 u64 physical_to_patch_in_first_stripe = 0; 6380 u64 raid56_full_stripe_start = (u64)-1; 6381 struct btrfs_io_geometry geom; 6382 6383 ASSERT(bioc_ret); 6384 ASSERT(op != BTRFS_MAP_DISCARD); 6385 6386 em = btrfs_get_chunk_map(fs_info, logical, *length); 6387 ASSERT(!IS_ERR(em)); 6388 6389 ret = btrfs_get_io_geometry(fs_info, em, op, logical, &geom); 6390 if (ret < 0) 6391 return ret; 6392 6393 map = em->map_lookup; 6394 6395 *length = geom.len; 6396 stripe_len = geom.stripe_len; 6397 stripe_nr = geom.stripe_nr; 6398 stripe_offset = geom.stripe_offset; 6399 raid56_full_stripe_start = geom.raid56_stripe_offset; 6400 data_stripes = nr_data_stripes(map); 6401 6402 down_read(&dev_replace->rwsem); 6403 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace); 6404 /* 6405 * Hold the semaphore for read during the whole operation, write is 6406 * requested at commit time but must wait. 
6407 */ 6408 if (!dev_replace_is_ongoing) 6409 up_read(&dev_replace->rwsem); 6410 6411 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 && 6412 !need_full_stripe(op) && dev_replace->tgtdev != NULL) { 6413 ret = get_extra_mirror_from_replace(fs_info, logical, *length, 6414 dev_replace->srcdev->devid, 6415 &mirror_num, 6416 &physical_to_patch_in_first_stripe); 6417 if (ret) 6418 goto out; 6419 else 6420 patch_the_first_stripe_for_dev_replace = 1; 6421 } else if (mirror_num > map->num_stripes) { 6422 mirror_num = 0; 6423 } 6424 6425 num_stripes = 1; 6426 stripe_index = 0; 6427 if (map->type & BTRFS_BLOCK_GROUP_RAID0) { 6428 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6429 &stripe_index); 6430 if (!need_full_stripe(op)) 6431 mirror_num = 1; 6432 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) { 6433 if (need_full_stripe(op)) 6434 num_stripes = map->num_stripes; 6435 else if (mirror_num) 6436 stripe_index = mirror_num - 1; 6437 else { 6438 stripe_index = find_live_mirror(fs_info, map, 0, 6439 dev_replace_is_ongoing); 6440 mirror_num = stripe_index + 1; 6441 } 6442 6443 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { 6444 if (need_full_stripe(op)) { 6445 num_stripes = map->num_stripes; 6446 } else if (mirror_num) { 6447 stripe_index = mirror_num - 1; 6448 } else { 6449 mirror_num = 1; 6450 } 6451 6452 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { 6453 u32 factor = map->num_stripes / map->sub_stripes; 6454 6455 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 6456 stripe_index *= map->sub_stripes; 6457 6458 if (need_full_stripe(op)) 6459 num_stripes = map->sub_stripes; 6460 else if (mirror_num) 6461 stripe_index += mirror_num - 1; 6462 else { 6463 int old_stripe_index = stripe_index; 6464 stripe_index = find_live_mirror(fs_info, map, 6465 stripe_index, 6466 dev_replace_is_ongoing); 6467 mirror_num = stripe_index - old_stripe_index + 1; 6468 } 6469 6470 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6471 if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) { 6472 /* push stripe_nr back to the start of the full stripe */ 6473 stripe_nr = div64_u64(raid56_full_stripe_start, 6474 stripe_len * data_stripes); 6475 6476 /* RAID[56] write or recovery. Return all stripes */ 6477 num_stripes = map->num_stripes; 6478 max_errors = nr_parity_stripes(map); 6479 6480 *length = map->stripe_len; 6481 stripe_index = 0; 6482 stripe_offset = 0; 6483 } else { 6484 /* 6485 * Mirror #0 or #1 means the original data block. 6486 * Mirror #2 is RAID5 parity block. 6487 * Mirror #3 is RAID6 Q block. 
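			 *
			 * For example: on RAID6 with 5 devices,
			 * data_stripes = 3, so mirror_num == 3 gives
			 * stripe_index = data_stripes + mirror_num - 2 = 4,
			 * the Q stripe, before the rotation below
			 * distributes parity across the devices.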
 */
			stripe_nr = div_u64_rem(stripe_nr,
					data_stripes, &stripe_index);
			if (mirror_num > 1)
				stripe_index = data_stripes + mirror_num - 2;

			/* We distribute the parity blocks across stripes */
			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
					&stripe_index);
			if (!need_full_stripe(op) && mirror_num <= 1)
				mirror_num = 1;
		}
	} else {
		/*
		 * After this, stripe_nr is the number of stripes on this
		 * device we have to walk to find the data, and stripe_index
		 * is the number of our device in the stripe array.
		 */
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
				&stripe_index);
		mirror_num = stripe_index + 1;
	}
	if (stripe_index >= map->num_stripes) {
		btrfs_crit(fs_info,
			   "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
			   stripe_index, map->num_stripes);
		ret = -EINVAL;
		goto out;
	}

	num_alloc_stripes = num_stripes;
	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
		if (op == BTRFS_MAP_WRITE)
			num_alloc_stripes <<= 1;
		if (op == BTRFS_MAP_GET_READ_MIRRORS)
			num_alloc_stripes++;
		tgtdev_indexes = num_stripes;
	}

	bioc = alloc_btrfs_io_context(fs_info, num_alloc_stripes, tgtdev_indexes);
	if (!bioc) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_stripes; i++) {
		bioc->stripes[i].physical = map->stripes[stripe_index].physical +
			stripe_offset + stripe_nr * map->stripe_len;
		bioc->stripes[i].dev = map->stripes[stripe_index].dev;
		stripe_index++;
	}

	/* Build raid_map */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
	    (need_full_stripe(op) || mirror_num > 1)) {
		u64 tmp;
		unsigned rot;

		/* Work out the disk rotation on this stripe-set */
		div_u64_rem(stripe_nr, num_stripes, &rot);

		/* Fill in the logical address of each stripe */
		tmp = stripe_nr * data_stripes;
		for (i = 0; i < data_stripes; i++)
			bioc->raid_map[(i + rot) % num_stripes] =
				em->start + (tmp + i) * map->stripe_len;

		bioc->raid_map[(i + rot) % num_stripes] = RAID5_P_STRIPE;
		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
			bioc->raid_map[(i + rot + 1) % num_stripes] =
				RAID6_Q_STRIPE;

		sort_parity_stripes(bioc, num_stripes);
	}

	if (need_full_stripe(op))
		max_errors = btrfs_chunk_max_errors(map);

	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
	    need_full_stripe(op)) {
		handle_ops_on_dev_replace(op, &bioc, dev_replace, logical,
					  &num_stripes, &max_errors);
	}

	*bioc_ret = bioc;
	bioc->map_type = map->type;
	bioc->num_stripes = num_stripes;
	bioc->max_errors = max_errors;
	bioc->mirror_num = mirror_num;

	/*
	 * This is the case that REQ_READ && dev_replace_is_ongoing &&
	 * mirror_num == num_stripes + 1 && the dev-replace target drive is
	 * available as a mirror.
	 */
	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
		WARN_ON(num_stripes > 1);
		bioc->stripes[0].dev = dev_replace->tgtdev;
		bioc->stripes[0].physical = physical_to_patch_in_first_stripe;
		bioc->mirror_num = map->num_stripes + 1;
	}
out:
	if (dev_replace_is_ongoing) {
		lockdep_assert_held(&dev_replace->rwsem);
		/* Unlock and let waiting writers proceed */
		up_read(&dev_replace->rwsem);
	}
	free_extent_map(em);
	return ret;
}
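/*
 * A minimal sketch of how the mapping API below is typically consumed
 * (illustrative only; error handling is trimmed, and submit_to_device() is
 * a placeholder for the caller's actual submission path):
 *
 *	struct btrfs_io_context *bioc = NULL;
 *	u64 length = size_of_io;
 *	int i, ret;
 *
 *	ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical, &length,
 *			      &bioc, 0);
 *	if (ret)
 *		return ret;
 *	for (i = 0; i < bioc->num_stripes; i++)
 *		submit_to_device(bioc->stripes[i].dev,
 *				 bioc->stripes[i].physical);
 *	btrfs_put_bioc(bioc);
 *
 * On return, length may have been trimmed to the amount that can be mapped
 * without straddling a stripe.
 */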
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		    u64 logical, u64 *length,
		    struct btrfs_io_context **bioc_ret, int mirror_num)
{
	if (op == BTRFS_MAP_DISCARD)
		return __btrfs_map_block_for_discard(fs_info, logical,
						     length, bioc_ret);

	return __btrfs_map_block(fs_info, op, logical, length, bioc_ret,
				 mirror_num, 0);
}

/* For Scrub/replace */
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		     u64 logical, u64 *length,
		     struct btrfs_io_context **bioc_ret)
{
	return __btrfs_map_block(fs_info, op, logical, length, bioc_ret, 0, 1);
}

static inline void btrfs_end_bioc(struct btrfs_io_context *bioc, struct bio *bio)
{
	bio->bi_private = bioc->private;
	bio->bi_end_io = bioc->end_io;
	bio_endio(bio);

	btrfs_put_bioc(bioc);
}

static void btrfs_end_bio(struct bio *bio)
{
	struct btrfs_io_context *bioc = bio->bi_private;
	int is_orig_bio = 0;

	if (bio->bi_status) {
		atomic_inc(&bioc->error);
		if (bio->bi_status == BLK_STS_IOERR ||
		    bio->bi_status == BLK_STS_TARGET) {
			struct btrfs_device *dev = btrfs_bio(bio)->device;

			ASSERT(dev->bdev);
			if (btrfs_op(bio) == BTRFS_MAP_WRITE)
				btrfs_dev_stat_inc_and_print(dev,
						BTRFS_DEV_STAT_WRITE_ERRS);
			else if (!(bio->bi_opf & REQ_RAHEAD))
				btrfs_dev_stat_inc_and_print(dev,
						BTRFS_DEV_STAT_READ_ERRS);
			if (bio->bi_opf & REQ_PREFLUSH)
				btrfs_dev_stat_inc_and_print(dev,
						BTRFS_DEV_STAT_FLUSH_ERRS);
		}
	}

	if (bio == bioc->orig_bio)
		is_orig_bio = 1;

	btrfs_bio_counter_dec(bioc->fs_info);

	if (atomic_dec_and_test(&bioc->stripes_pending)) {
		if (!is_orig_bio) {
			bio_put(bio);
			bio = bioc->orig_bio;
		}

		btrfs_bio(bio)->mirror_num = bioc->mirror_num;
		/*
		 * Only send an error to the higher layers if it is beyond
		 * the tolerance of the btrfs bio.
		 */
		if (atomic_read(&bioc->error) > bioc->max_errors) {
			bio->bi_status = BLK_STS_IOERR;
		} else {
			/*
			 * This bio is actually up to date; we didn't go
			 * over the max number of errors.
			 */
			bio->bi_status = BLK_STS_OK;
		}

		btrfs_end_bioc(bioc, bio);
	} else if (!is_orig_bio) {
		bio_put(bio);
	}
}

static void submit_stripe_bio(struct btrfs_io_context *bioc, struct bio *bio,
			      u64 physical, struct btrfs_device *dev)
{
	struct btrfs_fs_info *fs_info = bioc->fs_info;

	bio->bi_private = bioc;
	btrfs_bio(bio)->device = dev;
	bio->bi_end_io = btrfs_end_bio;
	bio->bi_iter.bi_sector = physical >> 9;
	/*
	 * For zone append writing, bi_sector must point to the beginning of
	 * the zone.
	 */
	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
		if (btrfs_dev_is_sequential(dev, physical)) {
			u64 zone_start = round_down(physical,
						    fs_info->zone_size);

			bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT;
		} else {
			bio->bi_opf &= ~REQ_OP_ZONE_APPEND;
			bio->bi_opf |= REQ_OP_WRITE;
		}
	}
	btrfs_debug_in_rcu(fs_info,
	"btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
		bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector,
		(unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name),
		dev->devid, bio->bi_iter.bi_size);
	bio_set_dev(bio, dev->bdev);

	btrfs_bio_counter_inc_noblocked(fs_info);

btrfsic_submit_bio(bio); 6716 } 6717 6718 static void bioc_error(struct btrfs_io_context *bioc, struct bio *bio, u64 logical) 6719 { 6720 atomic_inc(&bioc->error); 6721 if (atomic_dec_and_test(&bioc->stripes_pending)) { 6722 /* Should be the original bio. */ 6723 WARN_ON(bio != bioc->orig_bio); 6724 6725 btrfs_bio(bio)->mirror_num = bioc->mirror_num; 6726 bio->bi_iter.bi_sector = logical >> 9; 6727 if (atomic_read(&bioc->error) > bioc->max_errors) 6728 bio->bi_status = BLK_STS_IOERR; 6729 else 6730 bio->bi_status = BLK_STS_OK; 6731 btrfs_end_bioc(bioc, bio); 6732 } 6733 } 6734 6735 blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, 6736 int mirror_num) 6737 { 6738 struct btrfs_device *dev; 6739 struct bio *first_bio = bio; 6740 u64 logical = bio->bi_iter.bi_sector << 9; 6741 u64 length = 0; 6742 u64 map_length; 6743 int ret; 6744 int dev_nr; 6745 int total_devs; 6746 struct btrfs_io_context *bioc = NULL; 6747 6748 length = bio->bi_iter.bi_size; 6749 map_length = length; 6750 6751 btrfs_bio_counter_inc_blocked(fs_info); 6752 ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical, 6753 &map_length, &bioc, mirror_num, 1); 6754 if (ret) { 6755 btrfs_bio_counter_dec(fs_info); 6756 return errno_to_blk_status(ret); 6757 } 6758 6759 total_devs = bioc->num_stripes; 6760 bioc->orig_bio = first_bio; 6761 bioc->private = first_bio->bi_private; 6762 bioc->end_io = first_bio->bi_end_io; 6763 atomic_set(&bioc->stripes_pending, bioc->num_stripes); 6764 6765 if ((bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) && 6766 ((btrfs_op(bio) == BTRFS_MAP_WRITE) || (mirror_num > 1))) { 6767 /* In this case, map_length has been set to the length of 6768 a single stripe; not the whole write */ 6769 if (btrfs_op(bio) == BTRFS_MAP_WRITE) { 6770 ret = raid56_parity_write(bio, bioc, map_length); 6771 } else { 6772 ret = raid56_parity_recover(bio, bioc, map_length, 6773 mirror_num, 1); 6774 } 6775 6776 btrfs_bio_counter_dec(fs_info); 6777 return errno_to_blk_status(ret); 6778 } 6779 6780 if (map_length < length) { 6781 btrfs_crit(fs_info, 6782 "mapping failed logical %llu bio len %llu len %llu", 6783 logical, length, map_length); 6784 BUG(); 6785 } 6786 6787 for (dev_nr = 0; dev_nr < total_devs; dev_nr++) { 6788 dev = bioc->stripes[dev_nr].dev; 6789 if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING, 6790 &dev->dev_state) || 6791 (btrfs_op(first_bio) == BTRFS_MAP_WRITE && 6792 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) { 6793 bioc_error(bioc, first_bio, logical); 6794 continue; 6795 } 6796 6797 if (dev_nr < total_devs - 1) 6798 bio = btrfs_bio_clone(first_bio); 6799 else 6800 bio = first_bio; 6801 6802 submit_stripe_bio(bioc, bio, bioc->stripes[dev_nr].physical, dev); 6803 } 6804 btrfs_bio_counter_dec(fs_info); 6805 return BLK_STS_OK; 6806 } 6807 6808 static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args, 6809 const struct btrfs_fs_devices *fs_devices) 6810 { 6811 if (args->fsid == NULL) 6812 return true; 6813 if (memcmp(fs_devices->metadata_uuid, args->fsid, BTRFS_FSID_SIZE) == 0) 6814 return true; 6815 return false; 6816 } 6817 6818 static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args, 6819 const struct btrfs_device *device) 6820 { 6821 ASSERT((args->devid != (u64)-1) || args->missing); 6822 6823 if ((args->devid != (u64)-1) && device->devid != args->devid) 6824 return false; 6825 if (args->uuid && memcmp(device->uuid, args->uuid, BTRFS_UUID_SIZE) != 0) 6826 return false; 6827 if (!args->missing) 6828 return true; 6829 if 
(test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) && 6830 !device->bdev) 6831 return true; 6832 return false; 6833 } 6834 6835 /* 6836 * Find a device specified by @devid or @uuid in the list of @fs_devices, or 6837 * return NULL. 6838 * 6839 * If devid and uuid are both specified, the match must be exact, otherwise 6840 * only devid is used. 6841 */ 6842 struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices, 6843 const struct btrfs_dev_lookup_args *args) 6844 { 6845 struct btrfs_device *device; 6846 struct btrfs_fs_devices *seed_devs; 6847 6848 if (dev_args_match_fs_devices(args, fs_devices)) { 6849 list_for_each_entry(device, &fs_devices->devices, dev_list) { 6850 if (dev_args_match_device(args, device)) 6851 return device; 6852 } 6853 } 6854 6855 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 6856 if (!dev_args_match_fs_devices(args, seed_devs)) 6857 continue; 6858 list_for_each_entry(device, &seed_devs->devices, dev_list) { 6859 if (dev_args_match_device(args, device)) 6860 return device; 6861 } 6862 } 6863 6864 return NULL; 6865 } 6866 6867 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices, 6868 u64 devid, u8 *dev_uuid) 6869 { 6870 struct btrfs_device *device; 6871 unsigned int nofs_flag; 6872 6873 /* 6874 * We call this under the chunk_mutex, so we want to use NOFS for this 6875 * allocation, however we don't want to change btrfs_alloc_device() to 6876 * always do NOFS because we use it in a lot of other GFP_KERNEL safe 6877 * places. 6878 */ 6879 nofs_flag = memalloc_nofs_save(); 6880 device = btrfs_alloc_device(NULL, &devid, dev_uuid); 6881 memalloc_nofs_restore(nofs_flag); 6882 if (IS_ERR(device)) 6883 return device; 6884 6885 list_add(&device->dev_list, &fs_devices->devices); 6886 device->fs_devices = fs_devices; 6887 fs_devices->num_devices++; 6888 6889 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 6890 fs_devices->missing_devices++; 6891 6892 return device; 6893 } 6894 6895 /** 6896 * btrfs_alloc_device - allocate struct btrfs_device 6897 * @fs_info: used only for generating a new devid, can be NULL if 6898 * devid is provided (i.e. @devid != NULL). 6899 * @devid: a pointer to devid for this device. If NULL a new devid 6900 * is generated. 6901 * @uuid: a pointer to UUID for this device. If NULL a new UUID 6902 * is generated. 6903 * 6904 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR() 6905 * on error. Returned struct is not linked onto any lists and must be 6906 * destroyed with btrfs_free_device. 
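 *
 * A hypothetical call allocating a device with a fixed devid and a freshly
 * generated UUID (sketch only, not taken from a real caller):
 *
 *	u64 devid = 1;
 *	struct btrfs_device *dev = btrfs_alloc_device(NULL, &devid, NULL);
 *
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);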
 */
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid,
					const u8 *uuid)
{
	struct btrfs_device *dev;
	u64 tmp;

	if (WARN_ON(!devid && !fs_info))
		return ERR_PTR(-EINVAL);

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate a bio that's always going to be used for flushing
	 * device barriers and matches the device lifespan.
	 */
	dev->flush_bio = bio_kmalloc(GFP_KERNEL, 0);
	if (!dev->flush_bio) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->post_commit_list);

	atomic_set(&dev->reada_in_flight, 0);
	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	extent_io_tree_init(fs_info, &dev->alloc_state,
			    IO_TREE_DEVICE_ALLOC_STATE, NULL);

	if (devid)
		tmp = *devid;
	else {
		int ret;

		ret = find_next_devid(fs_info, &tmp);
		if (ret) {
			btrfs_free_device(dev);
			return ERR_PTR(ret);
		}
	}
	dev->devid = tmp;

	if (uuid)
		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
	else
		generate_random_uuid(dev->uuid);

	return dev;
}

static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
					u64 devid, u8 *uuid, bool error)
{
	if (error)
		btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
			     devid, uuid);
	else
		btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
			      devid, uuid);
}

static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
{
	const int data_stripes = calc_data_stripes(type, num_stripes);

	return div_u64(chunk_len, data_stripes);
}

#if BITS_PER_LONG == 32
/*
 * Due to the page cache limit, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE
 * can't be accessed on 32bit systems.
 *
 * This function does a mount-time check to reject the fs if it already has
 * a metadata chunk beyond that limit.
 */
static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
				  u64 logical, u64 length, u64 type)
{
	if (!(type & BTRFS_BLOCK_GROUP_METADATA))
		return 0;

	if (logical + length < MAX_LFS_FILESIZE)
		return 0;

	btrfs_err_32bit_limit(fs_info);
	return -EOVERFLOW;
}

/*
 * This is to give an early warning for any metadata chunk reaching
 * BTRFS_32BIT_EARLY_WARN_THRESHOLD.
 * Although we can still access the metadata, it's not going to be possible
 * once the limit is reached.
7009 */ 7010 static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info, 7011 u64 logical, u64 length, u64 type) 7012 { 7013 if (!(type & BTRFS_BLOCK_GROUP_METADATA)) 7014 return; 7015 7016 if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD) 7017 return; 7018 7019 btrfs_warn_32bit_limit(fs_info); 7020 } 7021 #endif 7022 7023 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf, 7024 struct btrfs_chunk *chunk) 7025 { 7026 BTRFS_DEV_LOOKUP_ARGS(args); 7027 struct btrfs_fs_info *fs_info = leaf->fs_info; 7028 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 7029 struct map_lookup *map; 7030 struct extent_map *em; 7031 u64 logical; 7032 u64 length; 7033 u64 devid; 7034 u64 type; 7035 u8 uuid[BTRFS_UUID_SIZE]; 7036 int num_stripes; 7037 int ret; 7038 int i; 7039 7040 logical = key->offset; 7041 length = btrfs_chunk_length(leaf, chunk); 7042 type = btrfs_chunk_type(leaf, chunk); 7043 num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 7044 7045 #if BITS_PER_LONG == 32 7046 ret = check_32bit_meta_chunk(fs_info, logical, length, type); 7047 if (ret < 0) 7048 return ret; 7049 warn_32bit_meta_chunk(fs_info, logical, length, type); 7050 #endif 7051 7052 /* 7053 * Only need to verify chunk item if we're reading from sys chunk array, 7054 * as chunk item in tree block is already verified by tree-checker. 7055 */ 7056 if (leaf->start == BTRFS_SUPER_INFO_OFFSET) { 7057 ret = btrfs_check_chunk_valid(leaf, chunk, logical); 7058 if (ret) 7059 return ret; 7060 } 7061 7062 read_lock(&map_tree->lock); 7063 em = lookup_extent_mapping(map_tree, logical, 1); 7064 read_unlock(&map_tree->lock); 7065 7066 /* already mapped? */ 7067 if (em && em->start <= logical && em->start + em->len > logical) { 7068 free_extent_map(em); 7069 return 0; 7070 } else if (em) { 7071 free_extent_map(em); 7072 } 7073 7074 em = alloc_extent_map(); 7075 if (!em) 7076 return -ENOMEM; 7077 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); 7078 if (!map) { 7079 free_extent_map(em); 7080 return -ENOMEM; 7081 } 7082 7083 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 7084 em->map_lookup = map; 7085 em->start = logical; 7086 em->len = length; 7087 em->orig_start = 0; 7088 em->block_start = 0; 7089 em->block_len = em->len; 7090 7091 map->num_stripes = num_stripes; 7092 map->io_width = btrfs_chunk_io_width(leaf, chunk); 7093 map->io_align = btrfs_chunk_io_align(leaf, chunk); 7094 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk); 7095 map->type = type; 7096 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); 7097 map->verified_stripes = 0; 7098 em->orig_block_len = calc_stripe_length(type, em->len, 7099 map->num_stripes); 7100 for (i = 0; i < num_stripes; i++) { 7101 map->stripes[i].physical = 7102 btrfs_stripe_offset_nr(leaf, chunk, i); 7103 devid = btrfs_stripe_devid_nr(leaf, chunk, i); 7104 args.devid = devid; 7105 read_extent_buffer(leaf, uuid, (unsigned long) 7106 btrfs_stripe_dev_uuid_nr(chunk, i), 7107 BTRFS_UUID_SIZE); 7108 args.uuid = uuid; 7109 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args); 7110 if (!map->stripes[i].dev && 7111 !btrfs_test_opt(fs_info, DEGRADED)) { 7112 free_extent_map(em); 7113 btrfs_report_missing_device(fs_info, devid, uuid, true); 7114 return -ENOENT; 7115 } 7116 if (!map->stripes[i].dev) { 7117 map->stripes[i].dev = 7118 add_missing_dev(fs_info->fs_devices, devid, 7119 uuid); 7120 if (IS_ERR(map->stripes[i].dev)) { 7121 free_extent_map(em); 7122 btrfs_err(fs_info, 7123 "failed to init missing dev %llu: %ld", 7124 devid, 
PTR_ERR(map->stripes[i].dev)); 7125 return PTR_ERR(map->stripes[i].dev); 7126 } 7127 btrfs_report_missing_device(fs_info, devid, uuid, false); 7128 } 7129 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 7130 &(map->stripes[i].dev->dev_state)); 7131 7132 } 7133 7134 write_lock(&map_tree->lock); 7135 ret = add_extent_mapping(map_tree, em, 0); 7136 write_unlock(&map_tree->lock); 7137 if (ret < 0) { 7138 btrfs_err(fs_info, 7139 "failed to add chunk map, start=%llu len=%llu: %d", 7140 em->start, em->len, ret); 7141 } 7142 free_extent_map(em); 7143 7144 return ret; 7145 } 7146 7147 static void fill_device_from_item(struct extent_buffer *leaf, 7148 struct btrfs_dev_item *dev_item, 7149 struct btrfs_device *device) 7150 { 7151 unsigned long ptr; 7152 7153 device->devid = btrfs_device_id(leaf, dev_item); 7154 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item); 7155 device->total_bytes = device->disk_total_bytes; 7156 device->commit_total_bytes = device->disk_total_bytes; 7157 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item); 7158 device->commit_bytes_used = device->bytes_used; 7159 device->type = btrfs_device_type(leaf, dev_item); 7160 device->io_align = btrfs_device_io_align(leaf, dev_item); 7161 device->io_width = btrfs_device_io_width(leaf, dev_item); 7162 device->sector_size = btrfs_device_sector_size(leaf, dev_item); 7163 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID); 7164 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 7165 7166 ptr = btrfs_device_uuid(dev_item); 7167 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 7168 } 7169 7170 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info, 7171 u8 *fsid) 7172 { 7173 struct btrfs_fs_devices *fs_devices; 7174 int ret; 7175 7176 lockdep_assert_held(&uuid_mutex); 7177 ASSERT(fsid); 7178 7179 /* This will match only for multi-device seed fs */ 7180 list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list) 7181 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE)) 7182 return fs_devices; 7183 7184 7185 fs_devices = find_fsid(fsid, NULL); 7186 if (!fs_devices) { 7187 if (!btrfs_test_opt(fs_info, DEGRADED)) 7188 return ERR_PTR(-ENOENT); 7189 7190 fs_devices = alloc_fs_devices(fsid, NULL); 7191 if (IS_ERR(fs_devices)) 7192 return fs_devices; 7193 7194 fs_devices->seeding = true; 7195 fs_devices->opened = 1; 7196 return fs_devices; 7197 } 7198 7199 /* 7200 * Upon first call for a seed fs fsid, just create a private copy of the 7201 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list 7202 */ 7203 fs_devices = clone_fs_devices(fs_devices); 7204 if (IS_ERR(fs_devices)) 7205 return fs_devices; 7206 7207 ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder); 7208 if (ret) { 7209 free_fs_devices(fs_devices); 7210 return ERR_PTR(ret); 7211 } 7212 7213 if (!fs_devices->seeding) { 7214 close_fs_devices(fs_devices); 7215 free_fs_devices(fs_devices); 7216 return ERR_PTR(-EINVAL); 7217 } 7218 7219 list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list); 7220 7221 return fs_devices; 7222 } 7223 7224 static int read_one_dev(struct extent_buffer *leaf, 7225 struct btrfs_dev_item *dev_item) 7226 { 7227 BTRFS_DEV_LOOKUP_ARGS(args); 7228 struct btrfs_fs_info *fs_info = leaf->fs_info; 7229 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7230 struct btrfs_device *device; 7231 u64 devid; 7232 int ret; 7233 u8 fs_uuid[BTRFS_FSID_SIZE]; 7234 u8 dev_uuid[BTRFS_UUID_SIZE]; 7235 7236 devid = args.devid = btrfs_device_id(leaf, 
dev_item);
	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
			   BTRFS_FSID_SIZE);
	args.uuid = dev_uuid;
	args.fsid = fs_uuid;

	if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
		fs_devices = open_seed_devices(fs_info, fs_uuid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);
	}

	device = btrfs_find_device(fs_info->fs_devices, &args);
	if (!device) {
		if (!btrfs_test_opt(fs_info, DEGRADED)) {
			btrfs_report_missing_device(fs_info, devid,
						    dev_uuid, true);
			return -ENOENT;
		}

		device = add_missing_dev(fs_devices, devid, dev_uuid);
		if (IS_ERR(device)) {
			btrfs_err(fs_info,
				  "failed to add missing dev %llu: %ld",
				  devid, PTR_ERR(device));
			return PTR_ERR(device);
		}
		btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
	} else {
		if (!device->bdev) {
			if (!btrfs_test_opt(fs_info, DEGRADED)) {
				btrfs_report_missing_device(fs_info,
						devid, dev_uuid, true);
				return -ENOENT;
			}
			btrfs_report_missing_device(fs_info, devid,
						    dev_uuid, false);
		}

		if (!device->bdev &&
		    !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			/*
			 * This happens when a device that was properly set up
			 * in the device info lists suddenly goes bad.
			 * device->bdev is NULL, and so we have to set the
			 * BTRFS_DEV_STATE_MISSING bit here.
			 */
			device->fs_devices->missing_devices++;
			set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}

		/* Move the device to its own fs_devices */
		if (device->fs_devices != fs_devices) {
			ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
					&device->dev_state));

			list_move(&device->dev_list, &fs_devices->devices);
			device->fs_devices->num_devices--;
			fs_devices->num_devices++;

			device->fs_devices->missing_devices--;
			fs_devices->missing_devices++;

			device->fs_devices = fs_devices;
		}
	}

	if (device->fs_devices != fs_info->fs_devices) {
		BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	if (device->bdev) {
		u64 max_total_bytes = i_size_read(device->bdev->bd_inode);

		if (device->total_bytes > max_total_bytes) {
			btrfs_err(fs_info,
			"device total_bytes should be at most %llu but found %llu",
				  max_total_bytes, device->total_bytes);
			return -EINVAL;
		}
	}
	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		device->fs_devices->total_rw_bytes += device->total_bytes;
		atomic64_add(device->total_bytes - device->bytes_used,
			     &fs_info->free_chunk_space);
	}
	ret = 0;
	return ret;
}

int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *array_ptr;
	unsigned long sb_array_offset;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur_offset;
	u64 type;
	struct btrfs_key key;

	ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
	/*
	 * This will create an extent buffer of nodesize, superblock size is
	 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
	 * overallocate but we can keep it as-is, only the first page is used.
	 */
	sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET,
					  root->root_key.objectid, 0);
	if (IS_ERR(sb))
		return PTR_ERR(sb);
	set_extent_buffer_uptodate(sb);
	/*
	 * The sb extent buffer is artificial and just used to read the
	 * system array. The set_extent_buffer_uptodate() call does not
	 * properly mark all its pages up-to-date when the page is larger:
	 * the extent does not cover the whole page and consequently
	 * check_page_uptodate does not find all the page's extents
	 * up-to-date (the hole beyond sb), and write_extent_buffer then
	 * triggers a WARN_ON.
	 *
	 * Regular short extents go through the
	 * mark_extent_buffer_dirty/writeback cycle, but sb spans only this
	 * function. Add an explicit SetPageUptodate call to silence the
	 * warning eg. on PowerPC 64.
	 */
	if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
		SetPageUptodate(sb->pages[0]);

	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	array_ptr = super_copy->sys_chunk_array;
	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur_offset = 0;

	while (cur_offset < array_size) {
		disk_key = (struct btrfs_disk_key *)array_ptr;
		len = sizeof(*disk_key);
		if (cur_offset + len > array_size)
			goto out_short_read;

		btrfs_disk_key_to_cpu(&key, disk_key);

		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;

		if (key.type != BTRFS_CHUNK_ITEM_KEY) {
			btrfs_err(fs_info,
			    "unexpected item type %u in sys_array at offset %u",
				  (u32)key.type, cur_offset);
			ret = -EIO;
			break;
		}

		chunk = (struct btrfs_chunk *)sb_array_offset;
		/*
		 * At least one btrfs_chunk with one stripe must be present,
		 * exact stripe count check comes afterwards.
		 */
		len = btrfs_chunk_item_size(1);
		if (cur_offset + len > array_size)
			goto out_short_read;

		num_stripes = btrfs_chunk_num_stripes(sb, chunk);
		if (!num_stripes) {
			btrfs_err(fs_info,
			"invalid number of stripes %u in sys_array at offset %u",
				  num_stripes, cur_offset);
			ret = -EIO;
			break;
		}

		type = btrfs_chunk_type(sb, chunk);
		if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
			btrfs_err(fs_info,
			"invalid chunk type %llu in sys_array at offset %u",
				  type, cur_offset);
			ret = -EIO;
			break;
		}

		len = btrfs_chunk_item_size(num_stripes);
		if (cur_offset + len > array_size)
			goto out_short_read;

		ret = read_one_chunk(&key, sb, chunk);
		if (ret)
			break;

		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;
	}
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return ret;

out_short_read:
	btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
		  len, cur_offset);
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return -EIO;
}

/*
 * Check if all chunks in the fs are OK for read-write degraded mount
 *
 * If the @failing_dev is specified, it's accounted as missing.
 *
 * Return true if all chunks meet the minimal RW mount requirements.
 * Return false if any chunk doesn't meet the minimal RW mount requirements.
 */
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
			       struct btrfs_device *failing_dev)
{
	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	u64 next_start = 0;
	bool ret = true;

	read_lock(&map_tree->lock);
	em = lookup_extent_mapping(map_tree, 0, (u64)-1);
	read_unlock(&map_tree->lock);
	/* No chunk at all? Return false anyway */
	if (!em) {
		ret = false;
		goto out;
	}
	while (em) {
		struct map_lookup *map;
		int missing = 0;
		int max_tolerated;
		int i;

		map = em->map_lookup;
		max_tolerated =
			btrfs_get_num_tolerated_disk_barrier_failures(
					map->type);
		for (i = 0; i < map->num_stripes; i++) {
			struct btrfs_device *dev = map->stripes[i].dev;

			if (!dev || !dev->bdev ||
			    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
			    dev->last_flush_error)
				missing++;
			else if (failing_dev && failing_dev == dev)
				missing++;
		}
		if (missing > max_tolerated) {
			if (!failing_dev)
				btrfs_warn(fs_info,
	"chunk %llu missing %d devices, max tolerance is %d for writable mount",
					   em->start, missing, max_tolerated);
			free_extent_map(em);
			ret = false;
			goto out;
		}
		next_start = extent_map_end(em);
		free_extent_map(em);

		read_lock(&map_tree->lock);
		em = lookup_extent_mapping(map_tree, next_start,
					   (u64)(-1) - next_start);
		read_unlock(&map_tree->lock);
	}
out:
	return ret;
}

static void readahead_tree_node_children(struct extent_buffer *node)
{
	int i;
	const int nr_items = btrfs_header_nritems(node);

	for (i = 0; i < nr_items; i++)
		btrfs_readahead_node_child(node, i);
}

int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;
	u64 total_dev = 0;
	u64 last_ra_node = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * uuid_mutex is needed only when we are mounting a sprout FS;
	 * otherwise it is not required.
	 */
	mutex_lock(&uuid_mutex);

	/*
	 * It is possible for mount and umount to race in such a way that
	 * we execute this code path, but open_fs_devices failed to clear
	 * total_rw_bytes. We certainly want it cleared before reading the
	 * device items, so clear it here.
	 */
	fs_info->fs_devices->total_rw_bytes = 0;

	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		struct extent_buffer *node;

		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		/*
		 * The nodes on level 1 are not locked; we don't need to lock
		 * them during mount time as nothing else can access the tree.
		 */
		node = path->nodes[1];
		if (node) {
			if (last_ra_node != node->start) {
				readahead_tree_node_children(node);
				last_ra_node = node->start;
			}
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;

			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(leaf, dev_item);
			if (ret)
				goto error;
			total_dev++;
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;

			/*
			 * We are only called at mount time, so no need to
			 * take fs_info->chunk_mutex. Plus, to avoid lockdep
			 * warnings, we always lock first fs_info->chunk_mutex
			 * before acquiring any locks on the chunk tree. This
			 * is a requirement for chunk allocation, see the
			 * comment on top of btrfs_chunk_alloc() for details.
			 */
			ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(&found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}

	/*
	 * After loading the chunk tree, we've got all device information,
	 * do another round of validation checks.

void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	struct btrfs_device *device;

	fs_devices->fs_info = fs_info;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list)
		device->fs_info = fs_info;

	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		list_for_each_entry(device, &seed_devs->devices, dev_list)
			device->fs_info = fs_info;

		seed_devs->fs_info = fs_info;
	}
	mutex_unlock(&fs_devices->device_list_mutex);
}

static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
				 const struct btrfs_dev_stats_item *ptr,
				 int index)
{
	u64 val;

	read_extent_buffer(eb, &val,
			   offsetof(struct btrfs_dev_stats_item, values) +
			    ((unsigned long)ptr) + (index * sizeof(u64)),
			   sizeof(val));
	return val;
}

static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
				      struct btrfs_dev_stats_item *ptr,
				      int index, u64 val)
{
	write_extent_buffer(eb, &val,
			    offsetof(struct btrfs_dev_stats_item, values) +
			     ((unsigned long)ptr) + (index * sizeof(u64)),
			    sizeof(val));
}
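
/*
 * A note on the arithmetic above: @ptr is not a dereferenceable pointer.
 * btrfs_item_ptr() returns the item's byte offset inside the extent buffer
 * cast to the item type, so counter @index lives at buffer offset
 * (unsigned long)ptr + offsetof(..., values) + index * sizeof(u64). For
 * example, for an item starting at offset 100, index 2 is read from offset
 * 100 + 0 + 2 * 8 = 116 (values is, as of this writing, the only member of
 * struct btrfs_dev_stats_item).
 */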

static int btrfs_device_init_dev_stats(struct btrfs_device *device,
				       struct btrfs_path *path)
{
	struct btrfs_dev_stats_item *ptr;
	struct extent_buffer *eb;
	struct btrfs_key key;
	int item_size;
	int i, ret, slot;

	if (!device->fs_info->dev_root)
		return 0;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;
	ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0);
	if (ret) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			btrfs_dev_stat_set(device, i, 0);
		device->dev_stats_valid = 1;
		btrfs_release_path(path);
		return ret < 0 ? ret : 0;
	}
	slot = path->slots[0];
	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, slot);

	ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item);

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
		if (item_size >= (1 + i) * sizeof(__le64))
			btrfs_dev_stat_set(device, i,
					   btrfs_dev_stats_value(eb, ptr, i));
		else
			btrfs_dev_stat_set(device, i, 0);
	}

	device->dev_stats_valid = 1;
	btrfs_dev_stat_print_on_load(device);
	btrfs_release_path(path);

	return 0;
}

int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		ret = btrfs_device_init_dev_stats(device, path);
		if (ret)
			goto out;
	}
	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		list_for_each_entry(device, &seed_devs->devices, dev_list) {
			ret = btrfs_device_init_dev_stats(device, path);
			if (ret)
				goto out;
		}
	}
out:
	mutex_unlock(&fs_devices->device_list_mutex);

	btrfs_free_path(path);
	return ret;
}

static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn_in_rcu(fs_info,
			"error %d while searching for dev_stats item for device %s",
			ret, rcu_str_deref(device->name));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* need to delete old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn_in_rcu(fs_info,
				"delete too small dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn_in_rcu(fs_info,
				"insert dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}
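
/*
 * Sizing note for the delete-and-reinsert path above: sizeof(*ptr) is
 * BTRFS_DEV_STAT_VALUES_MAX * sizeof(__le64) (40 bytes with the current five
 * counters). An item written by an older kernel that knew fewer counters is
 * smaller than that, so it is deleted and re-created at full size before the
 * current values are written back.
 */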

/*
 * Called from commit_transaction(). Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		stats_cnt = atomic_read(&device->dev_stats_ccnt);
		if (!device->dev_stats_valid || stats_cnt == 0)
			continue;

		/*
		 * There is a LOAD-LOAD control dependency between the value of
		 * dev_stats_ccnt and updating the on-disk values which requires
		 * reading the in-memory counters. Such control dependencies
		 * require explicit read memory barriers.
		 *
		 * This memory barrier pairs with smp_mb__before_atomic in
		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
		 * barrier implied by atomic_xchg in
		 * btrfs_dev_stats_read_and_reset.
		 */
		smp_rmb();

		ret = update_dev_stat_item(trans, device);
		if (!ret)
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);
	btrfs_dev_stat_print_on_error(dev);
}
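
/*
 * Typical use (sketch, call sites vary by kernel version): I/O completion
 * handlers bump the matching counter when a bio fails, e.g. for a failed
 * write:
 *
 *	btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
 *
 * The increment only dirties the in-memory counters (via dev_stats_ccnt);
 * persisting them is deferred to btrfs_run_dev_stats() at commit time.
 */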

static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
	if (!dev->dev_stats_valid)
		return;
	btrfs_err_rl_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			   rcu_str_deref(dev->name),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

	btrfs_info_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			  rcu_str_deref(dev->name),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	BTRFS_DEV_LOOKUP_ARGS(args);
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	args.devid = stats->devid;
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_set(dev, i, 0);
		}
		btrfs_info(fs_info, "device stats zeroed by %s (%d)",
			   current->comm, task_pid_nr(current));
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}

/*
 * Update the size and bytes used for each device where it changed. This is
 * delayed since we would otherwise get errors while writing out the
 * superblocks.
 *
 * Must be invoked during transaction commit.
 */
void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
{
	struct btrfs_device *curr, *next;

	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);

	if (list_empty(&trans->dev_update_list))
		return;

	/*
	 * We don't need the device_list_mutex here. This list is owned by the
	 * transaction and the transaction must complete before the device is
	 * released.
	 */
	mutex_lock(&trans->fs_info->chunk_mutex);
	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
				 post_commit_list) {
		list_del_init(&curr->post_commit_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
		curr->commit_bytes_used = curr->bytes_used;
	}
	mutex_unlock(&trans->fs_info->chunk_mutex);
}

/*
 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
 */
int btrfs_bg_type_to_factor(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	return btrfs_raid_array[index].ncopies;
}
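
/*
 * Example factors, taken straight from the ncopies column of
 * btrfs_raid_array: SINGLE/RAID0/RAID5/RAID6 -> 1, DUP/RAID1/RAID10 -> 2,
 * RAID1C3 -> 3, RAID1C4 -> 4. Callers use this to translate raw device bytes
 * into logical space for mirrored/striped profiles; the parity profiles have
 * ncopies == 1 and are special-cased elsewhere via nparity.
 */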

static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
				 u64 chunk_offset, u64 devid,
				 u64 physical_offset, u64 physical_len)
{
	struct btrfs_dev_lookup_args args = { .devid = devid };
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *dev;
	u64 stripe_len;
	bool found = false;
	int ret = 0;
	int i;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
		goto out;
	}

	map = em->map_lookup;
	stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
	if (physical_len != stripe_len) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
			  physical_offset, devid, em->start, physical_len,
			  stripe_len);
		ret = -EUCLEAN;
		goto out;
	}

	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->devid == devid &&
		    map->stripes[i].physical == physical_offset) {
			found = true;
			if (map->verified_stripes >= map->num_stripes) {
				btrfs_err(fs_info,
					"too many dev extents for chunk %llu found",
					  em->start);
				ret = -EUCLEAN;
				goto out;
			}
			map->verified_stripes++;
			break;
		}
	}
	if (!found) {
		btrfs_err(fs_info,
	"dev extent physical offset %llu devid %llu has no corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
	}

	/* Make sure no dev extent is beyond device boundary */
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	if (!dev) {
		btrfs_err(fs_info, "failed to find devid %llu", devid);
		ret = -EUCLEAN;
		goto out;
	}

	if (physical_offset + physical_len > dev->disk_total_bytes) {
		btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
			  devid, physical_offset, physical_len,
			  dev->disk_total_bytes);
		ret = -EUCLEAN;
		goto out;
	}

	if (dev->zone_info) {
		u64 zone_size = dev->zone_info->zone_size;

		if (!IS_ALIGNED(physical_offset, zone_size) ||
		    !IS_ALIGNED(physical_len, zone_size)) {
			btrfs_err(fs_info,
"zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
				  devid, physical_offset, physical_len);
			ret = -EUCLEAN;
			goto out;
		}
	}

out:
	free_extent_map(em);
	return ret;
}

static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct rb_node *node;
	int ret = 0;

	read_lock(&em_tree->lock);
	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
		em = rb_entry(node, struct extent_map, rb_node);
		if (em->map_lookup->num_stripes !=
		    em->map_lookup->verified_stripes) {
			btrfs_err(fs_info,
			"chunk %llu has missing dev extent, have %d expect %d",
				  em->start, em->map_lookup->verified_stripes,
				  em->map_lookup->num_stripes);
			ret = -EUCLEAN;
			goto out;
		}
	}
out:
	read_unlock(&em_tree->lock);
	return ret;
}
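
/*
 * Worked example of the two passes: a RAID1 chunk has num_stripes == 2, so
 * exactly two DEV_EXTENT items must point back at it. Each match bumps
 * map->verified_stripes in verify_one_dev_extent(); a third extent trips the
 * "too many dev extents" check there, while only one leaves
 * verified_stripes < num_stripes and is caught by the
 * verify_chunk_dev_extent_mapping() sweep above.
 */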

/*
 * Ensure that all dev extents are mapped to correct chunk, otherwise
 * later chunk allocation/free would cause unexpected behavior.
 *
 * NOTE: This will iterate through the whole device tree, which should be of
 * the same size level as the chunk tree. This slightly increases mount time.
 */
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	u64 prev_devid = 0;
	u64 prev_dev_ext_end = 0;
	int ret = 0;

	/*
	 * We don't have a dev_root because we mounted with ignorebadroots and
	 * failed to load the root, so we want to skip the verification in this
	 * case for sure.
	 *
	 * However if the dev root is fine, but the tree itself is corrupted
	 * we'd still fail to mount. This verification is only to make sure
	 * writes can happen safely, so instead just bypass this check
	 * completely in the case of IGNOREBADROOTS.
	 */
	if (btrfs_test_opt(fs_info, IGNOREBADROOTS))
		return 0;

	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	while (1) {
		struct extent_buffer *leaf = path->nodes[0];
		struct btrfs_dev_extent *dext;
		int slot = path->slots[0];
		u64 chunk_offset;
		u64 physical_offset;
		u64 physical_len;
		u64 devid;

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			break;
		devid = key.objectid;
		physical_offset = key.offset;

		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
		physical_len = btrfs_dev_extent_length(leaf, dext);

		/* Check if this dev extent overlaps with the previous one */
		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
			btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
				  devid, physical_offset, prev_dev_ext_end);
			ret = -EUCLEAN;
			goto out;
		}

		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
					    physical_offset, physical_len);
		if (ret < 0)
			goto out;
		prev_devid = devid;
		prev_dev_ext_end = physical_offset + physical_len;

		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}

	/* Ensure all chunks have corresponding dev extents */
	ret = verify_chunk_dev_extent_mapping(fs_info);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Check whether the given block group or device is pinned by any inode being
 * used as a swapfile.
 */
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
{
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = fs_info->swapfile_pins.rb_node;
	while (node) {
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (ptr < sp->ptr)
			node = node->rb_left;
		else if (ptr > sp->ptr)
			node = node->rb_right;
		else
			break;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
	return node != NULL;
}
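
/*
 * Usage sketch (hypothetical caller, modeled on the checks done before
 * shrinking or removing a device): operations that would invalidate extents
 * backing an active swapfile bail out early, e.g.
 *
 *	if (btrfs_pinned_by_swapfile(fs_info, device))
 *		return -ETXTBSY;
 *
 * The pin tree stores raw pointers, so the same lookup works for both
 * struct btrfs_device and struct btrfs_block_group keys.
 */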

static int relocating_repair_kthread(void *data)
{
	struct btrfs_block_group *cache = (struct btrfs_block_group *)data;
	struct btrfs_fs_info *fs_info = cache->fs_info;
	u64 target;
	int ret = 0;

	target = cache->start;
	btrfs_put_block_group(cache);

	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
		btrfs_info(fs_info,
			   "zoned: skip relocating block group %llu to repair: EBUSY",
			   target);
		return -EBUSY;
	}

	mutex_lock(&fs_info->reclaim_bgs_lock);

	/* Ensure block group still exists */
	cache = btrfs_lookup_block_group(fs_info, target);
	if (!cache)
		goto out;

	if (!cache->relocating_repair)
		goto out;

	ret = btrfs_may_alloc_data_chunk(fs_info, target);
	if (ret < 0)
		goto out;

	btrfs_info(fs_info,
		   "zoned: relocating block group %llu to repair IO failure",
		   target);
	ret = btrfs_relocate_chunk(fs_info, target);

out:
	if (cache)
		btrfs_put_block_group(cache);
	mutex_unlock(&fs_info->reclaim_bgs_lock);
	btrfs_exclop_finish(fs_info);

	return ret;
}

int btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
{
	struct btrfs_block_group *cache;

	/* Do not attempt to repair in degraded state */
	if (btrfs_test_opt(fs_info, DEGRADED))
		return 0;

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return 0;

	spin_lock(&cache->lock);
	if (cache->relocating_repair) {
		spin_unlock(&cache->lock);
		btrfs_put_block_group(cache);
		return 0;
	}
	cache->relocating_repair = 1;
	spin_unlock(&cache->lock);

	kthread_run(relocating_repair_kthread, cache,
		    "btrfs-relocating-repair");

	return 0;
}
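
/*
 * Note on the fire-and-forget design above: btrfs_repair_one_zone() hands its
 * block group reference to the kthread and returns 0 immediately, without
 * waiting for (or propagating the result of) the relocation. The
 * relocating_repair flag, set under cache->lock, ensures at most one repair
 * worker is spawned per block group at a time.
 */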