// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include "misc.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"
#include "discard.h"
#include "zoned.h"

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes = 2,
		.dev_stripes = 1,
		.devs_max = 0,	/* 0 == as many as possible */
		.devs_min = 4,
		.tolerated_failures = 1,
		.devs_increment = 2,
		.ncopies = 2,
		.nparity = 0,
		.raid_name = "raid10",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 2,
		.devs_min = 2,
		.tolerated_failures = 1,
		.devs_increment = 2,
		.ncopies = 2,
		.nparity = 0,
		.raid_name = "raid1",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C3] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 3,
		.devs_min = 3,
		.tolerated_failures = 2,
		.devs_increment = 3,
		.ncopies = 3,
		.nparity = 0,
		.raid_name = "raid1c3",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID1C3,
		.mindev_error = BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C4] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 4,
		.devs_min = 4,
		.tolerated_failures = 3,
		.devs_increment = 4,
		.ncopies = 4,
		.nparity = 0,
		.raid_name = "raid1c4",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID1C4,
		.mindev_error = BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes = 1,
		.dev_stripes = 2,
		.devs_max = 1,
		.devs_min = 1,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 2,
		.nparity = 0,
		.raid_name = "dup",
		.bg_flag = BTRFS_BLOCK_GROUP_DUP,
		.mindev_error = 0,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 2,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 0,
		.raid_name = "raid0",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error = 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 1,
		.devs_min = 1,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 0,
		.raid_name = "single",
		.bg_flag = 0,
		.mindev_error = 0,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 2,
		.tolerated_failures = 1,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 1,
		.raid_name = "raid5",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 3,
		.tolerated_failures = 2,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 2,
		.raid_name = "raid6",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};

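/*
 * Illustrative sketch (not part of the original file): the table above is
 * typically consulted by mapping a block group profile bit to an array index
 * via btrfs_bg_flags_to_raid_index(), as btrfs_bg_type_to_raid_name() below
 * does. Assuming @flags carries a valid profile bit:
 *
 *	const struct btrfs_raid_attr *attr;
 *
 *	attr = &btrfs_raid_array[btrfs_bg_flags_to_raid_index(flags)];
 *	// e.g. attr->ncopies == 2 for RAID1, attr->nparity == 2 for RAID6
 */
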
const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}

/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including the terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

#define DESCRIBE_FLAG(flag, desc)					\
	do {								\
		if (flags & (flag)) {					\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)			\
				goto out_overflow;			\
			size_bp -= ret;					\
			bp += ret;					\
			flags &= ~(flag);				\
		}							\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide a sufficiently
	 * large buffer.
	 */
out_overflow:;
}

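/*
 * Illustrative usage sketch (not part of the original file), assuming a
 * metadata block group with the RAID1 profile:
 *
 *	char buf[32];
 *
 *	btrfs_describe_block_groups(BTRFS_BLOCK_GROUP_METADATA |
 *				    BTRFS_BLOCK_GROUP_RAID1, buf, sizeof(buf));
 *	// buf now contains "metadata|raid1"
 */
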
static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_bio **bbio_ret,
			     int mirror_num, int need_raid_map);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list in general
 * but in mount context it could be used to exclude list modifications by eg.
 * scan ioctl
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, ie. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * Is not required at mount and close times, because our device list is
 * protected by the uuid_mutex at that point.
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   device_list_mutex
 *     chunk_mutex
 *   balance_mutex
 *
 *
 * Exclusive operations
 * ====================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * The status of exclusive operation is set and cleared atomically.
 * During the course of Paused state, fs_info::exclusive_operation remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * The exclusive status is cleared when the device operation is canceled or
 * completed.
 */

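/*
 * Illustrative sketch (not part of the original file) of the nesting order
 * documented above: a hypothetical operation that needs both the per-fs
 * device list and the chunk state would take the locks outer to inner:
 *
 *	mutex_lock(&fs_devices->device_list_mutex);
 *	mutex_lock(&fs_info->chunk_mutex);
 *	// ... modify chunks / per-device post_commit_list ...
 *	mutex_unlock(&fs_info->chunk_mutex);
 *	mutex_unlock(&fs_devices->device_list_mutex);
 */
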
DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:	   if not NULL, copy the UUID to fs_devices::fsid
 * @metadata_fsid: if not NULL, copy the UUID to fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
						 const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	INIT_LIST_HEAD(&fs_devs->seed_list);
	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);

	if (metadata_fsid)
		memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
	else if (fsid)
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);

	return fs_devs;
}

void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	extent_io_tree_release(&device->alloc_state);
	bio_put(device->flush_bio);
	btrfs_destroy_dev_zone_info(device);
	kfree(device);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}

void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}

/*
 * Returns a pointer to a new btrfs_device on success; ERR_PTR() on error.
 * Returned struct is not linked onto any lists and must be destroyed using
 * btrfs_free_device.
 */
static struct btrfs_device *__alloc_device(struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate a bio that's always going to be used for flushing device
	 * barriers and matches the device lifespan
	 */
	dev->flush_bio = bio_alloc_bioset(GFP_KERNEL, 0, NULL);
	if (!dev->flush_bio) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->post_commit_list);

	atomic_set(&dev->reada_in_flight, 0);
	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev, fs_info);
	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	extent_io_tree_init(fs_info, &dev->alloc_state,
			    IO_TREE_DEVICE_ALLOC_STATE, NULL);

	return dev;
}

static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (metadata_fsid) {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
			    && memcmp(metadata_fsid, fs_devices->metadata_uuid,
				      BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		} else {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		}
	}
	return NULL;
}

static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
				struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by first scanning
	 * a device which didn't have its fsid/metadata_uuid changed
	 * at all and the CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(disk_super->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}
	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by a device that
	 * has an outdated pair of fsid/metadata_uuid and
	 * CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(fs_devices->metadata_uuid,
			   fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
		    memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
}

static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct btrfs_super_block **disk_super)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*disk_super = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*disk_super)) {
		ret = PTR_ERR(*disk_super);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	return ret;
}

static bool device_path_matched(const char *path, struct btrfs_device *device)
{
	int found;

	rcu_read_lock();
	found = strcmp(rcu_str_deref(device->name), path);
	rcu_read_unlock();

	return found == 0;
}

/*
 * Search and remove all stale devices (devices which are not mounted).
 * When both inputs are NULL, it will search and release all stale devices.
 *
 * @path:	 Optional. When provided, it will release all unmounted devices
 *		 matching this path only.
 * @skip_device: Optional. Will skip this device when searching for the stale
 *		 devices.
 *
 * Return:	0 for success or if @path is NULL.
 *		-EBUSY if @path is a mounted device.
 *		-ENOENT if @path does not match any device in the list.
 */
static int btrfs_free_stale_devices(const char *path,
				    struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret = 0;

	if (path)
		ret = -ENOENT;

	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (path && !device->name)
				continue;
			if (path && !device_path_matched(path, device))
				continue;
			if (fs_devices->opened) {
				/* for an already deleted device return 0 */
				if (path && ret != 0)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			ret = 0;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	return ret;
}

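/*
 * Illustrative sketch (not part of the original file): callers must hold
 * uuid_mutex, as btrfs_forget_devices() does further below:
 *
 *	mutex_lock(&uuid_mutex);
 *	ret = btrfs_free_stale_devices(path, NULL);
 *	mutex_unlock(&uuid_mutex);
 */
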
/*
 * This is only used on mount, and we are protected from competing things
 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 * fs_devices->device_list_mutex here.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
			struct btrfs_device *device, fmode_t flags,
			void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev, &disk_super);
	if (ret)
		return ret;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_free_page;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_free_page;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_free_page;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = true;
	} else {
		if (bdev_read_only(bdev))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	q = bdev_get_queue(bdev);
	if (!blk_queue_nonrot(q))
		fs_devices->rotating = true;

	device->bdev = bdev;
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	device->mode = flags;

	ret = btrfs_get_dev_zone_info(device);
	if (ret != 0)
		goto error_free_page;

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	btrfs_release_disk_super(disk_super);

	return 0;

error_free_page:
	btrfs_release_disk_super(disk_super);
	blkdev_put(bdev, flags);

	return -EINVAL;
}

/*
 * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
 * being created with a disk that has already completed its fsid change. Such a
 * disk can belong to an fs which has its FSID changed or to one which doesn't.
 * Handle both cases here.
 */
static struct btrfs_fs_devices *find_fsid_inprogress(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, NULL);
}

static struct btrfs_fs_devices *find_fsid_changed(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handles the case where the scanned device is part of an fs that had
	 * multiple successful changes of FSID but currently the device didn't
	 * observe it. Meaning our fsid will be different than theirs. We need
	 * to handle two subcases:
	 * 1 - The fs still continues to have different METADATA/FSID uuids.
	 * 2 - The fs is switched back to its original FSID (METADATA/FSID
	 * are equal).
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		/* Changed UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->fsid,
			   BTRFS_FSID_SIZE) != 0)
			return fs_devices;

		/* Unchanged UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}

	return NULL;
}

static struct btrfs_fs_devices *find_fsid_reverted_metadata(
				struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle the case where the scanned device is part of an fs whose last
	 * metadata UUID change reverted it to the original FSID. At the same
	 * time fs_devices was first created by another constituent device
	 * which didn't fully observe the operation. This results in a
	 * btrfs_fs_devices created with metadata/fsid different AND
	 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
	 * fs_devices equal to the FSID of the disk.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    fs_devices->fsid_change)
			return fs_devices;
	}

	return NULL;
}

/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
					BTRFS_SUPER_FLAG_CHANGING_FSID_V2);

	if (fsid_change_in_progress) {
		if (!has_metadata_uuid)
			fs_devices = find_fsid_inprogress(disk_super);
		else
			fs_devices = find_fsid_changed(disk_super);
	} else if (has_metadata_uuid) {
		fs_devices = find_fsid_with_metadata_uuid(disk_super);
	} else {
		fs_devices = find_fsid_reverted_metadata(disk_super);
		if (!fs_devices)
			fs_devices = find_fsid(disk_super->fsid, NULL);
	}

	if (!fs_devices) {
		if (has_metadata_uuid)
			fs_devices = alloc_fs_devices(disk_super->fsid,
						      disk_super->metadata_uuid);
		else
			fs_devices = alloc_fs_devices(disk_super->fsid, NULL);

		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		fs_devices->fsid_change = fsid_change_in_progress;

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, devid,
				disk_super->dev_item.uuid, NULL);

		/*
		 * If this disk has been pulled into an fs devices created by
		 * a device which had the CHANGING_FSID_V2 flag then replace
		 * the metadata_uuid/fsid values of the fs_devices.
		 */
		if (fs_devices->fsid_change &&
		    found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
			       BTRFS_FSID_SIZE);

			if (has_metadata_uuid)
				memcpy(fs_devices->metadata_uuid,
				       disk_super->metadata_uuid,
				       BTRFS_FSID_SIZE);
			else
				memcpy(fs_devices->metadata_uuid,
				       disk_super->fsid, BTRFS_FSID_SIZE);

			fs_devices->fsid_change = false;
		}
	}

	if (!device) {
		if (fs_devices->opened) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			btrfs_free_device(device);
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_assign_pointer(device->name, name);

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info(
	"BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->label, devid, found_transid, path,
				current->comm, task_pid_nr(current));
		else
			pr_info(
	"BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->fsid, devid, found_transid, path,
				current->comm, task_pid_nr(current));

	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *      a. The same device disappeared and reappeared with
		 *         different name. or
		 *      b. The missing-disk-which-was-replaced, has
		 *         reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transaction when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at all times.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted. We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with the same uuid and devid. We keep the one
			 * with the larger generation number or the last-in if
			 * the generations are equal.
			 */
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EEXIST);
		}

		/*
		 * We are going to replace the device path for a given devid,
		 * make sure it's the same device if the device is mounted
		 */
		if (device->bdev) {
			int error;
			dev_t path_dev;

			error = lookup_bdev(path, &path_dev);
			if (error) {
				mutex_unlock(&fs_devices->device_list_mutex);
				return ERR_PTR(error);
			}

			if (device->bdev->bd_dev != path_dev) {
				mutex_unlock(&fs_devices->device_list_mutex);
				/*
				 * device->fs_info may not be reliable here, so
				 * pass in a NULL instead. This avoids a
				 * possible use-after-free when the fs_info and
				 * fs_info->sb are already torn down.
				 */
				btrfs_warn_in_rcu(NULL,
	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
						  path, devid, found_transid,
						  current->comm,
						  task_pid_nr(current));
				return ERR_PTR(-EEXIST);
			}
			btrfs_info_in_rcu(device->fs_info,
	"devid %llu device path %s changed to %s scanned by %s (%d)",
					  devid, rcu_str_deref(device->name),
					  path, current->comm,
					  task_pid_nr(current));
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);
	return device;
}

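/*
 * Illustrative sketch (not part of the original file): device_list_add() is
 * driven from the scan path, mirroring what btrfs_scan_one_device() does
 * further below:
 *
 *	bool new_device_added = false;
 *	struct btrfs_device *device;
 *
 *	device = device_list_add(path, disk_super, &new_device_added);
 *	if (!IS_ERR(device) && new_device_added)
 *		btrfs_free_stale_devices(path, device);
 */
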
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;
	int ret = 0;

	fs_devices = alloc_fs_devices(orig->fsid, NULL);
	if (IS_ERR(fs_devices))
		return fs_devices;

	mutex_lock(&orig->device_list_mutex);
	fs_devices->total_devices = orig->total_devices;

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device)) {
			ret = PTR_ERR(device);
			goto error;
		}

		/*
		 * This is ok to do without the RCU read lock held because we
		 * hold the uuid_mutex so nothing we touch in here is going to
		 * disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str,
					GFP_KERNEL);
			if (!name) {
				btrfs_free_device(device);
				ret = -ENOMEM;
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	mutex_unlock(&orig->device_list_mutex);
	return fs_devices;
error:
	mutex_unlock(&orig->device_list_mutex);
	free_fs_devices(fs_devices);
	return ERR_PTR(ret);
}

static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
				      struct btrfs_device **latest_dev)
{
	struct btrfs_device *device, *next;

	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			    !test_bit(BTRFS_DEV_STATE_MISSING,
				      &device->dev_state) &&
			    (!*latest_dev ||
			     device->generation > (*latest_dev)->generation)) {
				*latest_dev = device;
			}
			continue;
		}

		/*
		 * We have already validated the presence of
		 * BTRFS_DEV_REPLACE_DEVID in btrfs_init_dev_replace() so just
		 * continue.
		 */
		if (device->devid == BTRFS_DEV_REPLACE_DEVID)
			continue;

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}
}

/*
 * After we have read the system tree and know devids belonging to this
 * filesystem, remove the device which does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_fs_devices *seed_dev;

	mutex_lock(&uuid_mutex);
	__btrfs_free_extra_devids(fs_devices, &latest_dev);

	list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
		__btrfs_free_extra_devids(seed_dev, &latest_dev);

	fs_devices->latest_bdev = latest_dev->bdev;

	mutex_unlock(&uuid_mutex);
}

static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	blkdev_put(device->bdev, device->mode);
}

static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
		fs_devices->missing_devices--;

	btrfs_close_bdev(device);
	if (device->bdev) {
		fs_devices->open_devices--;
		device->bdev = NULL;
	}
	clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	btrfs_destroy_dev_zone_info(device);

	device->fs_info = NULL;
	atomic_set(&device->dev_stats_ccnt, 0);
	extent_io_tree_release(&device->alloc_state);

	/* Verify the device is back in a pristine state */
	ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
	ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	ASSERT(list_empty(&device->dev_alloc_list));
	ASSERT(list_empty(&device->post_commit_list));
	ASSERT(atomic_read(&device->reada_in_flight) == 0);
}

static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	lockdep_assert_held(&uuid_mutex);

	if (--fs_devices->opened > 0)
		return;

	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
		btrfs_close_one_device(device);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = false;
	fs_devices->fs_info = NULL;
}

void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	LIST_HEAD(list);
	struct btrfs_fs_devices *tmp;

	mutex_lock(&uuid_mutex);
	close_fs_devices(fs_devices);
	if (!fs_devices->opened)
		list_splice_init(&fs_devices->seed_list, &list);

	list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
		close_fs_devices(fs_devices);
		list_del(&fs_devices->seed_list);
		free_fs_devices(fs_devices);
	}
	mutex_unlock(&uuid_mutex);
}

static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_device *tmp_device;

	flags |= FMODE_EXCL;

	list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
				 dev_list) {
		int ret;

		ret = btrfs_open_one_device(fs_devices, device, flags, holder);
		if (ret == 0 &&
		    (!latest_dev || device->generation > latest_dev->generation)) {
			latest_dev = device;
		} else if (ret == -ENODATA) {
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);
		}
	}
	if (fs_devices->open_devices == 0)
		return -EINVAL;

	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_dev->bdev;
	fs_devices->total_rw_bytes = 0;
	fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
	fs_devices->read_policy = BTRFS_READ_POLICY_PID;

	return 0;
}

static int devid_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);
	/*
	 * The device_list_mutex cannot be taken here in case opening the
	 * underlying device takes further locks like bd_mutex.
	 *
	 * We also don't need the lock here as this is called during mount and
	 * exclusion is provided by uuid_mutex
	 */

	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}

	return ret;
}

void btrfs_release_disk_super(struct btrfs_super_block *super)
{
	struct page *page = virt_to_page(super);

	put_page(page);
}

static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
						       u64 bytenr, u64 bytenr_orig)
{
	struct btrfs_super_block *disk_super;
	struct page *page;
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
		return ERR_PTR(-EINVAL);

	/* make sure our super fits in the page */
	if (sizeof(*disk_super) > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
		return ERR_PTR(-EINVAL);

	/* pull in the page with our super */
	page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);

	if (IS_ERR(page))
		return ERR_CAST(page);

	p = page_address(page);

	/* align our pointer to the offset of the super block */
	disk_super = p + offset_in_page(bytenr);

	if (btrfs_super_bytenr(disk_super) != bytenr_orig ||
	    btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(p);
		return ERR_PTR(-EINVAL);
	}

	if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
		disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;

	return disk_super;
}

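/*
 * Illustrative note (not part of the original file) on the straddle check in
 * btrfs_read_disk_super() above: with 4KiB pages, the primary superblock at
 * btrfs_sb_offset(0) == 64KiB maps to page index 16, e.g.:
 *
 *	// bytenr = 65536, PAGE_SHIFT = 12
 *	index = bytenr >> PAGE_SHIFT;	// 16
 *	// and the super must end on the same page:
 *	// (bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT == 16
 */
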
int btrfs_forget_devices(const char *path)
{
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_free_stale_devices(strlen(path) ? path : NULL, NULL);
	mutex_unlock(&uuid_mutex);

	return ret;
}

/*
 * Look for a btrfs signature on a device. This may be called out of the mount
 * path and we are not allowed to call set_blocksize during the scan. The
 * superblock is read via pagecache.
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
					   void *holder)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct block_device *bdev;
	u64 bytenr, bytenr_orig;
	int ret;

	lockdep_assert_held(&uuid_mutex);

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	flags |= FMODE_EXCL;

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	bytenr_orig = btrfs_sb_offset(0);
	ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr);
	if (ret) {
		/* Release the bdev on error instead of leaking it */
		device = ERR_PTR(ret);
		goto error_bdev_put;
	}

	disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig);
	if (IS_ERR(disk_super)) {
		device = ERR_CAST(disk_super);
		goto error_bdev_put;
	}

	device = device_list_add(path, disk_super, &new_device_added);
	if (!IS_ERR(device)) {
		if (new_device_added)
			btrfs_free_stale_devices(path, device);
	}

	btrfs_release_disk_super(disk_super);

error_bdev_put:
	blkdev_put(bdev, flags);

	return device;
}

/*
 * Try to find a chunk that intersects [start, start + len] range and when one
 * such is found, record the end of it in *start
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{
	u64 physical_start, physical_end;

	lockdep_assert_held(&device->fs_info->chunk_mutex);

	if (!find_first_extent_bit(&device->alloc_state, *start,
				   &physical_start, &physical_end,
				   CHUNK_ALLOCATED, NULL)) {
		if (in_range(physical_start, *start, len) ||
		    in_range(*start, physical_start,
			     physical_end - physical_start)) {
			*start = physical_end + 1;
			return true;
		}
	}
	return false;
}

static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
{
	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		/*
		 * We don't want to overwrite the superblock on the drive nor
		 * any area used by the boot loader (grub for example), so we
		 * make sure to start at an offset of at least 1MB.
		 */
		return max_t(u64, start, SZ_1M);
	default:
		BUG();
	}
}

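/*
 * Illustrative sketch (not part of the original file): under the
 * BTRFS_CHUNK_ALLOC_REGULAR policy, the clamping above pushes any requested
 * start below 1MiB up to protect the superblock and boot loader area:
 *
 *	dev_extent_search_start(device, 0);	// returns SZ_1M (1048576)
 *	dev_extent_search_start(device, SZ_2M);	// returns SZ_2M unchanged
 */
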
/**
 * dev_extent_hole_check - check if specified hole is suitable for allocation
 * @device:	the device which we have the hole
 * @hole_start: starting position of the hole
 * @hole_size:	the size of the hole
 * @num_bytes:	the size of the free space that we need
 *
 * This function may modify @hole_start and @hole_size to reflect the suitable
 * position for allocation. Returns true if the hole position was updated,
 * false otherwise.
 */
static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
				  u64 *hole_size, u64 num_bytes)
{
	bool changed = false;
	u64 hole_end = *hole_start + *hole_size;

	/*
	 * Check before we set max_hole_start, otherwise we could end up
	 * sending back this offset anyway.
	 */
	if (contains_pending_extent(device, hole_start, *hole_size)) {
		if (hole_end >= *hole_start)
			*hole_size = hole_end - *hole_start;
		else
			*hole_size = 0;
		changed = true;
	}

	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		/* No extra check */
		break;
	default:
		BUG();
	}

	return changed;
}

/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find it. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 *
 * NOTE: This function will search *commit* root of device tree, and does extra
 * check to ensure dev extents are not double allocated.
 * This makes the function safe to allocate dev extents but may not report
 * correct usable device space, as device extent freed in current transaction
 * is not reported as available.
 */
static int find_free_dev_extent_start(struct btrfs_device *device,
				u64 num_bytes, u64 search_start, u64 *start,
				u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	search_start = dev_extent_search_start(device, search_start);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;
			dev_extent_hole_check(device, &search_start, &hole_size,
					      num_bytes);

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;
		if (dev_extent_hole_check(device, &search_start, &hole_size,
					  num_bytes)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(device, num_bytes, 0, start, len);
}

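/*
 * Illustrative sketch (not part of the original file): a typical caller asks
 * for a hole big enough for a new device extent, and on -ENOSPC can still
 * learn how large the biggest hole was:
 *
 *	u64 start, len;
 *
 *	ret = find_free_dev_extent(device, num_bytes, &start, &len);
 *	if (ret == -ENOSPC) {
 *		// start/len now describe the largest (insufficient) hole
 *	}
 */
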
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start, u64 *dev_extent_len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_handle_fs_error(fs_info, ret, "Slot search failed");
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
				      "Failed to remove dev extent item");
	} else {
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
	}
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state));
	WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent,
					BTRFS_CHUNK_TREE_OBJECTID);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent,
					    BTRFS_FIRST_CHUNK_TREE_OBJECTID);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;

	em_tree = &fs_info->mapping_tree;
	read_lock(&em_tree->lock);
	n = rb_last(&em_tree->map.rb_root);
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
	}
	read_unlock(&em_tree->lock);

	return ret;
}

static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	if (ret == 0) {
		/* Corruption */
		btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
		ret = -EUCLEAN;
		goto error;
	}

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
			    struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
				      &key, sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
			    ptr, BTRFS_FSID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Function to update ctime/mtime for a given device path.
 * Mainly used for ctime/mtime based probes like libblkid.
 */
static void update_dev_time(const char *path_name)
{
	struct file *filp;

	filp = filp_open(path_name, O_RDWR, 0);
	if (IS_ERR(filp))
		return;
	file_update_time(filp);
	filp_close(filp, NULL);
}

static int btrfs_rm_dev_item(struct btrfs_device *device)
{
	struct btrfs_root *root = device->fs_info->chunk_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	}

out:
	btrfs_free_path(path);
	if (!ret)
		ret = btrfs_commit_transaction(trans);
	return ret;
}

1924 */
1925 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
1926 u64 num_devices)
1927 {
1928 u64 all_avail;
1929 unsigned seq;
1930 int i;
1931
1932 do {
1933 seq = read_seqbegin(&fs_info->profiles_lock);
1934
1935 all_avail = fs_info->avail_data_alloc_bits |
1936 fs_info->avail_system_alloc_bits |
1937 fs_info->avail_metadata_alloc_bits;
1938 } while (read_seqretry(&fs_info->profiles_lock, seq));
1939
1940 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
1941 if (!(all_avail & btrfs_raid_array[i].bg_flag))
1942 continue;
1943
1944 if (num_devices < btrfs_raid_array[i].devs_min) {
1945 int ret = btrfs_raid_array[i].mindev_error;
1946
1947 if (ret)
1948 return ret;
1949 }
1950 }
1951
1952 return 0;
1953 }
1954
1955 static struct btrfs_device * btrfs_find_next_active_device(
1956 struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
1957 {
1958 struct btrfs_device *next_device;
1959
1960 list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
1961 if (next_device != device &&
1962 !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
1963 && next_device->bdev)
1964 return next_device;
1965 }
1966
1967 return NULL;
1968 }
1969
1970 /*
1971 * Helper function to check if the given device is part of s_bdev / latest_bdev
1972 * and replace it with the provided or the next active device. In the context
1973 * where this function is called, there should always be another active device
1974 * (or this_dev).
1975 */
1976 void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
1977 struct btrfs_device *next_device)
1978 {
1979 struct btrfs_fs_info *fs_info = device->fs_info;
1980
1981 if (!next_device)
1982 next_device = btrfs_find_next_active_device(fs_info->fs_devices,
1983 device);
1984 ASSERT(next_device);
1985
1986 if (fs_info->sb->s_bdev &&
1987 (fs_info->sb->s_bdev == device->bdev))
1988 fs_info->sb->s_bdev = next_device->bdev;
1989
1990 if (fs_info->fs_devices->latest_bdev == device->bdev)
1991 fs_info->fs_devices->latest_bdev = next_device->bdev;
1992 }
1993
1994 /*
1995 * Return btrfs_fs_devices::num_devices excluding the device that's being
1996 * currently replaced.
1997 */
1998 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
1999 {
2000 u64 num_devices = fs_info->fs_devices->num_devices;
2001
2002 down_read(&fs_info->dev_replace.rwsem);
2003 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
2004 ASSERT(num_devices > 1);
2005 num_devices--;
2006 }
2007 up_read(&fs_info->dev_replace.rwsem);
2008
2009 return num_devices;
2010 }
2011
2012 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
2013 struct block_device *bdev,
2014 const char *device_path)
2015 {
2016 struct btrfs_super_block *disk_super;
2017 int copy_num;
2018
2019 if (!bdev)
2020 return;
2021
2022 for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
2023 struct page *page;
2024 int ret;
2025
2026 disk_super = btrfs_read_dev_one_super(bdev, copy_num);
2027 if (IS_ERR(disk_super))
2028 continue;
2029
2030 if (bdev_is_zoned(bdev)) {
2031 btrfs_reset_sb_log_zones(bdev, copy_num);
2032 continue;
2033 }
2034
2035 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
2036
2037 page = virt_to_page(disk_super);
2038 set_page_dirty(page);
2039 lock_page(page);
2040 /* write_one_page() unlocks the page */
2041 ret = write_one_page(page);
2042 if (ret)
2043 btrfs_warn(fs_info,
2044 "error clearing superblock number %d (%d)",
2045 copy_num, ret);
2046 btrfs_release_disk_super(disk_super);
2047
2048 }
2049
2050 /* Notify udev that device has changed */
2051 btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
2052
2053 /* Update ctime/mtime for device path for libblkid */
2054 update_dev_time(device_path);
2055 }
2056
2057 int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
2058 u64 devid)
2059 {
2060 struct btrfs_device *device;
2061 struct btrfs_fs_devices *cur_devices;
2062 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2063 u64 num_devices;
2064 int ret = 0;
2065
2066 mutex_lock(&uuid_mutex);
2067
2068 num_devices = btrfs_num_devices(fs_info);
2069
2070 ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
2071 if (ret)
2072 goto out;
2073
2074 device = btrfs_find_device_by_devspec(fs_info, devid, device_path);
2075
2076 if (IS_ERR(device)) {
2077 if (PTR_ERR(device) == -ENOENT &&
2078 strcmp(device_path, "missing") == 0)
2079 ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
2080 else
2081 ret = PTR_ERR(device);
2082 goto out;
2083 }
2084
2085 if (btrfs_pinned_by_swapfile(fs_info, device)) {
2086 btrfs_warn_in_rcu(fs_info,
2087 "cannot remove device %s (devid %llu) due to active swapfile",
2088 rcu_str_deref(device->name), device->devid);
2089 ret = -ETXTBSY;
2090 goto out;
2091 }
2092
2093 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2094 ret = BTRFS_ERROR_DEV_TGT_REPLACE;
2095 goto out;
2096 }
2097
2098 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
2099 fs_info->fs_devices->rw_devices == 1) {
2100 ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
2101 goto out;
2102 }
2103
2104 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2105 mutex_lock(&fs_info->chunk_mutex);
2106 list_del_init(&device->dev_alloc_list);
2107 device->fs_devices->rw_devices--;
2108 mutex_unlock(&fs_info->chunk_mutex);
2109 }
2110
2111 mutex_unlock(&uuid_mutex);
2112 ret = btrfs_shrink_device(device, 0);
2113 if (!ret)
2114 btrfs_reada_remove_dev(device);
2115 mutex_lock(&uuid_mutex);
2116 if (ret)
2117 goto error_undo;
2118
2119 /*
2120 * TODO: the superblock still includes this device in its num_devices
2121 * counter although write_all_supers() is not locked out.
This 2122 * could give a filesystem state which requires a degraded mount. 2123 */ 2124 ret = btrfs_rm_dev_item(device); 2125 if (ret) 2126 goto error_undo; 2127 2128 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2129 btrfs_scrub_cancel_dev(device); 2130 2131 /* 2132 * the device list mutex makes sure that we don't change 2133 * the device list while someone else is writing out all 2134 * the device supers. Whoever is writing all supers, should 2135 * lock the device list mutex before getting the number of 2136 * devices in the super block (super_copy). Conversely, 2137 * whoever updates the number of devices in the super block 2138 * (super_copy) should hold the device list mutex. 2139 */ 2140 2141 /* 2142 * In normal cases the cur_devices == fs_devices. But in case 2143 * of deleting a seed device, the cur_devices should point to 2144 * its own fs_devices listed under the fs_devices->seed. 2145 */ 2146 cur_devices = device->fs_devices; 2147 mutex_lock(&fs_devices->device_list_mutex); 2148 list_del_rcu(&device->dev_list); 2149 2150 cur_devices->num_devices--; 2151 cur_devices->total_devices--; 2152 /* Update total_devices of the parent fs_devices if it's seed */ 2153 if (cur_devices != fs_devices) 2154 fs_devices->total_devices--; 2155 2156 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) 2157 cur_devices->missing_devices--; 2158 2159 btrfs_assign_next_active_device(device, NULL); 2160 2161 if (device->bdev) { 2162 cur_devices->open_devices--; 2163 /* remove sysfs entry */ 2164 btrfs_sysfs_remove_device(device); 2165 } 2166 2167 num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1; 2168 btrfs_set_super_num_devices(fs_info->super_copy, num_devices); 2169 mutex_unlock(&fs_devices->device_list_mutex); 2170 2171 /* 2172 * at this point, the device is zero sized and detached from 2173 * the devices list. All that's left is to zero out the old 2174 * supers and free the device. 2175 */ 2176 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 2177 btrfs_scratch_superblocks(fs_info, device->bdev, 2178 device->name->str); 2179 2180 btrfs_close_bdev(device); 2181 synchronize_rcu(); 2182 btrfs_free_device(device); 2183 2184 if (cur_devices->open_devices == 0) { 2185 list_del_init(&cur_devices->seed_list); 2186 close_fs_devices(cur_devices); 2187 free_fs_devices(cur_devices); 2188 } 2189 2190 out: 2191 mutex_unlock(&uuid_mutex); 2192 return ret; 2193 2194 error_undo: 2195 btrfs_reada_undo_remove_dev(device); 2196 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2197 mutex_lock(&fs_info->chunk_mutex); 2198 list_add(&device->dev_alloc_list, 2199 &fs_devices->alloc_list); 2200 device->fs_devices->rw_devices++; 2201 mutex_unlock(&fs_info->chunk_mutex); 2202 } 2203 goto out; 2204 } 2205 2206 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev) 2207 { 2208 struct btrfs_fs_devices *fs_devices; 2209 2210 lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex); 2211 2212 /* 2213 * in case of fs with no seed, srcdev->fs_devices will point 2214 * to fs_devices of fs_info. However when the dev being replaced is 2215 * a seed dev it will point to the seed's local fs_devices. In short 2216 * srcdev will have its correct fs_devices in both the cases. 
2217 */
2218 fs_devices = srcdev->fs_devices;
2219
2220 list_del_rcu(&srcdev->dev_list);
2221 list_del(&srcdev->dev_alloc_list);
2222 fs_devices->num_devices--;
2223 if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
2224 fs_devices->missing_devices--;
2225
2226 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
2227 fs_devices->rw_devices--;
2228
2229 if (srcdev->bdev)
2230 fs_devices->open_devices--;
2231 }
2232
2233 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
2234 {
2235 struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2236
2237 mutex_lock(&uuid_mutex);
2238
2239 btrfs_close_bdev(srcdev);
2240 synchronize_rcu();
2241 btrfs_free_device(srcdev);
2242
2243 /* If there are no devices left, we'd rather delete the fs_devices. */
2244 if (!fs_devices->num_devices) {
2245 /*
2246 * On a mounted FS, num_devices can't be zero unless it's a
2247 * seed. In case of a seed device being replaced, the replace
2248 * target is added to the sprout FS, so there will be no more
2249 * devices left under the seed FS.
2250 */
2251 ASSERT(fs_devices->seeding);
2252
2253 list_del_init(&fs_devices->seed_list);
2254 close_fs_devices(fs_devices);
2255 free_fs_devices(fs_devices);
2256 }
2257 mutex_unlock(&uuid_mutex);
2258 }
2259
2260 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
2261 {
2262 struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;
2263
2264 mutex_lock(&fs_devices->device_list_mutex);
2265
2266 btrfs_sysfs_remove_device(tgtdev);
2267
2268 if (tgtdev->bdev)
2269 fs_devices->open_devices--;
2270
2271 fs_devices->num_devices--;
2272
2273 btrfs_assign_next_active_device(tgtdev, NULL);
2274
2275 list_del_rcu(&tgtdev->dev_list);
2276
2277 mutex_unlock(&fs_devices->device_list_mutex);
2278
2279 /*
2280 * The update_dev_time() within btrfs_scratch_superblocks()
2281 * may lead to a call to btrfs_show_devname() which will try
2282 * to hold device_list_mutex. And here this device
2283 * is already out of the device list, so we don't have to hold
2284 * the device_list_mutex lock.
2285 */
2286 btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev,
2287 tgtdev->name->str);
2288
2289 btrfs_close_bdev(tgtdev);
2290 synchronize_rcu();
2291 btrfs_free_device(tgtdev);
2292 }
2293
2294 static struct btrfs_device *btrfs_find_device_by_path(
2295 struct btrfs_fs_info *fs_info, const char *device_path)
2296 {
2297 int ret = 0;
2298 struct btrfs_super_block *disk_super;
2299 u64 devid;
2300 u8 *dev_uuid;
2301 struct block_device *bdev;
2302 struct btrfs_device *device;
2303
2304 ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
2305 fs_info->bdev_holder, 0, &bdev, &disk_super);
2306 if (ret)
2307 return ERR_PTR(ret);
2308
2309 devid = btrfs_stack_device_id(&disk_super->dev_item);
2310 dev_uuid = disk_super->dev_item.uuid;
2311 if (btrfs_fs_incompat(fs_info, METADATA_UUID))
2312 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2313 disk_super->metadata_uuid);
2314 else
2315 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2316 disk_super->fsid);
2317
2318 btrfs_release_disk_super(disk_super);
2319 if (!device)
2320 device = ERR_PTR(-ENOENT);
2321 blkdev_put(bdev, FMODE_READ);
2322 return device;
2323 }
2324
2325 /*
2326 * Lookup a device given by device id, or the path if the id is 0.
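*
* Illustrative examples (editorial addition; the device path below is
* hypothetical):
*
*	btrfs_find_device_by_devspec(fs_info, 3, NULL);       - by devid
*	btrfs_find_device_by_devspec(fs_info, 0, "missing");  - first missing device
*	btrfs_find_device_by_devspec(fs_info, 0, "/dev/sdb"); - by path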
2327 */ 2328 struct btrfs_device *btrfs_find_device_by_devspec( 2329 struct btrfs_fs_info *fs_info, u64 devid, 2330 const char *device_path) 2331 { 2332 struct btrfs_device *device; 2333 2334 if (devid) { 2335 device = btrfs_find_device(fs_info->fs_devices, devid, NULL, 2336 NULL); 2337 if (!device) 2338 return ERR_PTR(-ENOENT); 2339 return device; 2340 } 2341 2342 if (!device_path || !device_path[0]) 2343 return ERR_PTR(-EINVAL); 2344 2345 if (strcmp(device_path, "missing") == 0) { 2346 /* Find first missing device */ 2347 list_for_each_entry(device, &fs_info->fs_devices->devices, 2348 dev_list) { 2349 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 2350 &device->dev_state) && !device->bdev) 2351 return device; 2352 } 2353 return ERR_PTR(-ENOENT); 2354 } 2355 2356 return btrfs_find_device_by_path(fs_info, device_path); 2357 } 2358 2359 /* 2360 * does all the dirty work required for changing file system's UUID. 2361 */ 2362 static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info) 2363 { 2364 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2365 struct btrfs_fs_devices *old_devices; 2366 struct btrfs_fs_devices *seed_devices; 2367 struct btrfs_super_block *disk_super = fs_info->super_copy; 2368 struct btrfs_device *device; 2369 u64 super_flags; 2370 2371 lockdep_assert_held(&uuid_mutex); 2372 if (!fs_devices->seeding) 2373 return -EINVAL; 2374 2375 /* 2376 * Private copy of the seed devices, anchored at 2377 * fs_info->fs_devices->seed_list 2378 */ 2379 seed_devices = alloc_fs_devices(NULL, NULL); 2380 if (IS_ERR(seed_devices)) 2381 return PTR_ERR(seed_devices); 2382 2383 /* 2384 * It's necessary to retain a copy of the original seed fs_devices in 2385 * fs_uuids so that filesystems which have been seeded can successfully 2386 * reference the seed device from open_seed_devices. This also supports 2387 * multiple fs seed. 2388 */ 2389 old_devices = clone_fs_devices(fs_devices); 2390 if (IS_ERR(old_devices)) { 2391 kfree(seed_devices); 2392 return PTR_ERR(old_devices); 2393 } 2394 2395 list_add(&old_devices->fs_list, &fs_uuids); 2396 2397 memcpy(seed_devices, fs_devices, sizeof(*seed_devices)); 2398 seed_devices->opened = 1; 2399 INIT_LIST_HEAD(&seed_devices->devices); 2400 INIT_LIST_HEAD(&seed_devices->alloc_list); 2401 mutex_init(&seed_devices->device_list_mutex); 2402 2403 mutex_lock(&fs_devices->device_list_mutex); 2404 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices, 2405 synchronize_rcu); 2406 list_for_each_entry(device, &seed_devices->devices, dev_list) 2407 device->fs_devices = seed_devices; 2408 2409 fs_devices->seeding = false; 2410 fs_devices->num_devices = 0; 2411 fs_devices->open_devices = 0; 2412 fs_devices->missing_devices = 0; 2413 fs_devices->rotating = false; 2414 list_add(&seed_devices->seed_list, &fs_devices->seed_list); 2415 2416 generate_random_uuid(fs_devices->fsid); 2417 memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE); 2418 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); 2419 mutex_unlock(&fs_devices->device_list_mutex); 2420 2421 super_flags = btrfs_super_flags(disk_super) & 2422 ~BTRFS_SUPER_FLAG_SEEDING; 2423 btrfs_set_super_flags(disk_super, super_flags); 2424 2425 return 0; 2426 } 2427 2428 /* 2429 * Store the expected generation for seed devices in device items. 
2430 */ 2431 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans) 2432 { 2433 struct btrfs_fs_info *fs_info = trans->fs_info; 2434 struct btrfs_root *root = fs_info->chunk_root; 2435 struct btrfs_path *path; 2436 struct extent_buffer *leaf; 2437 struct btrfs_dev_item *dev_item; 2438 struct btrfs_device *device; 2439 struct btrfs_key key; 2440 u8 fs_uuid[BTRFS_FSID_SIZE]; 2441 u8 dev_uuid[BTRFS_UUID_SIZE]; 2442 u64 devid; 2443 int ret; 2444 2445 path = btrfs_alloc_path(); 2446 if (!path) 2447 return -ENOMEM; 2448 2449 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2450 key.offset = 0; 2451 key.type = BTRFS_DEV_ITEM_KEY; 2452 2453 while (1) { 2454 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2455 if (ret < 0) 2456 goto error; 2457 2458 leaf = path->nodes[0]; 2459 next_slot: 2460 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 2461 ret = btrfs_next_leaf(root, path); 2462 if (ret > 0) 2463 break; 2464 if (ret < 0) 2465 goto error; 2466 leaf = path->nodes[0]; 2467 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2468 btrfs_release_path(path); 2469 continue; 2470 } 2471 2472 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2473 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID || 2474 key.type != BTRFS_DEV_ITEM_KEY) 2475 break; 2476 2477 dev_item = btrfs_item_ptr(leaf, path->slots[0], 2478 struct btrfs_dev_item); 2479 devid = btrfs_device_id(leaf, dev_item); 2480 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 2481 BTRFS_UUID_SIZE); 2482 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 2483 BTRFS_FSID_SIZE); 2484 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid, 2485 fs_uuid); 2486 BUG_ON(!device); /* Logic error */ 2487 2488 if (device->fs_devices->seeding) { 2489 btrfs_set_device_generation(leaf, dev_item, 2490 device->generation); 2491 btrfs_mark_buffer_dirty(leaf); 2492 } 2493 2494 path->slots[0]++; 2495 goto next_slot; 2496 } 2497 ret = 0; 2498 error: 2499 btrfs_free_path(path); 2500 return ret; 2501 } 2502 2503 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path) 2504 { 2505 struct btrfs_root *root = fs_info->dev_root; 2506 struct request_queue *q; 2507 struct btrfs_trans_handle *trans; 2508 struct btrfs_device *device; 2509 struct block_device *bdev; 2510 struct super_block *sb = fs_info->sb; 2511 struct rcu_string *name; 2512 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2513 u64 orig_super_total_bytes; 2514 u64 orig_super_num_devices; 2515 int seeding_dev = 0; 2516 int ret = 0; 2517 bool locked = false; 2518 2519 if (sb_rdonly(sb) && !fs_devices->seeding) 2520 return -EROFS; 2521 2522 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, 2523 fs_info->bdev_holder); 2524 if (IS_ERR(bdev)) 2525 return PTR_ERR(bdev); 2526 2527 if (!btrfs_check_device_zone_type(fs_info, bdev)) { 2528 ret = -EINVAL; 2529 goto error; 2530 } 2531 2532 if (fs_devices->seeding) { 2533 seeding_dev = 1; 2534 down_write(&sb->s_umount); 2535 mutex_lock(&uuid_mutex); 2536 locked = true; 2537 } 2538 2539 sync_blockdev(bdev); 2540 2541 rcu_read_lock(); 2542 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) { 2543 if (device->bdev == bdev) { 2544 ret = -EEXIST; 2545 rcu_read_unlock(); 2546 goto error; 2547 } 2548 } 2549 rcu_read_unlock(); 2550 2551 device = btrfs_alloc_device(fs_info, NULL, NULL); 2552 if (IS_ERR(device)) { 2553 /* we can safely leave the fs_devices entry around */ 2554 ret = PTR_ERR(device); 2555 goto error; 2556 } 2557 2558 name = rcu_string_strdup(device_path, 
GFP_KERNEL); 2559 if (!name) { 2560 ret = -ENOMEM; 2561 goto error_free_device; 2562 } 2563 rcu_assign_pointer(device->name, name); 2564 2565 device->fs_info = fs_info; 2566 device->bdev = bdev; 2567 2568 ret = btrfs_get_dev_zone_info(device); 2569 if (ret) 2570 goto error_free_device; 2571 2572 trans = btrfs_start_transaction(root, 0); 2573 if (IS_ERR(trans)) { 2574 ret = PTR_ERR(trans); 2575 goto error_free_zone; 2576 } 2577 2578 q = bdev_get_queue(bdev); 2579 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 2580 device->generation = trans->transid; 2581 device->io_width = fs_info->sectorsize; 2582 device->io_align = fs_info->sectorsize; 2583 device->sector_size = fs_info->sectorsize; 2584 device->total_bytes = round_down(i_size_read(bdev->bd_inode), 2585 fs_info->sectorsize); 2586 device->disk_total_bytes = device->total_bytes; 2587 device->commit_total_bytes = device->total_bytes; 2588 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2589 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 2590 device->mode = FMODE_EXCL; 2591 device->dev_stats_valid = 1; 2592 set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE); 2593 2594 if (seeding_dev) { 2595 sb->s_flags &= ~SB_RDONLY; 2596 ret = btrfs_prepare_sprout(fs_info); 2597 if (ret) { 2598 btrfs_abort_transaction(trans, ret); 2599 goto error_trans; 2600 } 2601 } 2602 2603 device->fs_devices = fs_devices; 2604 2605 mutex_lock(&fs_devices->device_list_mutex); 2606 mutex_lock(&fs_info->chunk_mutex); 2607 list_add_rcu(&device->dev_list, &fs_devices->devices); 2608 list_add(&device->dev_alloc_list, &fs_devices->alloc_list); 2609 fs_devices->num_devices++; 2610 fs_devices->open_devices++; 2611 fs_devices->rw_devices++; 2612 fs_devices->total_devices++; 2613 fs_devices->total_rw_bytes += device->total_bytes; 2614 2615 atomic64_add(device->total_bytes, &fs_info->free_chunk_space); 2616 2617 if (!blk_queue_nonrot(q)) 2618 fs_devices->rotating = true; 2619 2620 orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy); 2621 btrfs_set_super_total_bytes(fs_info->super_copy, 2622 round_down(orig_super_total_bytes + device->total_bytes, 2623 fs_info->sectorsize)); 2624 2625 orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy); 2626 btrfs_set_super_num_devices(fs_info->super_copy, 2627 orig_super_num_devices + 1); 2628 2629 /* 2630 * we've got more storage, clear any full flags on the space 2631 * infos 2632 */ 2633 btrfs_clear_space_info_full(fs_info); 2634 2635 mutex_unlock(&fs_info->chunk_mutex); 2636 2637 /* Add sysfs device entry */ 2638 btrfs_sysfs_add_device(device); 2639 2640 mutex_unlock(&fs_devices->device_list_mutex); 2641 2642 if (seeding_dev) { 2643 mutex_lock(&fs_info->chunk_mutex); 2644 ret = init_first_rw_device(trans); 2645 mutex_unlock(&fs_info->chunk_mutex); 2646 if (ret) { 2647 btrfs_abort_transaction(trans, ret); 2648 goto error_sysfs; 2649 } 2650 } 2651 2652 ret = btrfs_add_dev_item(trans, device); 2653 if (ret) { 2654 btrfs_abort_transaction(trans, ret); 2655 goto error_sysfs; 2656 } 2657 2658 if (seeding_dev) { 2659 ret = btrfs_finish_sprout(trans); 2660 if (ret) { 2661 btrfs_abort_transaction(trans, ret); 2662 goto error_sysfs; 2663 } 2664 2665 /* 2666 * fs_devices now represents the newly sprouted filesystem and 2667 * its fsid has been changed by btrfs_prepare_sprout 2668 */ 2669 btrfs_sysfs_update_sprout_fsid(fs_devices); 2670 } 2671 2672 ret = btrfs_commit_transaction(trans); 2673 2674 if (seeding_dev) { 2675 mutex_unlock(&uuid_mutex); 2676 up_write(&sb->s_umount); 2677 
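/*
 * Editorial note: the seeding locks taken above are dropped before the
 * heavier relocation work that follows; clearing 'locked' below keeps the
 * error path at the end of this function from unlocking them twice.
 */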
locked = false; 2678 2679 if (ret) /* transaction commit */ 2680 return ret; 2681 2682 ret = btrfs_relocate_sys_chunks(fs_info); 2683 if (ret < 0) 2684 btrfs_handle_fs_error(fs_info, ret, 2685 "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command."); 2686 trans = btrfs_attach_transaction(root); 2687 if (IS_ERR(trans)) { 2688 if (PTR_ERR(trans) == -ENOENT) 2689 return 0; 2690 ret = PTR_ERR(trans); 2691 trans = NULL; 2692 goto error_sysfs; 2693 } 2694 ret = btrfs_commit_transaction(trans); 2695 } 2696 2697 /* 2698 * Now that we have written a new super block to this device, check all 2699 * other fs_devices list if device_path alienates any other scanned 2700 * device. 2701 * We can ignore the return value as it typically returns -EINVAL and 2702 * only succeeds if the device was an alien. 2703 */ 2704 btrfs_forget_devices(device_path); 2705 2706 /* Update ctime/mtime for blkid or udev */ 2707 update_dev_time(device_path); 2708 2709 return ret; 2710 2711 error_sysfs: 2712 btrfs_sysfs_remove_device(device); 2713 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2714 mutex_lock(&fs_info->chunk_mutex); 2715 list_del_rcu(&device->dev_list); 2716 list_del(&device->dev_alloc_list); 2717 fs_info->fs_devices->num_devices--; 2718 fs_info->fs_devices->open_devices--; 2719 fs_info->fs_devices->rw_devices--; 2720 fs_info->fs_devices->total_devices--; 2721 fs_info->fs_devices->total_rw_bytes -= device->total_bytes; 2722 atomic64_sub(device->total_bytes, &fs_info->free_chunk_space); 2723 btrfs_set_super_total_bytes(fs_info->super_copy, 2724 orig_super_total_bytes); 2725 btrfs_set_super_num_devices(fs_info->super_copy, 2726 orig_super_num_devices); 2727 mutex_unlock(&fs_info->chunk_mutex); 2728 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2729 error_trans: 2730 if (seeding_dev) 2731 sb->s_flags |= SB_RDONLY; 2732 if (trans) 2733 btrfs_end_transaction(trans); 2734 error_free_zone: 2735 btrfs_destroy_dev_zone_info(device); 2736 error_free_device: 2737 btrfs_free_device(device); 2738 error: 2739 blkdev_put(bdev, FMODE_EXCL); 2740 if (locked) { 2741 mutex_unlock(&uuid_mutex); 2742 up_write(&sb->s_umount); 2743 } 2744 return ret; 2745 } 2746 2747 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans, 2748 struct btrfs_device *device) 2749 { 2750 int ret; 2751 struct btrfs_path *path; 2752 struct btrfs_root *root = device->fs_info->chunk_root; 2753 struct btrfs_dev_item *dev_item; 2754 struct extent_buffer *leaf; 2755 struct btrfs_key key; 2756 2757 path = btrfs_alloc_path(); 2758 if (!path) 2759 return -ENOMEM; 2760 2761 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2762 key.type = BTRFS_DEV_ITEM_KEY; 2763 key.offset = device->devid; 2764 2765 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2766 if (ret < 0) 2767 goto out; 2768 2769 if (ret > 0) { 2770 ret = -ENOENT; 2771 goto out; 2772 } 2773 2774 leaf = path->nodes[0]; 2775 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 2776 2777 btrfs_set_device_id(leaf, dev_item, device->devid); 2778 btrfs_set_device_type(leaf, dev_item, device->type); 2779 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 2780 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 2781 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 2782 btrfs_set_device_total_bytes(leaf, dev_item, 2783 btrfs_device_get_disk_total_bytes(device)); 2784 btrfs_set_device_bytes_used(leaf, dev_item, 2785 btrfs_device_get_bytes_used(device)); 2786 
btrfs_mark_buffer_dirty(leaf); 2787 2788 out: 2789 btrfs_free_path(path); 2790 return ret; 2791 } 2792 2793 int btrfs_grow_device(struct btrfs_trans_handle *trans, 2794 struct btrfs_device *device, u64 new_size) 2795 { 2796 struct btrfs_fs_info *fs_info = device->fs_info; 2797 struct btrfs_super_block *super_copy = fs_info->super_copy; 2798 u64 old_total; 2799 u64 diff; 2800 2801 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 2802 return -EACCES; 2803 2804 new_size = round_down(new_size, fs_info->sectorsize); 2805 2806 mutex_lock(&fs_info->chunk_mutex); 2807 old_total = btrfs_super_total_bytes(super_copy); 2808 diff = round_down(new_size - device->total_bytes, fs_info->sectorsize); 2809 2810 if (new_size <= device->total_bytes || 2811 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 2812 mutex_unlock(&fs_info->chunk_mutex); 2813 return -EINVAL; 2814 } 2815 2816 btrfs_set_super_total_bytes(super_copy, 2817 round_down(old_total + diff, fs_info->sectorsize)); 2818 device->fs_devices->total_rw_bytes += diff; 2819 2820 btrfs_device_set_total_bytes(device, new_size); 2821 btrfs_device_set_disk_total_bytes(device, new_size); 2822 btrfs_clear_space_info_full(device->fs_info); 2823 if (list_empty(&device->post_commit_list)) 2824 list_add_tail(&device->post_commit_list, 2825 &trans->transaction->dev_update_list); 2826 mutex_unlock(&fs_info->chunk_mutex); 2827 2828 return btrfs_update_device(trans, device); 2829 } 2830 2831 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 2832 { 2833 struct btrfs_fs_info *fs_info = trans->fs_info; 2834 struct btrfs_root *root = fs_info->chunk_root; 2835 int ret; 2836 struct btrfs_path *path; 2837 struct btrfs_key key; 2838 2839 path = btrfs_alloc_path(); 2840 if (!path) 2841 return -ENOMEM; 2842 2843 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2844 key.offset = chunk_offset; 2845 key.type = BTRFS_CHUNK_ITEM_KEY; 2846 2847 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 2848 if (ret < 0) 2849 goto out; 2850 else if (ret > 0) { /* Logic error or corruption */ 2851 btrfs_handle_fs_error(fs_info, -ENOENT, 2852 "Failed lookup while freeing chunk."); 2853 ret = -ENOENT; 2854 goto out; 2855 } 2856 2857 ret = btrfs_del_item(trans, root, path); 2858 if (ret < 0) 2859 btrfs_handle_fs_error(fs_info, ret, 2860 "Failed to delete chunk item."); 2861 out: 2862 btrfs_free_path(path); 2863 return ret; 2864 } 2865 2866 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 2867 { 2868 struct btrfs_super_block *super_copy = fs_info->super_copy; 2869 struct btrfs_disk_key *disk_key; 2870 struct btrfs_chunk *chunk; 2871 u8 *ptr; 2872 int ret = 0; 2873 u32 num_stripes; 2874 u32 array_size; 2875 u32 len = 0; 2876 u32 cur; 2877 struct btrfs_key key; 2878 2879 mutex_lock(&fs_info->chunk_mutex); 2880 array_size = btrfs_super_sys_array_size(super_copy); 2881 2882 ptr = super_copy->sys_chunk_array; 2883 cur = 0; 2884 2885 while (cur < array_size) { 2886 disk_key = (struct btrfs_disk_key *)ptr; 2887 btrfs_disk_key_to_cpu(&key, disk_key); 2888 2889 len = sizeof(*disk_key); 2890 2891 if (key.type == BTRFS_CHUNK_ITEM_KEY) { 2892 chunk = (struct btrfs_chunk *)(ptr + len); 2893 num_stripes = btrfs_stack_chunk_num_stripes(chunk); 2894 len += btrfs_chunk_item_size(num_stripes); 2895 } else { 2896 ret = -EIO; 2897 break; 2898 } 2899 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID && 2900 key.offset == chunk_offset) { 2901 memmove(ptr, ptr + len, array_size - (cur + len)); 2902 array_size -= len; 2903 
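/*
 * Editorial note: sys_chunk_array is a packed sequence of
 * (struct btrfs_disk_key, struct btrfs_chunk) pairs; the memmove above
 * cut one pair out in place, so only the recorded size shrinks and
 * ptr/cur deliberately stay put.
 */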
btrfs_set_super_sys_array_size(super_copy, array_size); 2904 } else { 2905 ptr += len; 2906 cur += len; 2907 } 2908 } 2909 mutex_unlock(&fs_info->chunk_mutex); 2910 return ret; 2911 } 2912 2913 /* 2914 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent. 2915 * @logical: Logical block offset in bytes. 2916 * @length: Length of extent in bytes. 2917 * 2918 * Return: Chunk mapping or ERR_PTR. 2919 */ 2920 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info, 2921 u64 logical, u64 length) 2922 { 2923 struct extent_map_tree *em_tree; 2924 struct extent_map *em; 2925 2926 em_tree = &fs_info->mapping_tree; 2927 read_lock(&em_tree->lock); 2928 em = lookup_extent_mapping(em_tree, logical, length); 2929 read_unlock(&em_tree->lock); 2930 2931 if (!em) { 2932 btrfs_crit(fs_info, "unable to find logical %llu length %llu", 2933 logical, length); 2934 return ERR_PTR(-EINVAL); 2935 } 2936 2937 if (em->start > logical || em->start + em->len < logical) { 2938 btrfs_crit(fs_info, 2939 "found a bad mapping, wanted %llu-%llu, found %llu-%llu", 2940 logical, length, em->start, em->start + em->len); 2941 free_extent_map(em); 2942 return ERR_PTR(-EINVAL); 2943 } 2944 2945 /* callers are responsible for dropping em's ref. */ 2946 return em; 2947 } 2948 2949 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 2950 { 2951 struct btrfs_fs_info *fs_info = trans->fs_info; 2952 struct extent_map *em; 2953 struct map_lookup *map; 2954 u64 dev_extent_len = 0; 2955 int i, ret = 0; 2956 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2957 2958 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 2959 if (IS_ERR(em)) { 2960 /* 2961 * This is a logic error, but we don't want to just rely on the 2962 * user having built with ASSERT enabled, so if ASSERT doesn't 2963 * do anything we still error out. 2964 */ 2965 ASSERT(0); 2966 return PTR_ERR(em); 2967 } 2968 map = em->map_lookup; 2969 mutex_lock(&fs_info->chunk_mutex); 2970 check_system_chunk(trans, map->type); 2971 mutex_unlock(&fs_info->chunk_mutex); 2972 2973 /* 2974 * Take the device list mutex to prevent races with the final phase of 2975 * a device replace operation that replaces the device object associated 2976 * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()). 
2977 */ 2978 mutex_lock(&fs_devices->device_list_mutex); 2979 for (i = 0; i < map->num_stripes; i++) { 2980 struct btrfs_device *device = map->stripes[i].dev; 2981 ret = btrfs_free_dev_extent(trans, device, 2982 map->stripes[i].physical, 2983 &dev_extent_len); 2984 if (ret) { 2985 mutex_unlock(&fs_devices->device_list_mutex); 2986 btrfs_abort_transaction(trans, ret); 2987 goto out; 2988 } 2989 2990 if (device->bytes_used > 0) { 2991 mutex_lock(&fs_info->chunk_mutex); 2992 btrfs_device_set_bytes_used(device, 2993 device->bytes_used - dev_extent_len); 2994 atomic64_add(dev_extent_len, &fs_info->free_chunk_space); 2995 btrfs_clear_space_info_full(fs_info); 2996 mutex_unlock(&fs_info->chunk_mutex); 2997 } 2998 2999 ret = btrfs_update_device(trans, device); 3000 if (ret) { 3001 mutex_unlock(&fs_devices->device_list_mutex); 3002 btrfs_abort_transaction(trans, ret); 3003 goto out; 3004 } 3005 } 3006 mutex_unlock(&fs_devices->device_list_mutex); 3007 3008 ret = btrfs_free_chunk(trans, chunk_offset); 3009 if (ret) { 3010 btrfs_abort_transaction(trans, ret); 3011 goto out; 3012 } 3013 3014 trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len); 3015 3016 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 3017 ret = btrfs_del_sys_chunk(fs_info, chunk_offset); 3018 if (ret) { 3019 btrfs_abort_transaction(trans, ret); 3020 goto out; 3021 } 3022 } 3023 3024 ret = btrfs_remove_block_group(trans, chunk_offset, em); 3025 if (ret) { 3026 btrfs_abort_transaction(trans, ret); 3027 goto out; 3028 } 3029 3030 out: 3031 /* once for us */ 3032 free_extent_map(em); 3033 return ret; 3034 } 3035 3036 static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 3037 { 3038 struct btrfs_root *root = fs_info->chunk_root; 3039 struct btrfs_trans_handle *trans; 3040 struct btrfs_block_group *block_group; 3041 int ret; 3042 3043 /* 3044 * Prevent races with automatic removal of unused block groups. 3045 * After we relocate and before we remove the chunk with offset 3046 * chunk_offset, automatic removal of the block group can kick in, 3047 * resulting in a failure when calling btrfs_remove_chunk() below. 3048 * 3049 * Make sure to acquire this mutex before doing a tree search (dev 3050 * or chunk trees) to find chunks. Otherwise the cleaner kthread might 3051 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after 3052 * we release the path used to search the chunk/dev tree and before 3053 * the current task acquires this mutex and calls us. 
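*
* Editorial sketch of the expected calling pattern (mirrored by
* btrfs_relocate_sys_chunks() and __btrfs_balance() below):
*
*	mutex_lock(&fs_info->delete_unused_bgs_mutex);
*	... tree search that yields chunk_offset ...
*	ret = btrfs_relocate_chunk(fs_info, chunk_offset);
*	mutex_unlock(&fs_info->delete_unused_bgs_mutex);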
3054 */ 3055 lockdep_assert_held(&fs_info->delete_unused_bgs_mutex); 3056 3057 /* step one, relocate all the extents inside this chunk */ 3058 btrfs_scrub_pause(fs_info); 3059 ret = btrfs_relocate_block_group(fs_info, chunk_offset); 3060 btrfs_scrub_continue(fs_info); 3061 if (ret) 3062 return ret; 3063 3064 block_group = btrfs_lookup_block_group(fs_info, chunk_offset); 3065 if (!block_group) 3066 return -ENOENT; 3067 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); 3068 btrfs_put_block_group(block_group); 3069 3070 trans = btrfs_start_trans_remove_block_group(root->fs_info, 3071 chunk_offset); 3072 if (IS_ERR(trans)) { 3073 ret = PTR_ERR(trans); 3074 btrfs_handle_fs_error(root->fs_info, ret, NULL); 3075 return ret; 3076 } 3077 3078 /* 3079 * step two, delete the device extents and the 3080 * chunk tree entries 3081 */ 3082 ret = btrfs_remove_chunk(trans, chunk_offset); 3083 btrfs_end_transaction(trans); 3084 return ret; 3085 } 3086 3087 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info) 3088 { 3089 struct btrfs_root *chunk_root = fs_info->chunk_root; 3090 struct btrfs_path *path; 3091 struct extent_buffer *leaf; 3092 struct btrfs_chunk *chunk; 3093 struct btrfs_key key; 3094 struct btrfs_key found_key; 3095 u64 chunk_type; 3096 bool retried = false; 3097 int failed = 0; 3098 int ret; 3099 3100 path = btrfs_alloc_path(); 3101 if (!path) 3102 return -ENOMEM; 3103 3104 again: 3105 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3106 key.offset = (u64)-1; 3107 key.type = BTRFS_CHUNK_ITEM_KEY; 3108 3109 while (1) { 3110 mutex_lock(&fs_info->delete_unused_bgs_mutex); 3111 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3112 if (ret < 0) { 3113 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3114 goto error; 3115 } 3116 BUG_ON(ret == 0); /* Corruption */ 3117 3118 ret = btrfs_previous_item(chunk_root, path, key.objectid, 3119 key.type); 3120 if (ret) 3121 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3122 if (ret < 0) 3123 goto error; 3124 if (ret > 0) 3125 break; 3126 3127 leaf = path->nodes[0]; 3128 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3129 3130 chunk = btrfs_item_ptr(leaf, path->slots[0], 3131 struct btrfs_chunk); 3132 chunk_type = btrfs_chunk_type(leaf, chunk); 3133 btrfs_release_path(path); 3134 3135 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { 3136 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3137 if (ret == -ENOSPC) 3138 failed++; 3139 else 3140 BUG_ON(ret); 3141 } 3142 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3143 3144 if (found_key.offset == 0) 3145 break; 3146 key.offset = found_key.offset - 1; 3147 } 3148 ret = 0; 3149 if (failed && !retried) { 3150 failed = 0; 3151 retried = true; 3152 goto again; 3153 } else if (WARN_ON(failed && retried)) { 3154 ret = -ENOSPC; 3155 } 3156 error: 3157 btrfs_free_path(path); 3158 return ret; 3159 } 3160 3161 /* 3162 * return 1 : allocate a data chunk successfully, 3163 * return <0: errors during allocating a data chunk, 3164 * return 0 : no need to allocate a data chunk. 
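*
* Editorial sketch of how a caller consumes the three outcomes, mirroring
* __btrfs_balance() below:
*
*	ret = btrfs_may_alloc_data_chunk(fs_info, found_key.offset);
*	if (ret < 0)
*		goto error;
*	else if (ret == 1)
*		chunk_reserved = 1;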
3165 */ 3166 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info, 3167 u64 chunk_offset) 3168 { 3169 struct btrfs_block_group *cache; 3170 u64 bytes_used; 3171 u64 chunk_type; 3172 3173 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3174 ASSERT(cache); 3175 chunk_type = cache->flags; 3176 btrfs_put_block_group(cache); 3177 3178 if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA)) 3179 return 0; 3180 3181 spin_lock(&fs_info->data_sinfo->lock); 3182 bytes_used = fs_info->data_sinfo->bytes_used; 3183 spin_unlock(&fs_info->data_sinfo->lock); 3184 3185 if (!bytes_used) { 3186 struct btrfs_trans_handle *trans; 3187 int ret; 3188 3189 trans = btrfs_join_transaction(fs_info->tree_root); 3190 if (IS_ERR(trans)) 3191 return PTR_ERR(trans); 3192 3193 ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA); 3194 btrfs_end_transaction(trans); 3195 if (ret < 0) 3196 return ret; 3197 return 1; 3198 } 3199 3200 return 0; 3201 } 3202 3203 static int insert_balance_item(struct btrfs_fs_info *fs_info, 3204 struct btrfs_balance_control *bctl) 3205 { 3206 struct btrfs_root *root = fs_info->tree_root; 3207 struct btrfs_trans_handle *trans; 3208 struct btrfs_balance_item *item; 3209 struct btrfs_disk_balance_args disk_bargs; 3210 struct btrfs_path *path; 3211 struct extent_buffer *leaf; 3212 struct btrfs_key key; 3213 int ret, err; 3214 3215 path = btrfs_alloc_path(); 3216 if (!path) 3217 return -ENOMEM; 3218 3219 trans = btrfs_start_transaction(root, 0); 3220 if (IS_ERR(trans)) { 3221 btrfs_free_path(path); 3222 return PTR_ERR(trans); 3223 } 3224 3225 key.objectid = BTRFS_BALANCE_OBJECTID; 3226 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3227 key.offset = 0; 3228 3229 ret = btrfs_insert_empty_item(trans, root, path, &key, 3230 sizeof(*item)); 3231 if (ret) 3232 goto out; 3233 3234 leaf = path->nodes[0]; 3235 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 3236 3237 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item)); 3238 3239 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data); 3240 btrfs_set_balance_data(leaf, item, &disk_bargs); 3241 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta); 3242 btrfs_set_balance_meta(leaf, item, &disk_bargs); 3243 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys); 3244 btrfs_set_balance_sys(leaf, item, &disk_bargs); 3245 3246 btrfs_set_balance_flags(leaf, item, bctl->flags); 3247 3248 btrfs_mark_buffer_dirty(leaf); 3249 out: 3250 btrfs_free_path(path); 3251 err = btrfs_commit_transaction(trans); 3252 if (err && !ret) 3253 ret = err; 3254 return ret; 3255 } 3256 3257 static int del_balance_item(struct btrfs_fs_info *fs_info) 3258 { 3259 struct btrfs_root *root = fs_info->tree_root; 3260 struct btrfs_trans_handle *trans; 3261 struct btrfs_path *path; 3262 struct btrfs_key key; 3263 int ret, err; 3264 3265 path = btrfs_alloc_path(); 3266 if (!path) 3267 return -ENOMEM; 3268 3269 trans = btrfs_start_transaction_fallback_global_rsv(root, 0); 3270 if (IS_ERR(trans)) { 3271 btrfs_free_path(path); 3272 return PTR_ERR(trans); 3273 } 3274 3275 key.objectid = BTRFS_BALANCE_OBJECTID; 3276 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3277 key.offset = 0; 3278 3279 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 3280 if (ret < 0) 3281 goto out; 3282 if (ret > 0) { 3283 ret = -ENOENT; 3284 goto out; 3285 } 3286 3287 ret = btrfs_del_item(trans, root, path); 3288 out: 3289 btrfs_free_path(path); 3290 err = btrfs_commit_transaction(trans); 3291 if (err && !ret) 3292 ret = err; 3293 return ret; 3294 } 3295 3296 /* 3297 * This is a 
heuristic used to reduce the number of chunks balanced on
3298 * resume after balance was interrupted.
3299 */
3300 static void update_balance_args(struct btrfs_balance_control *bctl)
3301 {
3302 /*
3303 * Turn on soft mode for chunk types that were being converted.
3304 */
3305 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3306 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3307 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3308 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3309 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3310 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3311
3312 /*
3313 * Turn on the usage filter if it is not already in use. The idea is
3314 * that chunks that we have already balanced should be
3315 * reasonably full. Don't do it for chunks that are being
3316 * converted - that will keep us from relocating unconverted
3317 * (albeit full) chunks.
3318 */
3319 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3320 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3321 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3322 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3323 bctl->data.usage = 90;
3324 }
3325 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3326 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3327 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3328 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3329 bctl->sys.usage = 90;
3330 }
3331 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3332 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3333 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3334 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3335 bctl->meta.usage = 90;
3336 }
3337 }
3338
3339 /*
3340 * Clear the balance status in fs_info and delete the balance item from disk.
3341 */
3342 static void reset_balance_state(struct btrfs_fs_info *fs_info)
3343 {
3344 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3345 int ret;
3346
3347 BUG_ON(!fs_info->balance_ctl);
3348
3349 spin_lock(&fs_info->balance_lock);
3350 fs_info->balance_ctl = NULL;
3351 spin_unlock(&fs_info->balance_lock);
3352
3353 kfree(bctl);
3354 ret = del_balance_item(fs_info);
3355 if (ret)
3356 btrfs_handle_fs_error(fs_info, ret, NULL);
3357 }
3358
3359 /*
3360 * Balance filters. Return 1 if chunk should be filtered out
3361 * (should not be balanced).
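*
* Editorial sketch: should_balance_chunk() below applies each filter only
* when its flag is set, for example:
*
*	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
*	    chunk_profiles_filter(chunk_type, bargs))
*		return 0;	- filtered out, not balanced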
3362 */ 3363 static int chunk_profiles_filter(u64 chunk_type, 3364 struct btrfs_balance_args *bargs) 3365 { 3366 chunk_type = chunk_to_extended(chunk_type) & 3367 BTRFS_EXTENDED_PROFILE_MASK; 3368 3369 if (bargs->profiles & chunk_type) 3370 return 0; 3371 3372 return 1; 3373 } 3374 3375 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, 3376 struct btrfs_balance_args *bargs) 3377 { 3378 struct btrfs_block_group *cache; 3379 u64 chunk_used; 3380 u64 user_thresh_min; 3381 u64 user_thresh_max; 3382 int ret = 1; 3383 3384 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3385 chunk_used = cache->used; 3386 3387 if (bargs->usage_min == 0) 3388 user_thresh_min = 0; 3389 else 3390 user_thresh_min = div_factor_fine(cache->length, 3391 bargs->usage_min); 3392 3393 if (bargs->usage_max == 0) 3394 user_thresh_max = 1; 3395 else if (bargs->usage_max > 100) 3396 user_thresh_max = cache->length; 3397 else 3398 user_thresh_max = div_factor_fine(cache->length, 3399 bargs->usage_max); 3400 3401 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max) 3402 ret = 0; 3403 3404 btrfs_put_block_group(cache); 3405 return ret; 3406 } 3407 3408 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, 3409 u64 chunk_offset, struct btrfs_balance_args *bargs) 3410 { 3411 struct btrfs_block_group *cache; 3412 u64 chunk_used, user_thresh; 3413 int ret = 1; 3414 3415 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3416 chunk_used = cache->used; 3417 3418 if (bargs->usage_min == 0) 3419 user_thresh = 1; 3420 else if (bargs->usage > 100) 3421 user_thresh = cache->length; 3422 else 3423 user_thresh = div_factor_fine(cache->length, bargs->usage); 3424 3425 if (chunk_used < user_thresh) 3426 ret = 0; 3427 3428 btrfs_put_block_group(cache); 3429 return ret; 3430 } 3431 3432 static int chunk_devid_filter(struct extent_buffer *leaf, 3433 struct btrfs_chunk *chunk, 3434 struct btrfs_balance_args *bargs) 3435 { 3436 struct btrfs_stripe *stripe; 3437 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3438 int i; 3439 3440 for (i = 0; i < num_stripes; i++) { 3441 stripe = btrfs_stripe_nr(chunk, i); 3442 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid) 3443 return 0; 3444 } 3445 3446 return 1; 3447 } 3448 3449 static u64 calc_data_stripes(u64 type, int num_stripes) 3450 { 3451 const int index = btrfs_bg_flags_to_raid_index(type); 3452 const int ncopies = btrfs_raid_array[index].ncopies; 3453 const int nparity = btrfs_raid_array[index].nparity; 3454 3455 if (nparity) 3456 return num_stripes - nparity; 3457 else 3458 return num_stripes / ncopies; 3459 } 3460 3461 /* [pstart, pend) */ 3462 static int chunk_drange_filter(struct extent_buffer *leaf, 3463 struct btrfs_chunk *chunk, 3464 struct btrfs_balance_args *bargs) 3465 { 3466 struct btrfs_stripe *stripe; 3467 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3468 u64 stripe_offset; 3469 u64 stripe_length; 3470 u64 type; 3471 int factor; 3472 int i; 3473 3474 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID)) 3475 return 0; 3476 3477 type = btrfs_chunk_type(leaf, chunk); 3478 factor = calc_data_stripes(type, num_stripes); 3479 3480 for (i = 0; i < num_stripes; i++) { 3481 stripe = btrfs_stripe_nr(chunk, i); 3482 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid) 3483 continue; 3484 3485 stripe_offset = btrfs_stripe_offset(leaf, stripe); 3486 stripe_length = btrfs_chunk_length(leaf, chunk); 3487 stripe_length = div_u64(stripe_length, factor); 3488 3489 if (stripe_offset < bargs->pend && 3490 stripe_offset 
+ stripe_length > bargs->pstart) 3491 return 0; 3492 } 3493 3494 return 1; 3495 } 3496 3497 /* [vstart, vend) */ 3498 static int chunk_vrange_filter(struct extent_buffer *leaf, 3499 struct btrfs_chunk *chunk, 3500 u64 chunk_offset, 3501 struct btrfs_balance_args *bargs) 3502 { 3503 if (chunk_offset < bargs->vend && 3504 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart) 3505 /* at least part of the chunk is inside this vrange */ 3506 return 0; 3507 3508 return 1; 3509 } 3510 3511 static int chunk_stripes_range_filter(struct extent_buffer *leaf, 3512 struct btrfs_chunk *chunk, 3513 struct btrfs_balance_args *bargs) 3514 { 3515 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3516 3517 if (bargs->stripes_min <= num_stripes 3518 && num_stripes <= bargs->stripes_max) 3519 return 0; 3520 3521 return 1; 3522 } 3523 3524 static int chunk_soft_convert_filter(u64 chunk_type, 3525 struct btrfs_balance_args *bargs) 3526 { 3527 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 3528 return 0; 3529 3530 chunk_type = chunk_to_extended(chunk_type) & 3531 BTRFS_EXTENDED_PROFILE_MASK; 3532 3533 if (bargs->target == chunk_type) 3534 return 1; 3535 3536 return 0; 3537 } 3538 3539 static int should_balance_chunk(struct extent_buffer *leaf, 3540 struct btrfs_chunk *chunk, u64 chunk_offset) 3541 { 3542 struct btrfs_fs_info *fs_info = leaf->fs_info; 3543 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3544 struct btrfs_balance_args *bargs = NULL; 3545 u64 chunk_type = btrfs_chunk_type(leaf, chunk); 3546 3547 /* type filter */ 3548 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) & 3549 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) { 3550 return 0; 3551 } 3552 3553 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3554 bargs = &bctl->data; 3555 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3556 bargs = &bctl->sys; 3557 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3558 bargs = &bctl->meta; 3559 3560 /* profiles filter */ 3561 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) && 3562 chunk_profiles_filter(chunk_type, bargs)) { 3563 return 0; 3564 } 3565 3566 /* usage filter */ 3567 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) && 3568 chunk_usage_filter(fs_info, chunk_offset, bargs)) { 3569 return 0; 3570 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3571 chunk_usage_range_filter(fs_info, chunk_offset, bargs)) { 3572 return 0; 3573 } 3574 3575 /* devid filter */ 3576 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) && 3577 chunk_devid_filter(leaf, chunk, bargs)) { 3578 return 0; 3579 } 3580 3581 /* drange filter, makes sense only with devid filter */ 3582 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) && 3583 chunk_drange_filter(leaf, chunk, bargs)) { 3584 return 0; 3585 } 3586 3587 /* vrange filter */ 3588 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) && 3589 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) { 3590 return 0; 3591 } 3592 3593 /* stripes filter */ 3594 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) && 3595 chunk_stripes_range_filter(leaf, chunk, bargs)) { 3596 return 0; 3597 } 3598 3599 /* soft profile changing mode */ 3600 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) && 3601 chunk_soft_convert_filter(chunk_type, bargs)) { 3602 return 0; 3603 } 3604 3605 /* 3606 * limited by count, must be the last filter 3607 */ 3608 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) { 3609 if (bargs->limit == 0) 3610 return 0; 3611 else 3612 bargs->limit--; 3613 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) { 3614 /* 3615 * Same logic as the 'limit' 
filter; the minimum cannot be
3616 * determined here because we do not have the global information
3617 * about the count of all chunks that satisfy the filters.
3618 */
3619 if (bargs->limit_max == 0)
3620 return 0;
3621 else
3622 bargs->limit_max--;
3623 }
3624
3625 return 1;
3626 }
3627
3628 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3629 {
3630 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3631 struct btrfs_root *chunk_root = fs_info->chunk_root;
3632 u64 chunk_type;
3633 struct btrfs_chunk *chunk;
3634 struct btrfs_path *path = NULL;
3635 struct btrfs_key key;
3636 struct btrfs_key found_key;
3637 struct extent_buffer *leaf;
3638 int slot;
3639 int ret;
3640 int enospc_errors = 0;
3641 bool counting = true;
3642 /* The single value limit and min/max limits use the same bytes in the balance args union, so save them before the counting pass consumes them. */
3643 u64 limit_data = bctl->data.limit;
3644 u64 limit_meta = bctl->meta.limit;
3645 u64 limit_sys = bctl->sys.limit;
3646 u32 count_data = 0;
3647 u32 count_meta = 0;
3648 u32 count_sys = 0;
3649 int chunk_reserved = 0;
3650
3651 path = btrfs_alloc_path();
3652 if (!path) {
3653 ret = -ENOMEM;
3654 goto error;
3655 }
3656
3657 /* zero out stat counters */
3658 spin_lock(&fs_info->balance_lock);
3659 memset(&bctl->stat, 0, sizeof(bctl->stat));
3660 spin_unlock(&fs_info->balance_lock);
3661 again:
3662 if (!counting) {
3663 /*
3664 * The single value limit and min/max limits use the same bytes
3665 * in the balance args union; restore what the counting pass consumed.
3666 */
3667 bctl->data.limit = limit_data;
3668 bctl->meta.limit = limit_meta;
3669 bctl->sys.limit = limit_sys;
3670 }
3671 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3672 key.offset = (u64)-1;
3673 key.type = BTRFS_CHUNK_ITEM_KEY;
3674
3675 while (1) {
3676 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3677 atomic_read(&fs_info->balance_cancel_req)) {
3678 ret = -ECANCELED;
3679 goto error;
3680 }
3681
3682 mutex_lock(&fs_info->delete_unused_bgs_mutex);
3683 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3684 if (ret < 0) {
3685 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3686 goto error;
3687 }
3688
3689 /*
3690 * this shouldn't happen, it means the last relocate
3691 * failed
3692 */
3693 if (ret == 0)
3694 BUG(); /* FIXME break ?
*/ 3695 3696 ret = btrfs_previous_item(chunk_root, path, 0, 3697 BTRFS_CHUNK_ITEM_KEY); 3698 if (ret) { 3699 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3700 ret = 0; 3701 break; 3702 } 3703 3704 leaf = path->nodes[0]; 3705 slot = path->slots[0]; 3706 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3707 3708 if (found_key.objectid != key.objectid) { 3709 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3710 break; 3711 } 3712 3713 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 3714 chunk_type = btrfs_chunk_type(leaf, chunk); 3715 3716 if (!counting) { 3717 spin_lock(&fs_info->balance_lock); 3718 bctl->stat.considered++; 3719 spin_unlock(&fs_info->balance_lock); 3720 } 3721 3722 ret = should_balance_chunk(leaf, chunk, found_key.offset); 3723 3724 btrfs_release_path(path); 3725 if (!ret) { 3726 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3727 goto loop; 3728 } 3729 3730 if (counting) { 3731 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3732 spin_lock(&fs_info->balance_lock); 3733 bctl->stat.expected++; 3734 spin_unlock(&fs_info->balance_lock); 3735 3736 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3737 count_data++; 3738 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3739 count_sys++; 3740 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3741 count_meta++; 3742 3743 goto loop; 3744 } 3745 3746 /* 3747 * Apply limit_min filter, no need to check if the LIMITS 3748 * filter is used, limit_min is 0 by default 3749 */ 3750 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) && 3751 count_data < bctl->data.limit_min) 3752 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) && 3753 count_meta < bctl->meta.limit_min) 3754 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) && 3755 count_sys < bctl->sys.limit_min)) { 3756 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3757 goto loop; 3758 } 3759 3760 if (!chunk_reserved) { 3761 /* 3762 * We may be relocating the only data chunk we have, 3763 * which could potentially end up with losing data's 3764 * raid profile, so lets allocate an empty one in 3765 * advance. 3766 */ 3767 ret = btrfs_may_alloc_data_chunk(fs_info, 3768 found_key.offset); 3769 if (ret < 0) { 3770 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3771 goto error; 3772 } else if (ret == 1) { 3773 chunk_reserved = 1; 3774 } 3775 } 3776 3777 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3778 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3779 if (ret == -ENOSPC) { 3780 enospc_errors++; 3781 } else if (ret == -ETXTBSY) { 3782 btrfs_info(fs_info, 3783 "skipping relocation of block group %llu due to active swapfile", 3784 found_key.offset); 3785 ret = 0; 3786 } else if (ret) { 3787 goto error; 3788 } else { 3789 spin_lock(&fs_info->balance_lock); 3790 bctl->stat.completed++; 3791 spin_unlock(&fs_info->balance_lock); 3792 } 3793 loop: 3794 if (found_key.offset == 0) 3795 break; 3796 key.offset = found_key.offset - 1; 3797 } 3798 3799 if (counting) { 3800 btrfs_release_path(path); 3801 counting = false; 3802 goto again; 3803 } 3804 error: 3805 btrfs_free_path(path); 3806 if (enospc_errors) { 3807 btrfs_info(fs_info, "%d enospc errors during balance", 3808 enospc_errors); 3809 if (!ret) 3810 ret = -ENOSPC; 3811 } 3812 3813 return ret; 3814 } 3815 3816 /** 3817 * alloc_profile_is_valid - see if a given profile is valid and reduced 3818 * @flags: profile to validate 3819 * @extended: if true @flags is treated as an extended profile 3820 */ 3821 static int alloc_profile_is_valid(u64 flags, int extended) 3822 { 3823 u64 mask = (extended ? 
BTRFS_EXTENDED_PROFILE_MASK : 3824 BTRFS_BLOCK_GROUP_PROFILE_MASK); 3825 3826 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK; 3827 3828 /* 1) check that all other bits are zeroed */ 3829 if (flags & ~mask) 3830 return 0; 3831 3832 /* 2) see if profile is reduced */ 3833 if (flags == 0) 3834 return !extended; /* "0" is valid for usual profiles */ 3835 3836 return has_single_bit_set(flags); 3837 } 3838 3839 static inline int balance_need_close(struct btrfs_fs_info *fs_info) 3840 { 3841 /* cancel requested || normal exit path */ 3842 return atomic_read(&fs_info->balance_cancel_req) || 3843 (atomic_read(&fs_info->balance_pause_req) == 0 && 3844 atomic_read(&fs_info->balance_cancel_req) == 0); 3845 } 3846 3847 /* 3848 * Validate target profile against allowed profiles and return true if it's OK. 3849 * Otherwise print the error message and return false. 3850 */ 3851 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info, 3852 const struct btrfs_balance_args *bargs, 3853 u64 allowed, const char *type) 3854 { 3855 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 3856 return true; 3857 3858 /* Profile is valid and does not have bits outside of the allowed set */ 3859 if (alloc_profile_is_valid(bargs->target, 1) && 3860 (bargs->target & ~allowed) == 0) 3861 return true; 3862 3863 btrfs_err(fs_info, "balance: invalid convert %s profile %s", 3864 type, btrfs_bg_type_to_raid_name(bargs->target)); 3865 return false; 3866 } 3867 3868 /* 3869 * Fill @buf with textual description of balance filter flags @bargs, up to 3870 * @size_buf including the terminating null. The output may be trimmed if it 3871 * does not fit into the provided buffer. 3872 */ 3873 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf, 3874 u32 size_buf) 3875 { 3876 int ret; 3877 u32 size_bp = size_buf; 3878 char *bp = buf; 3879 u64 flags = bargs->flags; 3880 char tmp_buf[128] = {'\0'}; 3881 3882 if (!flags) 3883 return; 3884 3885 #define CHECK_APPEND_NOARG(a) \ 3886 do { \ 3887 ret = snprintf(bp, size_bp, (a)); \ 3888 if (ret < 0 || ret >= size_bp) \ 3889 goto out_overflow; \ 3890 size_bp -= ret; \ 3891 bp += ret; \ 3892 } while (0) 3893 3894 #define CHECK_APPEND_1ARG(a, v1) \ 3895 do { \ 3896 ret = snprintf(bp, size_bp, (a), (v1)); \ 3897 if (ret < 0 || ret >= size_bp) \ 3898 goto out_overflow; \ 3899 size_bp -= ret; \ 3900 bp += ret; \ 3901 } while (0) 3902 3903 #define CHECK_APPEND_2ARG(a, v1, v2) \ 3904 do { \ 3905 ret = snprintf(bp, size_bp, (a), (v1), (v2)); \ 3906 if (ret < 0 || ret >= size_bp) \ 3907 goto out_overflow; \ 3908 size_bp -= ret; \ 3909 bp += ret; \ 3910 } while (0) 3911 3912 if (flags & BTRFS_BALANCE_ARGS_CONVERT) 3913 CHECK_APPEND_1ARG("convert=%s,", 3914 btrfs_bg_type_to_raid_name(bargs->target)); 3915 3916 if (flags & BTRFS_BALANCE_ARGS_SOFT) 3917 CHECK_APPEND_NOARG("soft,"); 3918 3919 if (flags & BTRFS_BALANCE_ARGS_PROFILES) { 3920 btrfs_describe_block_groups(bargs->profiles, tmp_buf, 3921 sizeof(tmp_buf)); 3922 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf); 3923 } 3924 3925 if (flags & BTRFS_BALANCE_ARGS_USAGE) 3926 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage); 3927 3928 if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) 3929 CHECK_APPEND_2ARG("usage=%u..%u,", 3930 bargs->usage_min, bargs->usage_max); 3931 3932 if (flags & BTRFS_BALANCE_ARGS_DEVID) 3933 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid); 3934 3935 if (flags & BTRFS_BALANCE_ARGS_DRANGE) 3936 CHECK_APPEND_2ARG("drange=%llu..%llu,", 3937 bargs->pstart, bargs->pend); 3938 3939 if (flags & BTRFS_BALANCE_ARGS_VRANGE) 3940 
CHECK_APPEND_2ARG("vrange=%llu..%llu,", 3941 bargs->vstart, bargs->vend); 3942 3943 if (flags & BTRFS_BALANCE_ARGS_LIMIT) 3944 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit); 3945 3946 if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE) 3947 CHECK_APPEND_2ARG("limit=%u..%u,", 3948 bargs->limit_min, bargs->limit_max); 3949 3950 if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) 3951 CHECK_APPEND_2ARG("stripes=%u..%u,", 3952 bargs->stripes_min, bargs->stripes_max); 3953 3954 #undef CHECK_APPEND_2ARG 3955 #undef CHECK_APPEND_1ARG 3956 #undef CHECK_APPEND_NOARG 3957 3958 out_overflow: 3959 3960 if (size_bp < size_buf) 3961 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */ 3962 else 3963 buf[0] = '\0'; 3964 } 3965 3966 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info) 3967 { 3968 u32 size_buf = 1024; 3969 char tmp_buf[192] = {'\0'}; 3970 char *buf; 3971 char *bp; 3972 u32 size_bp = size_buf; 3973 int ret; 3974 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3975 3976 buf = kzalloc(size_buf, GFP_KERNEL); 3977 if (!buf) 3978 return; 3979 3980 bp = buf; 3981 3982 #define CHECK_APPEND_1ARG(a, v1) \ 3983 do { \ 3984 ret = snprintf(bp, size_bp, (a), (v1)); \ 3985 if (ret < 0 || ret >= size_bp) \ 3986 goto out_overflow; \ 3987 size_bp -= ret; \ 3988 bp += ret; \ 3989 } while (0) 3990 3991 if (bctl->flags & BTRFS_BALANCE_FORCE) 3992 CHECK_APPEND_1ARG("%s", "-f "); 3993 3994 if (bctl->flags & BTRFS_BALANCE_DATA) { 3995 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf)); 3996 CHECK_APPEND_1ARG("-d%s ", tmp_buf); 3997 } 3998 3999 if (bctl->flags & BTRFS_BALANCE_METADATA) { 4000 describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf)); 4001 CHECK_APPEND_1ARG("-m%s ", tmp_buf); 4002 } 4003 4004 if (bctl->flags & BTRFS_BALANCE_SYSTEM) { 4005 describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf)); 4006 CHECK_APPEND_1ARG("-s%s ", tmp_buf); 4007 } 4008 4009 #undef CHECK_APPEND_1ARG 4010 4011 out_overflow: 4012 4013 if (size_bp < size_buf) 4014 buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */ 4015 btrfs_info(fs_info, "balance: %s %s", 4016 (bctl->flags & BTRFS_BALANCE_RESUME) ? 4017 "resume" : "start", buf); 4018 4019 kfree(buf); 4020 } 4021 4022 /* 4023 * Should be called with balance mutexe held 4024 */ 4025 int btrfs_balance(struct btrfs_fs_info *fs_info, 4026 struct btrfs_balance_control *bctl, 4027 struct btrfs_ioctl_balance_args *bargs) 4028 { 4029 u64 meta_target, data_target; 4030 u64 allowed; 4031 int mixed = 0; 4032 int ret; 4033 u64 num_devices; 4034 unsigned seq; 4035 bool reducing_redundancy; 4036 int i; 4037 4038 if (btrfs_fs_closing(fs_info) || 4039 atomic_read(&fs_info->balance_pause_req) || 4040 btrfs_should_cancel_balance(fs_info)) { 4041 ret = -EINVAL; 4042 goto out; 4043 } 4044 4045 allowed = btrfs_super_incompat_flags(fs_info->super_copy); 4046 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) 4047 mixed = 1; 4048 4049 /* 4050 * In case of mixed groups both data and meta should be picked, 4051 * and identical options should be given for both of them. 
4052 */ 4053 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA; 4054 if (mixed && (bctl->flags & allowed)) { 4055 if (!(bctl->flags & BTRFS_BALANCE_DATA) || 4056 !(bctl->flags & BTRFS_BALANCE_METADATA) || 4057 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) { 4058 btrfs_err(fs_info, 4059 "balance: mixed groups data and metadata options must be the same"); 4060 ret = -EINVAL; 4061 goto out; 4062 } 4063 } 4064 4065 /* 4066 * rw_devices will not change at the moment, device add/delete/replace 4067 * are exclusive 4068 */ 4069 num_devices = fs_info->fs_devices->rw_devices; 4070 4071 /* 4072 * SINGLE profile on-disk has no profile bit, but in-memory we have a 4073 * special bit for it, to make it easier to distinguish. Thus we need 4074 * to set it manually, or balance would refuse the profile. 4075 */ 4076 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE; 4077 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) 4078 if (num_devices >= btrfs_raid_array[i].devs_min) 4079 allowed |= btrfs_raid_array[i].bg_flag; 4080 4081 if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") || 4082 !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") || 4083 !validate_convert_profile(fs_info, &bctl->sys, allowed, "system")) { 4084 ret = -EINVAL; 4085 goto out; 4086 } 4087 4088 /* 4089 * Allow to reduce metadata or system integrity only if force set for 4090 * profiles with redundancy (copies, parity) 4091 */ 4092 allowed = 0; 4093 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) { 4094 if (btrfs_raid_array[i].ncopies >= 2 || 4095 btrfs_raid_array[i].tolerated_failures >= 1) 4096 allowed |= btrfs_raid_array[i].bg_flag; 4097 } 4098 do { 4099 seq = read_seqbegin(&fs_info->profiles_lock); 4100 4101 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4102 (fs_info->avail_system_alloc_bits & allowed) && 4103 !(bctl->sys.target & allowed)) || 4104 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4105 (fs_info->avail_metadata_alloc_bits & allowed) && 4106 !(bctl->meta.target & allowed))) 4107 reducing_redundancy = true; 4108 else 4109 reducing_redundancy = false; 4110 4111 /* if we're not converting, the target field is uninitialized */ 4112 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4113 bctl->meta.target : fs_info->avail_metadata_alloc_bits; 4114 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 
			bctl->data.target : fs_info->avail_data_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	if (reducing_redundancy) {
		if (bctl->flags & BTRFS_BALANCE_FORCE) {
			btrfs_info(fs_info,
			   "balance: force reducing metadata redundancy");
		} else {
			btrfs_err(fs_info,
	"balance: reduces metadata redundancy, use --force if you want this");
			ret = -EINVAL;
			goto out;
		}
	}

	if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
		btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
		btrfs_warn(fs_info,
	"balance: metadata profile %s has lower redundancy than data profile %s",
			   btrfs_bg_type_to_raid_name(meta_target),
			   btrfs_bg_type_to_raid_name(data_target));
	}

	if (fs_info->send_in_progress) {
		btrfs_warn_rl(fs_info,
"cannot run balance while send operations are in progress (%d in progress)",
			      fs_info->send_in_progress);
		ret = -EAGAIN;
		goto out;
	}

	ret = insert_balance_item(fs_info, bctl);
	if (ret && ret != -EEXIST)
		goto out;

	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
		BUG_ON(ret == -EEXIST);
		BUG_ON(fs_info->balance_ctl);
		spin_lock(&fs_info->balance_lock);
		fs_info->balance_ctl = bctl;
		spin_unlock(&fs_info->balance_lock);
	} else {
		BUG_ON(ret != -EEXIST);
		spin_lock(&fs_info->balance_lock);
		update_balance_args(bctl);
		spin_unlock(&fs_info->balance_lock);
	}

	ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
	set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
	describe_balance_start_or_resume(fs_info);
	mutex_unlock(&fs_info->balance_mutex);

	ret = __btrfs_balance(fs_info);

	mutex_lock(&fs_info->balance_mutex);
	if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req))
		btrfs_info(fs_info, "balance: paused");
	/*
	 * Balance can be canceled by:
	 *
	 * - Regular cancel request
	 *   Then ret == -ECANCELED and balance_cancel_req > 0
	 *
	 * - Fatal signal to "btrfs" process
	 *   Either the signal is caught by wait_reserve_ticket() and the
	 *   callers get -EINTR, or it is caught by
	 *   btrfs_should_cancel_balance() and they get -ECANCELED.
	 *   Either way, in this case balance_cancel_req = 0, and
	 *   ret == -EINTR or ret == -ECANCELED.
	 *
	 * So here we only check the return value to catch canceled balance.
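	 * A pause is reported separately above: in that case
	 * balance_pause_req is still elevated when __btrfs_balance()
	 * returns -ECANCELED.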
4187 */ 4188 else if (ret == -ECANCELED || ret == -EINTR) 4189 btrfs_info(fs_info, "balance: canceled"); 4190 else 4191 btrfs_info(fs_info, "balance: ended with status: %d", ret); 4192 4193 clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4194 4195 if (bargs) { 4196 memset(bargs, 0, sizeof(*bargs)); 4197 btrfs_update_ioctl_balance_args(fs_info, bargs); 4198 } 4199 4200 if ((ret && ret != -ECANCELED && ret != -ENOSPC) || 4201 balance_need_close(fs_info)) { 4202 reset_balance_state(fs_info); 4203 btrfs_exclop_finish(fs_info); 4204 } 4205 4206 wake_up(&fs_info->balance_wait_q); 4207 4208 return ret; 4209 out: 4210 if (bctl->flags & BTRFS_BALANCE_RESUME) 4211 reset_balance_state(fs_info); 4212 else 4213 kfree(bctl); 4214 btrfs_exclop_finish(fs_info); 4215 4216 return ret; 4217 } 4218 4219 static int balance_kthread(void *data) 4220 { 4221 struct btrfs_fs_info *fs_info = data; 4222 int ret = 0; 4223 4224 mutex_lock(&fs_info->balance_mutex); 4225 if (fs_info->balance_ctl) 4226 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL); 4227 mutex_unlock(&fs_info->balance_mutex); 4228 4229 return ret; 4230 } 4231 4232 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info) 4233 { 4234 struct task_struct *tsk; 4235 4236 mutex_lock(&fs_info->balance_mutex); 4237 if (!fs_info->balance_ctl) { 4238 mutex_unlock(&fs_info->balance_mutex); 4239 return 0; 4240 } 4241 mutex_unlock(&fs_info->balance_mutex); 4242 4243 if (btrfs_test_opt(fs_info, SKIP_BALANCE)) { 4244 btrfs_info(fs_info, "balance: resume skipped"); 4245 return 0; 4246 } 4247 4248 /* 4249 * A ro->rw remount sequence should continue with the paused balance 4250 * regardless of who pauses it, system or the user as of now, so set 4251 * the resume flag. 4252 */ 4253 spin_lock(&fs_info->balance_lock); 4254 fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME; 4255 spin_unlock(&fs_info->balance_lock); 4256 4257 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); 4258 return PTR_ERR_OR_ZERO(tsk); 4259 } 4260 4261 int btrfs_recover_balance(struct btrfs_fs_info *fs_info) 4262 { 4263 struct btrfs_balance_control *bctl; 4264 struct btrfs_balance_item *item; 4265 struct btrfs_disk_balance_args disk_bargs; 4266 struct btrfs_path *path; 4267 struct extent_buffer *leaf; 4268 struct btrfs_key key; 4269 int ret; 4270 4271 path = btrfs_alloc_path(); 4272 if (!path) 4273 return -ENOMEM; 4274 4275 key.objectid = BTRFS_BALANCE_OBJECTID; 4276 key.type = BTRFS_TEMPORARY_ITEM_KEY; 4277 key.offset = 0; 4278 4279 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4280 if (ret < 0) 4281 goto out; 4282 if (ret > 0) { /* ret = -ENOENT; */ 4283 ret = 0; 4284 goto out; 4285 } 4286 4287 bctl = kzalloc(sizeof(*bctl), GFP_NOFS); 4288 if (!bctl) { 4289 ret = -ENOMEM; 4290 goto out; 4291 } 4292 4293 leaf = path->nodes[0]; 4294 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 4295 4296 bctl->flags = btrfs_balance_flags(leaf, item); 4297 bctl->flags |= BTRFS_BALANCE_RESUME; 4298 4299 btrfs_balance_data(leaf, item, &disk_bargs); 4300 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs); 4301 btrfs_balance_meta(leaf, item, &disk_bargs); 4302 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs); 4303 btrfs_balance_sys(leaf, item, &disk_bargs); 4304 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs); 4305 4306 /* 4307 * This should never happen, as the paused balance state is recovered 4308 * during mount without any chance of other exclusive ops to collide. 
4309 * 4310 * This gives the exclusive op status to balance and keeps in paused 4311 * state until user intervention (cancel or umount). If the ownership 4312 * cannot be assigned, show a message but do not fail. The balance 4313 * is in a paused state and must have fs_info::balance_ctl properly 4314 * set up. 4315 */ 4316 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) 4317 btrfs_warn(fs_info, 4318 "balance: cannot set exclusive op status, resume manually"); 4319 4320 mutex_lock(&fs_info->balance_mutex); 4321 BUG_ON(fs_info->balance_ctl); 4322 spin_lock(&fs_info->balance_lock); 4323 fs_info->balance_ctl = bctl; 4324 spin_unlock(&fs_info->balance_lock); 4325 mutex_unlock(&fs_info->balance_mutex); 4326 out: 4327 btrfs_free_path(path); 4328 return ret; 4329 } 4330 4331 int btrfs_pause_balance(struct btrfs_fs_info *fs_info) 4332 { 4333 int ret = 0; 4334 4335 mutex_lock(&fs_info->balance_mutex); 4336 if (!fs_info->balance_ctl) { 4337 mutex_unlock(&fs_info->balance_mutex); 4338 return -ENOTCONN; 4339 } 4340 4341 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4342 atomic_inc(&fs_info->balance_pause_req); 4343 mutex_unlock(&fs_info->balance_mutex); 4344 4345 wait_event(fs_info->balance_wait_q, 4346 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4347 4348 mutex_lock(&fs_info->balance_mutex); 4349 /* we are good with balance_ctl ripped off from under us */ 4350 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4351 atomic_dec(&fs_info->balance_pause_req); 4352 } else { 4353 ret = -ENOTCONN; 4354 } 4355 4356 mutex_unlock(&fs_info->balance_mutex); 4357 return ret; 4358 } 4359 4360 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) 4361 { 4362 mutex_lock(&fs_info->balance_mutex); 4363 if (!fs_info->balance_ctl) { 4364 mutex_unlock(&fs_info->balance_mutex); 4365 return -ENOTCONN; 4366 } 4367 4368 /* 4369 * A paused balance with the item stored on disk can be resumed at 4370 * mount time if the mount is read-write. Otherwise it's still paused 4371 * and we must not allow cancelling as it deletes the item. 4372 */ 4373 if (sb_rdonly(fs_info->sb)) { 4374 mutex_unlock(&fs_info->balance_mutex); 4375 return -EROFS; 4376 } 4377 4378 atomic_inc(&fs_info->balance_cancel_req); 4379 /* 4380 * if we are running just wait and return, balance item is 4381 * deleted in btrfs_balance in this case 4382 */ 4383 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4384 mutex_unlock(&fs_info->balance_mutex); 4385 wait_event(fs_info->balance_wait_q, 4386 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4387 mutex_lock(&fs_info->balance_mutex); 4388 } else { 4389 mutex_unlock(&fs_info->balance_mutex); 4390 /* 4391 * Lock released to allow other waiters to continue, we'll 4392 * reexamine the status again. 
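		 * Another concurrent canceling task may have reset the
		 * balance state first, hence balance_ctl is rechecked below
		 * before reset_balance_state() deletes the item.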
4393 */ 4394 mutex_lock(&fs_info->balance_mutex); 4395 4396 if (fs_info->balance_ctl) { 4397 reset_balance_state(fs_info); 4398 btrfs_exclop_finish(fs_info); 4399 btrfs_info(fs_info, "balance: canceled"); 4400 } 4401 } 4402 4403 BUG_ON(fs_info->balance_ctl || 4404 test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4405 atomic_dec(&fs_info->balance_cancel_req); 4406 mutex_unlock(&fs_info->balance_mutex); 4407 return 0; 4408 } 4409 4410 int btrfs_uuid_scan_kthread(void *data) 4411 { 4412 struct btrfs_fs_info *fs_info = data; 4413 struct btrfs_root *root = fs_info->tree_root; 4414 struct btrfs_key key; 4415 struct btrfs_path *path = NULL; 4416 int ret = 0; 4417 struct extent_buffer *eb; 4418 int slot; 4419 struct btrfs_root_item root_item; 4420 u32 item_size; 4421 struct btrfs_trans_handle *trans = NULL; 4422 bool closing = false; 4423 4424 path = btrfs_alloc_path(); 4425 if (!path) { 4426 ret = -ENOMEM; 4427 goto out; 4428 } 4429 4430 key.objectid = 0; 4431 key.type = BTRFS_ROOT_ITEM_KEY; 4432 key.offset = 0; 4433 4434 while (1) { 4435 if (btrfs_fs_closing(fs_info)) { 4436 closing = true; 4437 break; 4438 } 4439 ret = btrfs_search_forward(root, &key, path, 4440 BTRFS_OLDEST_GENERATION); 4441 if (ret) { 4442 if (ret > 0) 4443 ret = 0; 4444 break; 4445 } 4446 4447 if (key.type != BTRFS_ROOT_ITEM_KEY || 4448 (key.objectid < BTRFS_FIRST_FREE_OBJECTID && 4449 key.objectid != BTRFS_FS_TREE_OBJECTID) || 4450 key.objectid > BTRFS_LAST_FREE_OBJECTID) 4451 goto skip; 4452 4453 eb = path->nodes[0]; 4454 slot = path->slots[0]; 4455 item_size = btrfs_item_size_nr(eb, slot); 4456 if (item_size < sizeof(root_item)) 4457 goto skip; 4458 4459 read_extent_buffer(eb, &root_item, 4460 btrfs_item_ptr_offset(eb, slot), 4461 (int)sizeof(root_item)); 4462 if (btrfs_root_refs(&root_item) == 0) 4463 goto skip; 4464 4465 if (!btrfs_is_empty_uuid(root_item.uuid) || 4466 !btrfs_is_empty_uuid(root_item.received_uuid)) { 4467 if (trans) 4468 goto update_tree; 4469 4470 btrfs_release_path(path); 4471 /* 4472 * 1 - subvol uuid item 4473 * 1 - received_subvol uuid item 4474 */ 4475 trans = btrfs_start_transaction(fs_info->uuid_root, 2); 4476 if (IS_ERR(trans)) { 4477 ret = PTR_ERR(trans); 4478 break; 4479 } 4480 continue; 4481 } else { 4482 goto skip; 4483 } 4484 update_tree: 4485 btrfs_release_path(path); 4486 if (!btrfs_is_empty_uuid(root_item.uuid)) { 4487 ret = btrfs_uuid_tree_add(trans, root_item.uuid, 4488 BTRFS_UUID_KEY_SUBVOL, 4489 key.objectid); 4490 if (ret < 0) { 4491 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4492 ret); 4493 break; 4494 } 4495 } 4496 4497 if (!btrfs_is_empty_uuid(root_item.received_uuid)) { 4498 ret = btrfs_uuid_tree_add(trans, 4499 root_item.received_uuid, 4500 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4501 key.objectid); 4502 if (ret < 0) { 4503 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4504 ret); 4505 break; 4506 } 4507 } 4508 4509 skip: 4510 btrfs_release_path(path); 4511 if (trans) { 4512 ret = btrfs_end_transaction(trans); 4513 trans = NULL; 4514 if (ret) 4515 break; 4516 } 4517 4518 if (key.offset < (u64)-1) { 4519 key.offset++; 4520 } else if (key.type < BTRFS_ROOT_ITEM_KEY) { 4521 key.offset = 0; 4522 key.type = BTRFS_ROOT_ITEM_KEY; 4523 } else if (key.objectid < (u64)-1) { 4524 key.offset = 0; 4525 key.type = BTRFS_ROOT_ITEM_KEY; 4526 key.objectid++; 4527 } else { 4528 break; 4529 } 4530 cond_resched(); 4531 } 4532 4533 out: 4534 btrfs_free_path(path); 4535 if (trans && !IS_ERR(trans)) 4536 btrfs_end_transaction(trans); 4537 if (ret) 4538 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread 
failed %d", ret); 4539 else if (!closing) 4540 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); 4541 up(&fs_info->uuid_tree_rescan_sem); 4542 return 0; 4543 } 4544 4545 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info) 4546 { 4547 struct btrfs_trans_handle *trans; 4548 struct btrfs_root *tree_root = fs_info->tree_root; 4549 struct btrfs_root *uuid_root; 4550 struct task_struct *task; 4551 int ret; 4552 4553 /* 4554 * 1 - root node 4555 * 1 - root item 4556 */ 4557 trans = btrfs_start_transaction(tree_root, 2); 4558 if (IS_ERR(trans)) 4559 return PTR_ERR(trans); 4560 4561 uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID); 4562 if (IS_ERR(uuid_root)) { 4563 ret = PTR_ERR(uuid_root); 4564 btrfs_abort_transaction(trans, ret); 4565 btrfs_end_transaction(trans); 4566 return ret; 4567 } 4568 4569 fs_info->uuid_root = uuid_root; 4570 4571 ret = btrfs_commit_transaction(trans); 4572 if (ret) 4573 return ret; 4574 4575 down(&fs_info->uuid_tree_rescan_sem); 4576 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid"); 4577 if (IS_ERR(task)) { 4578 /* fs_info->update_uuid_tree_gen remains 0 in all error case */ 4579 btrfs_warn(fs_info, "failed to start uuid_scan task"); 4580 up(&fs_info->uuid_tree_rescan_sem); 4581 return PTR_ERR(task); 4582 } 4583 4584 return 0; 4585 } 4586 4587 /* 4588 * shrinking a device means finding all of the device extents past 4589 * the new size, and then following the back refs to the chunks. 4590 * The chunk relocation code actually frees the device extent 4591 */ 4592 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) 4593 { 4594 struct btrfs_fs_info *fs_info = device->fs_info; 4595 struct btrfs_root *root = fs_info->dev_root; 4596 struct btrfs_trans_handle *trans; 4597 struct btrfs_dev_extent *dev_extent = NULL; 4598 struct btrfs_path *path; 4599 u64 length; 4600 u64 chunk_offset; 4601 int ret; 4602 int slot; 4603 int failed = 0; 4604 bool retried = false; 4605 struct extent_buffer *l; 4606 struct btrfs_key key; 4607 struct btrfs_super_block *super_copy = fs_info->super_copy; 4608 u64 old_total = btrfs_super_total_bytes(super_copy); 4609 u64 old_size = btrfs_device_get_total_bytes(device); 4610 u64 diff; 4611 u64 start; 4612 4613 new_size = round_down(new_size, fs_info->sectorsize); 4614 start = new_size; 4615 diff = round_down(old_size - new_size, fs_info->sectorsize); 4616 4617 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 4618 return -EINVAL; 4619 4620 path = btrfs_alloc_path(); 4621 if (!path) 4622 return -ENOMEM; 4623 4624 path->reada = READA_BACK; 4625 4626 trans = btrfs_start_transaction(root, 0); 4627 if (IS_ERR(trans)) { 4628 btrfs_free_path(path); 4629 return PTR_ERR(trans); 4630 } 4631 4632 mutex_lock(&fs_info->chunk_mutex); 4633 4634 btrfs_device_set_total_bytes(device, new_size); 4635 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 4636 device->fs_devices->total_rw_bytes -= diff; 4637 atomic64_sub(diff, &fs_info->free_chunk_space); 4638 } 4639 4640 /* 4641 * Once the device's size has been set to the new size, ensure all 4642 * in-memory chunks are synced to disk so that the loop below sees them 4643 * and relocates them accordingly. 
4644 */ 4645 if (contains_pending_extent(device, &start, diff)) { 4646 mutex_unlock(&fs_info->chunk_mutex); 4647 ret = btrfs_commit_transaction(trans); 4648 if (ret) 4649 goto done; 4650 } else { 4651 mutex_unlock(&fs_info->chunk_mutex); 4652 btrfs_end_transaction(trans); 4653 } 4654 4655 again: 4656 key.objectid = device->devid; 4657 key.offset = (u64)-1; 4658 key.type = BTRFS_DEV_EXTENT_KEY; 4659 4660 do { 4661 mutex_lock(&fs_info->delete_unused_bgs_mutex); 4662 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4663 if (ret < 0) { 4664 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 4665 goto done; 4666 } 4667 4668 ret = btrfs_previous_item(root, path, 0, key.type); 4669 if (ret) 4670 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 4671 if (ret < 0) 4672 goto done; 4673 if (ret) { 4674 ret = 0; 4675 btrfs_release_path(path); 4676 break; 4677 } 4678 4679 l = path->nodes[0]; 4680 slot = path->slots[0]; 4681 btrfs_item_key_to_cpu(l, &key, path->slots[0]); 4682 4683 if (key.objectid != device->devid) { 4684 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 4685 btrfs_release_path(path); 4686 break; 4687 } 4688 4689 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 4690 length = btrfs_dev_extent_length(l, dev_extent); 4691 4692 if (key.offset + length <= new_size) { 4693 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 4694 btrfs_release_path(path); 4695 break; 4696 } 4697 4698 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); 4699 btrfs_release_path(path); 4700 4701 /* 4702 * We may be relocating the only data chunk we have, 4703 * which could potentially end up with losing data's 4704 * raid profile, so lets allocate an empty one in 4705 * advance. 4706 */ 4707 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset); 4708 if (ret < 0) { 4709 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 4710 goto done; 4711 } 4712 4713 ret = btrfs_relocate_chunk(fs_info, chunk_offset); 4714 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 4715 if (ret == -ENOSPC) { 4716 failed++; 4717 } else if (ret) { 4718 if (ret == -ETXTBSY) { 4719 btrfs_warn(fs_info, 4720 "could not shrink block group %llu due to active swapfile", 4721 chunk_offset); 4722 } 4723 goto done; 4724 } 4725 } while (key.offset-- > 0); 4726 4727 if (failed && !retried) { 4728 failed = 0; 4729 retried = true; 4730 goto again; 4731 } else if (failed && retried) { 4732 ret = -ENOSPC; 4733 goto done; 4734 } 4735 4736 /* Shrinking succeeded, else we would be at "done". */ 4737 trans = btrfs_start_transaction(root, 0); 4738 if (IS_ERR(trans)) { 4739 ret = PTR_ERR(trans); 4740 goto done; 4741 } 4742 4743 mutex_lock(&fs_info->chunk_mutex); 4744 /* Clear all state bits beyond the shrunk device size */ 4745 clear_extent_bits(&device->alloc_state, new_size, (u64)-1, 4746 CHUNK_STATE_MASK); 4747 4748 btrfs_device_set_disk_total_bytes(device, new_size); 4749 if (list_empty(&device->post_commit_list)) 4750 list_add_tail(&device->post_commit_list, 4751 &trans->transaction->dev_update_list); 4752 4753 WARN_ON(diff > old_total); 4754 btrfs_set_super_total_bytes(super_copy, 4755 round_down(old_total - diff, fs_info->sectorsize)); 4756 mutex_unlock(&fs_info->chunk_mutex); 4757 4758 /* Now btrfs_update_device() will change the on-disk size. 
	 */
	ret = btrfs_update_device(trans, device);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	} else {
		ret = btrfs_commit_transaction(trans);
	}
done:
	btrfs_free_path(path);
	if (ret) {
		mutex_lock(&fs_info->chunk_mutex);
		btrfs_device_set_total_bytes(device, old_size);
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
			device->fs_devices->total_rw_bytes += diff;
		atomic64_add(diff, &fs_info->free_chunk_space);
		mutex_unlock(&fs_info->chunk_mutex);
	}
	return ret;
}

static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
				  struct btrfs_key *key,
				  struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	mutex_lock(&fs_info->chunk_mutex);
	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size + sizeof(disk_key)
			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
		mutex_unlock(&fs_info->chunk_mutex);
		return -EFBIG;
	}

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	mutex_unlock(&fs_info->chunk_mutex);

	return 0;
}

/*
 * Sort the devices in descending order by max_avail, then by total_avail.
 */
static int btrfs_cmp_device_info(const void *a, const void *b)
{
	const struct btrfs_device_info *di_a = a;
	const struct btrfs_device_info *di_b = b;

	if (di_a->max_avail > di_b->max_avail)
		return -1;
	if (di_a->max_avail < di_b->max_avail)
		return 1;
	if (di_a->total_avail > di_b->total_avail)
		return -1;
	if (di_a->total_avail < di_b->total_avail)
		return 1;
	return 0;
}

static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
{
	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
		return;

	btrfs_set_fs_incompat(info, RAID56);
}

static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type)
{
	if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4)))
		return;

	btrfs_set_fs_incompat(info, RAID1C34);
}

/*
 * Structure used internally by btrfs_alloc_chunk(). Wraps the parameters
 * needed during chunk allocation.
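 * The fields are filled in stages: init_alloc_chunk_ctl() seeds the limits
 * from btrfs_raid_array, gather_device_info() fills ndevs from the devices
 * that have enough free space, and decide_stripe_size() settles the final
 * stripe_size and chunk_size.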
4846 */ 4847 struct alloc_chunk_ctl { 4848 u64 start; 4849 u64 type; 4850 /* Total number of stripes to allocate */ 4851 int num_stripes; 4852 /* sub_stripes info for map */ 4853 int sub_stripes; 4854 /* Stripes per device */ 4855 int dev_stripes; 4856 /* Maximum number of devices to use */ 4857 int devs_max; 4858 /* Minimum number of devices to use */ 4859 int devs_min; 4860 /* ndevs has to be a multiple of this */ 4861 int devs_increment; 4862 /* Number of copies */ 4863 int ncopies; 4864 /* Number of stripes worth of bytes to store parity information */ 4865 int nparity; 4866 u64 max_stripe_size; 4867 u64 max_chunk_size; 4868 u64 dev_extent_min; 4869 u64 stripe_size; 4870 u64 chunk_size; 4871 int ndevs; 4872 }; 4873 4874 static void init_alloc_chunk_ctl_policy_regular( 4875 struct btrfs_fs_devices *fs_devices, 4876 struct alloc_chunk_ctl *ctl) 4877 { 4878 u64 type = ctl->type; 4879 4880 if (type & BTRFS_BLOCK_GROUP_DATA) { 4881 ctl->max_stripe_size = SZ_1G; 4882 ctl->max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE; 4883 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 4884 /* For larger filesystems, use larger metadata chunks */ 4885 if (fs_devices->total_rw_bytes > 50ULL * SZ_1G) 4886 ctl->max_stripe_size = SZ_1G; 4887 else 4888 ctl->max_stripe_size = SZ_256M; 4889 ctl->max_chunk_size = ctl->max_stripe_size; 4890 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 4891 ctl->max_stripe_size = SZ_32M; 4892 ctl->max_chunk_size = 2 * ctl->max_stripe_size; 4893 ctl->devs_max = min_t(int, ctl->devs_max, 4894 BTRFS_MAX_DEVS_SYS_CHUNK); 4895 } else { 4896 BUG(); 4897 } 4898 4899 /* We don't want a chunk larger than 10% of writable space */ 4900 ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1), 4901 ctl->max_chunk_size); 4902 ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes; 4903 } 4904 4905 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices, 4906 struct alloc_chunk_ctl *ctl) 4907 { 4908 int index = btrfs_bg_flags_to_raid_index(ctl->type); 4909 4910 ctl->sub_stripes = btrfs_raid_array[index].sub_stripes; 4911 ctl->dev_stripes = btrfs_raid_array[index].dev_stripes; 4912 ctl->devs_max = btrfs_raid_array[index].devs_max; 4913 if (!ctl->devs_max) 4914 ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info); 4915 ctl->devs_min = btrfs_raid_array[index].devs_min; 4916 ctl->devs_increment = btrfs_raid_array[index].devs_increment; 4917 ctl->ncopies = btrfs_raid_array[index].ncopies; 4918 ctl->nparity = btrfs_raid_array[index].nparity; 4919 ctl->ndevs = 0; 4920 4921 switch (fs_devices->chunk_alloc_policy) { 4922 case BTRFS_CHUNK_ALLOC_REGULAR: 4923 init_alloc_chunk_ctl_policy_regular(fs_devices, ctl); 4924 break; 4925 default: 4926 BUG(); 4927 } 4928 } 4929 4930 static int gather_device_info(struct btrfs_fs_devices *fs_devices, 4931 struct alloc_chunk_ctl *ctl, 4932 struct btrfs_device_info *devices_info) 4933 { 4934 struct btrfs_fs_info *info = fs_devices->fs_info; 4935 struct btrfs_device *device; 4936 u64 total_avail; 4937 u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes; 4938 int ret; 4939 int ndevs = 0; 4940 u64 max_avail; 4941 u64 dev_offset; 4942 4943 /* 4944 * in the first pass through the devices list, we gather information 4945 * about the available holes on each device. 
4946 */ 4947 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { 4948 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 4949 WARN(1, KERN_ERR 4950 "BTRFS: read-only device in alloc_list\n"); 4951 continue; 4952 } 4953 4954 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 4955 &device->dev_state) || 4956 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 4957 continue; 4958 4959 if (device->total_bytes > device->bytes_used) 4960 total_avail = device->total_bytes - device->bytes_used; 4961 else 4962 total_avail = 0; 4963 4964 /* If there is no space on this device, skip it. */ 4965 if (total_avail < ctl->dev_extent_min) 4966 continue; 4967 4968 ret = find_free_dev_extent(device, dev_extent_want, &dev_offset, 4969 &max_avail); 4970 if (ret && ret != -ENOSPC) 4971 return ret; 4972 4973 if (ret == 0) 4974 max_avail = dev_extent_want; 4975 4976 if (max_avail < ctl->dev_extent_min) { 4977 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 4978 btrfs_debug(info, 4979 "%s: devid %llu has no free space, have=%llu want=%llu", 4980 __func__, device->devid, max_avail, 4981 ctl->dev_extent_min); 4982 continue; 4983 } 4984 4985 if (ndevs == fs_devices->rw_devices) { 4986 WARN(1, "%s: found more than %llu devices\n", 4987 __func__, fs_devices->rw_devices); 4988 break; 4989 } 4990 devices_info[ndevs].dev_offset = dev_offset; 4991 devices_info[ndevs].max_avail = max_avail; 4992 devices_info[ndevs].total_avail = total_avail; 4993 devices_info[ndevs].dev = device; 4994 ++ndevs; 4995 } 4996 ctl->ndevs = ndevs; 4997 4998 /* 4999 * now sort the devices by hole size / available space 5000 */ 5001 sort(devices_info, ndevs, sizeof(struct btrfs_device_info), 5002 btrfs_cmp_device_info, NULL); 5003 5004 return 0; 5005 } 5006 5007 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl, 5008 struct btrfs_device_info *devices_info) 5009 { 5010 /* Number of stripes that count for block group size */ 5011 int data_stripes; 5012 5013 /* 5014 * The primary goal is to maximize the number of stripes, so use as 5015 * many devices as possible, even if the stripes are not maximum sized. 5016 * 5017 * The DUP profile stores more than one stripe per device, the 5018 * max_avail is the total size so we have to adjust. 5019 */ 5020 ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail, 5021 ctl->dev_stripes); 5022 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5023 5024 /* This will have to be fixed for RAID1 and RAID10 over more drives */ 5025 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5026 5027 /* 5028 * Use the number of data stripes to figure out how big this chunk is 5029 * really going to be in terms of logical address space, and compare 5030 * that answer with the max chunk size. If it's higher, we try to 5031 * reduce stripe_size. 5032 */ 5033 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) { 5034 /* 5035 * Reduce stripe_size, round it up to a 16MB boundary again and 5036 * then use it, unless it ends up being even bigger than the 5037 * previous value we had already. 
5038 */ 5039 ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size, 5040 data_stripes), SZ_16M), 5041 ctl->stripe_size); 5042 } 5043 5044 /* Align to BTRFS_STRIPE_LEN */ 5045 ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN); 5046 ctl->chunk_size = ctl->stripe_size * data_stripes; 5047 5048 return 0; 5049 } 5050 5051 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices, 5052 struct alloc_chunk_ctl *ctl, 5053 struct btrfs_device_info *devices_info) 5054 { 5055 struct btrfs_fs_info *info = fs_devices->fs_info; 5056 5057 /* 5058 * Round down to number of usable stripes, devs_increment can be any 5059 * number so we can't use round_down() that requires power of 2, while 5060 * rounddown is safe. 5061 */ 5062 ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment); 5063 5064 if (ctl->ndevs < ctl->devs_min) { 5065 if (btrfs_test_opt(info, ENOSPC_DEBUG)) { 5066 btrfs_debug(info, 5067 "%s: not enough devices with free space: have=%d minimum required=%d", 5068 __func__, ctl->ndevs, ctl->devs_min); 5069 } 5070 return -ENOSPC; 5071 } 5072 5073 ctl->ndevs = min(ctl->ndevs, ctl->devs_max); 5074 5075 switch (fs_devices->chunk_alloc_policy) { 5076 case BTRFS_CHUNK_ALLOC_REGULAR: 5077 return decide_stripe_size_regular(ctl, devices_info); 5078 default: 5079 BUG(); 5080 } 5081 } 5082 5083 static int create_chunk(struct btrfs_trans_handle *trans, 5084 struct alloc_chunk_ctl *ctl, 5085 struct btrfs_device_info *devices_info) 5086 { 5087 struct btrfs_fs_info *info = trans->fs_info; 5088 struct map_lookup *map = NULL; 5089 struct extent_map_tree *em_tree; 5090 struct extent_map *em; 5091 u64 start = ctl->start; 5092 u64 type = ctl->type; 5093 int ret; 5094 int i; 5095 int j; 5096 5097 map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS); 5098 if (!map) 5099 return -ENOMEM; 5100 map->num_stripes = ctl->num_stripes; 5101 5102 for (i = 0; i < ctl->ndevs; ++i) { 5103 for (j = 0; j < ctl->dev_stripes; ++j) { 5104 int s = i * ctl->dev_stripes + j; 5105 map->stripes[s].dev = devices_info[i].dev; 5106 map->stripes[s].physical = devices_info[i].dev_offset + 5107 j * ctl->stripe_size; 5108 } 5109 } 5110 map->stripe_len = BTRFS_STRIPE_LEN; 5111 map->io_align = BTRFS_STRIPE_LEN; 5112 map->io_width = BTRFS_STRIPE_LEN; 5113 map->type = type; 5114 map->sub_stripes = ctl->sub_stripes; 5115 5116 trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size); 5117 5118 em = alloc_extent_map(); 5119 if (!em) { 5120 kfree(map); 5121 return -ENOMEM; 5122 } 5123 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 5124 em->map_lookup = map; 5125 em->start = start; 5126 em->len = ctl->chunk_size; 5127 em->block_start = 0; 5128 em->block_len = em->len; 5129 em->orig_block_len = ctl->stripe_size; 5130 5131 em_tree = &info->mapping_tree; 5132 write_lock(&em_tree->lock); 5133 ret = add_extent_mapping(em_tree, em, 0); 5134 if (ret) { 5135 write_unlock(&em_tree->lock); 5136 free_extent_map(em); 5137 return ret; 5138 } 5139 write_unlock(&em_tree->lock); 5140 5141 ret = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size); 5142 if (ret) 5143 goto error_del_extent; 5144 5145 for (i = 0; i < map->num_stripes; i++) { 5146 struct btrfs_device *dev = map->stripes[i].dev; 5147 5148 btrfs_device_set_bytes_used(dev, 5149 dev->bytes_used + ctl->stripe_size); 5150 if (list_empty(&dev->post_commit_list)) 5151 list_add_tail(&dev->post_commit_list, 5152 &trans->transaction->dev_update_list); 5153 } 5154 5155 atomic64_sub(ctl->stripe_size * map->num_stripes, 5156 &info->free_chunk_space); 5157 5158 
free_extent_map(em); 5159 check_raid56_incompat_flag(info, type); 5160 check_raid1c34_incompat_flag(info, type); 5161 5162 return 0; 5163 5164 error_del_extent: 5165 write_lock(&em_tree->lock); 5166 remove_extent_mapping(em_tree, em); 5167 write_unlock(&em_tree->lock); 5168 5169 /* One for our allocation */ 5170 free_extent_map(em); 5171 /* One for the tree reference */ 5172 free_extent_map(em); 5173 5174 return ret; 5175 } 5176 5177 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type) 5178 { 5179 struct btrfs_fs_info *info = trans->fs_info; 5180 struct btrfs_fs_devices *fs_devices = info->fs_devices; 5181 struct btrfs_device_info *devices_info = NULL; 5182 struct alloc_chunk_ctl ctl; 5183 int ret; 5184 5185 lockdep_assert_held(&info->chunk_mutex); 5186 5187 if (!alloc_profile_is_valid(type, 0)) { 5188 ASSERT(0); 5189 return -EINVAL; 5190 } 5191 5192 if (list_empty(&fs_devices->alloc_list)) { 5193 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5194 btrfs_debug(info, "%s: no writable device", __func__); 5195 return -ENOSPC; 5196 } 5197 5198 if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { 5199 btrfs_err(info, "invalid chunk type 0x%llx requested", type); 5200 ASSERT(0); 5201 return -EINVAL; 5202 } 5203 5204 ctl.start = find_next_chunk(info); 5205 ctl.type = type; 5206 init_alloc_chunk_ctl(fs_devices, &ctl); 5207 5208 devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info), 5209 GFP_NOFS); 5210 if (!devices_info) 5211 return -ENOMEM; 5212 5213 ret = gather_device_info(fs_devices, &ctl, devices_info); 5214 if (ret < 0) 5215 goto out; 5216 5217 ret = decide_stripe_size(fs_devices, &ctl, devices_info); 5218 if (ret < 0) 5219 goto out; 5220 5221 ret = create_chunk(trans, &ctl, devices_info); 5222 5223 out: 5224 kfree(devices_info); 5225 return ret; 5226 } 5227 5228 /* 5229 * Chunk allocation falls into two parts. The first part does work 5230 * that makes the new allocated chunk usable, but does not do any operation 5231 * that modifies the chunk tree. The second part does the work that 5232 * requires modifying the chunk tree. This division is important for the 5233 * bootstrap process of adding storage to a seed btrfs. 5234 */ 5235 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans, 5236 u64 chunk_offset, u64 chunk_size) 5237 { 5238 struct btrfs_fs_info *fs_info = trans->fs_info; 5239 struct btrfs_root *extent_root = fs_info->extent_root; 5240 struct btrfs_root *chunk_root = fs_info->chunk_root; 5241 struct btrfs_key key; 5242 struct btrfs_device *device; 5243 struct btrfs_chunk *chunk; 5244 struct btrfs_stripe *stripe; 5245 struct extent_map *em; 5246 struct map_lookup *map; 5247 size_t item_size; 5248 u64 dev_offset; 5249 u64 stripe_size; 5250 int i = 0; 5251 int ret = 0; 5252 5253 em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size); 5254 if (IS_ERR(em)) 5255 return PTR_ERR(em); 5256 5257 map = em->map_lookup; 5258 item_size = btrfs_chunk_item_size(map->num_stripes); 5259 stripe_size = em->orig_block_len; 5260 5261 chunk = kzalloc(item_size, GFP_NOFS); 5262 if (!chunk) { 5263 ret = -ENOMEM; 5264 goto out; 5265 } 5266 5267 /* 5268 * Take the device list mutex to prevent races with the final phase of 5269 * a device replace operation that replaces the device object associated 5270 * with the map's stripes, because the device object's id can change 5271 * at any time during that final phase of the device replace operation 5272 * (dev-replace.c:btrfs_dev_replace_finishing()). 
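	 * It is held across both loops below so that the devid and uuid read
	 * from each stripe's device cannot change midway.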
5273 */ 5274 mutex_lock(&fs_info->fs_devices->device_list_mutex); 5275 for (i = 0; i < map->num_stripes; i++) { 5276 device = map->stripes[i].dev; 5277 dev_offset = map->stripes[i].physical; 5278 5279 ret = btrfs_update_device(trans, device); 5280 if (ret) 5281 break; 5282 ret = btrfs_alloc_dev_extent(trans, device, chunk_offset, 5283 dev_offset, stripe_size); 5284 if (ret) 5285 break; 5286 } 5287 if (ret) { 5288 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 5289 goto out; 5290 } 5291 5292 stripe = &chunk->stripe; 5293 for (i = 0; i < map->num_stripes; i++) { 5294 device = map->stripes[i].dev; 5295 dev_offset = map->stripes[i].physical; 5296 5297 btrfs_set_stack_stripe_devid(stripe, device->devid); 5298 btrfs_set_stack_stripe_offset(stripe, dev_offset); 5299 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE); 5300 stripe++; 5301 } 5302 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 5303 5304 btrfs_set_stack_chunk_length(chunk, chunk_size); 5305 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid); 5306 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len); 5307 btrfs_set_stack_chunk_type(chunk, map->type); 5308 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes); 5309 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len); 5310 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len); 5311 btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize); 5312 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes); 5313 5314 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 5315 key.type = BTRFS_CHUNK_ITEM_KEY; 5316 key.offset = chunk_offset; 5317 5318 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); 5319 if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 5320 /* 5321 * TODO: Cleanup of inserted chunk root in case of 5322 * failure. 5323 */ 5324 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size); 5325 } 5326 5327 out: 5328 kfree(chunk); 5329 free_extent_map(em); 5330 return ret; 5331 } 5332 5333 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans) 5334 { 5335 struct btrfs_fs_info *fs_info = trans->fs_info; 5336 u64 alloc_profile; 5337 int ret; 5338 5339 alloc_profile = btrfs_metadata_alloc_profile(fs_info); 5340 ret = btrfs_alloc_chunk(trans, alloc_profile); 5341 if (ret) 5342 return ret; 5343 5344 alloc_profile = btrfs_system_alloc_profile(fs_info); 5345 ret = btrfs_alloc_chunk(trans, alloc_profile); 5346 return ret; 5347 } 5348 5349 static inline int btrfs_chunk_max_errors(struct map_lookup *map) 5350 { 5351 const int index = btrfs_bg_flags_to_raid_index(map->type); 5352 5353 return btrfs_raid_array[index].tolerated_failures; 5354 } 5355 5356 int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset) 5357 { 5358 struct extent_map *em; 5359 struct map_lookup *map; 5360 int readonly = 0; 5361 int miss_ndevs = 0; 5362 int i; 5363 5364 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 5365 if (IS_ERR(em)) 5366 return 1; 5367 5368 map = em->map_lookup; 5369 for (i = 0; i < map->num_stripes; i++) { 5370 if (test_bit(BTRFS_DEV_STATE_MISSING, 5371 &map->stripes[i].dev->dev_state)) { 5372 miss_ndevs++; 5373 continue; 5374 } 5375 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, 5376 &map->stripes[i].dev->dev_state)) { 5377 readonly = 1; 5378 goto end; 5379 } 5380 } 5381 5382 /* 5383 * If the number of missing devices is larger than max errors, 5384 * we can not write the data into that chunk successfully, so 5385 * set it readonly. 
5386 */ 5387 if (miss_ndevs > btrfs_chunk_max_errors(map)) 5388 readonly = 1; 5389 end: 5390 free_extent_map(em); 5391 return readonly; 5392 } 5393 5394 void btrfs_mapping_tree_free(struct extent_map_tree *tree) 5395 { 5396 struct extent_map *em; 5397 5398 while (1) { 5399 write_lock(&tree->lock); 5400 em = lookup_extent_mapping(tree, 0, (u64)-1); 5401 if (em) 5402 remove_extent_mapping(tree, em); 5403 write_unlock(&tree->lock); 5404 if (!em) 5405 break; 5406 /* once for us */ 5407 free_extent_map(em); 5408 /* once for the tree */ 5409 free_extent_map(em); 5410 } 5411 } 5412 5413 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5414 { 5415 struct extent_map *em; 5416 struct map_lookup *map; 5417 int ret; 5418 5419 em = btrfs_get_chunk_map(fs_info, logical, len); 5420 if (IS_ERR(em)) 5421 /* 5422 * We could return errors for these cases, but that could get 5423 * ugly and we'd probably do the same thing which is just not do 5424 * anything else and exit, so return 1 so the callers don't try 5425 * to use other copies. 5426 */ 5427 return 1; 5428 5429 map = em->map_lookup; 5430 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK)) 5431 ret = map->num_stripes; 5432 else if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5433 ret = map->sub_stripes; 5434 else if (map->type & BTRFS_BLOCK_GROUP_RAID5) 5435 ret = 2; 5436 else if (map->type & BTRFS_BLOCK_GROUP_RAID6) 5437 /* 5438 * There could be two corrupted data stripes, we need 5439 * to loop retry in order to rebuild the correct data. 5440 * 5441 * Fail a stripe at a time on every retry except the 5442 * stripe under reconstruction. 5443 */ 5444 ret = map->num_stripes; 5445 else 5446 ret = 1; 5447 free_extent_map(em); 5448 5449 down_read(&fs_info->dev_replace.rwsem); 5450 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) && 5451 fs_info->dev_replace.tgtdev) 5452 ret++; 5453 up_read(&fs_info->dev_replace.rwsem); 5454 5455 return ret; 5456 } 5457 5458 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info, 5459 u64 logical) 5460 { 5461 struct extent_map *em; 5462 struct map_lookup *map; 5463 unsigned long len = fs_info->sectorsize; 5464 5465 em = btrfs_get_chunk_map(fs_info, logical, len); 5466 5467 if (!WARN_ON(IS_ERR(em))) { 5468 map = em->map_lookup; 5469 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5470 len = map->stripe_len * nr_data_stripes(map); 5471 free_extent_map(em); 5472 } 5473 return len; 5474 } 5475 5476 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5477 { 5478 struct extent_map *em; 5479 struct map_lookup *map; 5480 int ret = 0; 5481 5482 em = btrfs_get_chunk_map(fs_info, logical, len); 5483 5484 if(!WARN_ON(IS_ERR(em))) { 5485 map = em->map_lookup; 5486 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5487 ret = 1; 5488 free_extent_map(em); 5489 } 5490 return ret; 5491 } 5492 5493 static int find_live_mirror(struct btrfs_fs_info *fs_info, 5494 struct map_lookup *map, int first, 5495 int dev_replace_is_ongoing) 5496 { 5497 int i; 5498 int num_stripes; 5499 int preferred_mirror; 5500 int tolerance; 5501 struct btrfs_device *srcdev; 5502 5503 ASSERT((map->type & 5504 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10))); 5505 5506 if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5507 num_stripes = map->sub_stripes; 5508 else 5509 num_stripes = map->num_stripes; 5510 5511 switch (fs_info->fs_devices->read_policy) { 5512 default: 5513 /* Shouldn't happen, just warn and use pid instead of failing */ 5514 btrfs_warn_rl(fs_info, 5515 "unknown read_policy 
type %u, reset to pid", 5516 fs_info->fs_devices->read_policy); 5517 fs_info->fs_devices->read_policy = BTRFS_READ_POLICY_PID; 5518 fallthrough; 5519 case BTRFS_READ_POLICY_PID: 5520 preferred_mirror = first + (current->pid % num_stripes); 5521 break; 5522 } 5523 5524 if (dev_replace_is_ongoing && 5525 fs_info->dev_replace.cont_reading_from_srcdev_mode == 5526 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID) 5527 srcdev = fs_info->dev_replace.srcdev; 5528 else 5529 srcdev = NULL; 5530 5531 /* 5532 * try to avoid the drive that is the source drive for a 5533 * dev-replace procedure, only choose it if no other non-missing 5534 * mirror is available 5535 */ 5536 for (tolerance = 0; tolerance < 2; tolerance++) { 5537 if (map->stripes[preferred_mirror].dev->bdev && 5538 (tolerance || map->stripes[preferred_mirror].dev != srcdev)) 5539 return preferred_mirror; 5540 for (i = first; i < first + num_stripes; i++) { 5541 if (map->stripes[i].dev->bdev && 5542 (tolerance || map->stripes[i].dev != srcdev)) 5543 return i; 5544 } 5545 } 5546 5547 /* we couldn't find one that doesn't fail. Just return something 5548 * and the io error handling code will clean up eventually 5549 */ 5550 return preferred_mirror; 5551 } 5552 5553 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */ 5554 static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes) 5555 { 5556 int i; 5557 int again = 1; 5558 5559 while (again) { 5560 again = 0; 5561 for (i = 0; i < num_stripes - 1; i++) { 5562 /* Swap if parity is on a smaller index */ 5563 if (bbio->raid_map[i] > bbio->raid_map[i + 1]) { 5564 swap(bbio->stripes[i], bbio->stripes[i + 1]); 5565 swap(bbio->raid_map[i], bbio->raid_map[i + 1]); 5566 again = 1; 5567 } 5568 } 5569 } 5570 } 5571 5572 static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes) 5573 { 5574 struct btrfs_bio *bbio = kzalloc( 5575 /* the size of the btrfs_bio */ 5576 sizeof(struct btrfs_bio) + 5577 /* plus the variable array for the stripes */ 5578 sizeof(struct btrfs_bio_stripe) * (total_stripes) + 5579 /* plus the variable array for the tgt dev */ 5580 sizeof(int) * (real_stripes) + 5581 /* 5582 * plus the raid_map, which includes both the tgt dev 5583 * and the stripes 5584 */ 5585 sizeof(u64) * (total_stripes), 5586 GFP_NOFS|__GFP_NOFAIL); 5587 5588 atomic_set(&bbio->error, 0); 5589 refcount_set(&bbio->refs, 1); 5590 5591 bbio->tgtdev_map = (int *)(bbio->stripes + total_stripes); 5592 bbio->raid_map = (u64 *)(bbio->tgtdev_map + real_stripes); 5593 5594 return bbio; 5595 } 5596 5597 void btrfs_get_bbio(struct btrfs_bio *bbio) 5598 { 5599 WARN_ON(!refcount_read(&bbio->refs)); 5600 refcount_inc(&bbio->refs); 5601 } 5602 5603 void btrfs_put_bbio(struct btrfs_bio *bbio) 5604 { 5605 if (!bbio) 5606 return; 5607 if (refcount_dec_and_test(&bbio->refs)) 5608 kfree(bbio); 5609 } 5610 5611 /* can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */ 5612 /* 5613 * Please note that, discard won't be sent to target device of device 5614 * replace. 
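 * The discard bbio below is therefore allocated with zero target-device
 * stripes (alloc_btrfs_bio(num_stripes, 0)) and tgtdev_map stays unused.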
5615 */ 5616 static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info, 5617 u64 logical, u64 *length_ret, 5618 struct btrfs_bio **bbio_ret) 5619 { 5620 struct extent_map *em; 5621 struct map_lookup *map; 5622 struct btrfs_bio *bbio; 5623 u64 length = *length_ret; 5624 u64 offset; 5625 u64 stripe_nr; 5626 u64 stripe_nr_end; 5627 u64 stripe_end_offset; 5628 u64 stripe_cnt; 5629 u64 stripe_len; 5630 u64 stripe_offset; 5631 u64 num_stripes; 5632 u32 stripe_index; 5633 u32 factor = 0; 5634 u32 sub_stripes = 0; 5635 u64 stripes_per_dev = 0; 5636 u32 remaining_stripes = 0; 5637 u32 last_stripe = 0; 5638 int ret = 0; 5639 int i; 5640 5641 /* discard always return a bbio */ 5642 ASSERT(bbio_ret); 5643 5644 em = btrfs_get_chunk_map(fs_info, logical, length); 5645 if (IS_ERR(em)) 5646 return PTR_ERR(em); 5647 5648 map = em->map_lookup; 5649 /* we don't discard raid56 yet */ 5650 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 5651 ret = -EOPNOTSUPP; 5652 goto out; 5653 } 5654 5655 offset = logical - em->start; 5656 length = min_t(u64, em->start + em->len - logical, length); 5657 *length_ret = length; 5658 5659 stripe_len = map->stripe_len; 5660 /* 5661 * stripe_nr counts the total number of stripes we have to stride 5662 * to get to this block 5663 */ 5664 stripe_nr = div64_u64(offset, stripe_len); 5665 5666 /* stripe_offset is the offset of this block in its stripe */ 5667 stripe_offset = offset - stripe_nr * stripe_len; 5668 5669 stripe_nr_end = round_up(offset + length, map->stripe_len); 5670 stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len); 5671 stripe_cnt = stripe_nr_end - stripe_nr; 5672 stripe_end_offset = stripe_nr_end * map->stripe_len - 5673 (offset + length); 5674 /* 5675 * after this, stripe_nr is the number of stripes on this 5676 * device we have to walk to find the data, and stripe_index is 5677 * the number of our device in the stripe array 5678 */ 5679 num_stripes = 1; 5680 stripe_index = 0; 5681 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 5682 BTRFS_BLOCK_GROUP_RAID10)) { 5683 if (map->type & BTRFS_BLOCK_GROUP_RAID0) 5684 sub_stripes = 1; 5685 else 5686 sub_stripes = map->sub_stripes; 5687 5688 factor = map->num_stripes / sub_stripes; 5689 num_stripes = min_t(u64, map->num_stripes, 5690 sub_stripes * stripe_cnt); 5691 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 5692 stripe_index *= sub_stripes; 5693 stripes_per_dev = div_u64_rem(stripe_cnt, factor, 5694 &remaining_stripes); 5695 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe); 5696 last_stripe *= sub_stripes; 5697 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK | 5698 BTRFS_BLOCK_GROUP_DUP)) { 5699 num_stripes = map->num_stripes; 5700 } else { 5701 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 5702 &stripe_index); 5703 } 5704 5705 bbio = alloc_btrfs_bio(num_stripes, 0); 5706 if (!bbio) { 5707 ret = -ENOMEM; 5708 goto out; 5709 } 5710 5711 for (i = 0; i < num_stripes; i++) { 5712 bbio->stripes[i].physical = 5713 map->stripes[stripe_index].physical + 5714 stripe_offset + stripe_nr * map->stripe_len; 5715 bbio->stripes[i].dev = map->stripes[stripe_index].dev; 5716 5717 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 5718 BTRFS_BLOCK_GROUP_RAID10)) { 5719 bbio->stripes[i].length = stripes_per_dev * 5720 map->stripe_len; 5721 5722 if (i / sub_stripes < remaining_stripes) 5723 bbio->stripes[i].length += 5724 map->stripe_len; 5725 5726 /* 5727 * Special for the first stripe and 5728 * the last stripe: 5729 * 5730 * |-------|...|-------| 5731 * |----------| 5732 * off end_off 5733 */ 5734 if 
(i < sub_stripes) 5735 bbio->stripes[i].length -= 5736 stripe_offset; 5737 5738 if (stripe_index >= last_stripe && 5739 stripe_index <= (last_stripe + 5740 sub_stripes - 1)) 5741 bbio->stripes[i].length -= 5742 stripe_end_offset; 5743 5744 if (i == sub_stripes - 1) 5745 stripe_offset = 0; 5746 } else { 5747 bbio->stripes[i].length = length; 5748 } 5749 5750 stripe_index++; 5751 if (stripe_index == map->num_stripes) { 5752 stripe_index = 0; 5753 stripe_nr++; 5754 } 5755 } 5756 5757 *bbio_ret = bbio; 5758 bbio->map_type = map->type; 5759 bbio->num_stripes = num_stripes; 5760 out: 5761 free_extent_map(em); 5762 return ret; 5763 } 5764 5765 /* 5766 * In dev-replace case, for repair case (that's the only case where the mirror 5767 * is selected explicitly when calling btrfs_map_block), blocks left of the 5768 * left cursor can also be read from the target drive. 5769 * 5770 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the 5771 * array of stripes. 5772 * For READ, it also needs to be supported using the same mirror number. 5773 * 5774 * If the requested block is not left of the left cursor, EIO is returned. This 5775 * can happen because btrfs_num_copies() returns one more in the dev-replace 5776 * case. 5777 */ 5778 static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info, 5779 u64 logical, u64 length, 5780 u64 srcdev_devid, int *mirror_num, 5781 u64 *physical) 5782 { 5783 struct btrfs_bio *bbio = NULL; 5784 int num_stripes; 5785 int index_srcdev = 0; 5786 int found = 0; 5787 u64 physical_of_found = 0; 5788 int i; 5789 int ret = 0; 5790 5791 ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, 5792 logical, &length, &bbio, 0, 0); 5793 if (ret) { 5794 ASSERT(bbio == NULL); 5795 return ret; 5796 } 5797 5798 num_stripes = bbio->num_stripes; 5799 if (*mirror_num > num_stripes) { 5800 /* 5801 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror, 5802 * that means that the requested area is not left of the left 5803 * cursor 5804 */ 5805 btrfs_put_bbio(bbio); 5806 return -EIO; 5807 } 5808 5809 /* 5810 * process the rest of the function using the mirror_num of the source 5811 * drive. Therefore look it up first. At the end, patch the device 5812 * pointer to the one of the target drive. 5813 */ 5814 for (i = 0; i < num_stripes; i++) { 5815 if (bbio->stripes[i].dev->devid != srcdev_devid) 5816 continue; 5817 5818 /* 5819 * In case of DUP, in order to keep it simple, only add the 5820 * mirror with the lowest physical address 5821 */ 5822 if (found && 5823 physical_of_found <= bbio->stripes[i].physical) 5824 continue; 5825 5826 index_srcdev = i; 5827 found = 1; 5828 physical_of_found = bbio->stripes[i].physical; 5829 } 5830 5831 btrfs_put_bbio(bbio); 5832 5833 ASSERT(found); 5834 if (!found) 5835 return -EIO; 5836 5837 *mirror_num = index_srcdev + 1; 5838 *physical = physical_of_found; 5839 return ret; 5840 } 5841 5842 static void handle_ops_on_dev_replace(enum btrfs_map_op op, 5843 struct btrfs_bio **bbio_ret, 5844 struct btrfs_dev_replace *dev_replace, 5845 int *num_stripes_ret, int *max_errors_ret) 5846 { 5847 struct btrfs_bio *bbio = *bbio_ret; 5848 u64 srcdev_devid = dev_replace->srcdev->devid; 5849 int tgtdev_indexes = 0; 5850 int num_stripes = *num_stripes_ret; 5851 int max_errors = *max_errors_ret; 5852 int i; 5853 5854 if (op == BTRFS_MAP_WRITE) { 5855 int index_where_to_add; 5856 5857 /* 5858 * duplicate the write operations while the dev replace 5859 * procedure is running. 
Since the copying of the old disk to
5860 * the new disk takes place at run time while the filesystem is
5861 * mounted writable, the regular write operations to the old
5862 * disk have to be duplicated to go to the new disk as well.
5863 *
5864 * Note that device->missing is handled by the caller, and that
5865 * the write to the old disk is already set up in the stripes
5866 * array.
5867 */
5868 index_where_to_add = num_stripes;
5869 for (i = 0; i < num_stripes; i++) {
5870 if (bbio->stripes[i].dev->devid == srcdev_devid) {
5871 /* write to new disk, too */
5872 struct btrfs_bio_stripe *new =
5873 bbio->stripes + index_where_to_add;
5874 struct btrfs_bio_stripe *old =
5875 bbio->stripes + i;
5876
5877 new->physical = old->physical;
5878 new->length = old->length;
5879 new->dev = dev_replace->tgtdev;
5880 bbio->tgtdev_map[i] = index_where_to_add;
5881 index_where_to_add++;
5882 max_errors++;
5883 tgtdev_indexes++;
5884 }
5885 }
5886 num_stripes = index_where_to_add;
5887 } else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
5888 int index_srcdev = 0;
5889 int found = 0;
5890 u64 physical_of_found = 0;
5891
5892 /*
5893 * During the dev-replace procedure, the target drive can also
5894 * be used to read data in case it is needed to repair a corrupt
5895 * block elsewhere. This is possible if the requested area is
5896 * left of the left cursor. In this area, the target drive is a
5897 * full copy of the source drive.
5898 */
5899 for (i = 0; i < num_stripes; i++) {
5900 if (bbio->stripes[i].dev->devid == srcdev_devid) {
5901 /*
5902 * In case of DUP, in order to keep it simple,
5903 * only add the mirror with the lowest physical
5904 * address
5905 */
5906 if (found &&
5907 physical_of_found <=
5908 bbio->stripes[i].physical)
5909 continue;
5910 index_srcdev = i;
5911 found = 1;
5912 physical_of_found = bbio->stripes[i].physical;
5913 }
5914 }
5915 if (found) {
5916 struct btrfs_bio_stripe *tgtdev_stripe =
5917 bbio->stripes + num_stripes;
5918
5919 tgtdev_stripe->physical = physical_of_found;
5920 tgtdev_stripe->length =
5921 bbio->stripes[index_srcdev].length;
5922 tgtdev_stripe->dev = dev_replace->tgtdev;
5923 bbio->tgtdev_map[index_srcdev] = num_stripes;
5924
5925 tgtdev_indexes++;
5926 num_stripes++;
5927 }
5928 }
5929
5930 *num_stripes_ret = num_stripes;
5931 *max_errors_ret = max_errors;
5932 bbio->num_tgtdevs = tgtdev_indexes;
5933 *bbio_ret = bbio;
5934 }
5935
5936 static bool need_full_stripe(enum btrfs_map_op op)
5937 {
5938 return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
5939 }
5940
5941 /*
5942 * btrfs_get_io_geometry - calculates the geometry of a particular (address, len)
5943 * tuple. This information is used to calculate how big a
5944 * particular bio can get before it straddles a stripe.
5945 *
5946 * @fs_info - the filesystem
5947 * @logical - address that we want to figure out the geometry of
5948 * @len - the length of IO we are going to perform, starting at @logical
5949 * @op - type of operation - write or read
5950 * @io_geom - pointer used to return values
5951 *
5952 * Returns < 0 in case a chunk for the given logical address cannot be found,
5953 * which usually shouldn't happen unless @logical is corrupted, 0 otherwise.
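 *
 * Hypothetical worked example (an illustration, not from the original
 * comment): with stripe_len = 64K on a RAID0 chunk starting at logical
 * 1M, an IO at logical 1M + 200K gives offset = 200K, stripe_nr = 3 and
 * stripe_offset = 8K, so io_geom->len is capped at stripe_len -
 * stripe_offset = 56K and the bio never straddles a stripe boundary.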
5954 */
5955 int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
5956 u64 logical, u64 len, struct btrfs_io_geometry *io_geom)
5957 {
5958 struct extent_map *em;
5959 struct map_lookup *map;
5960 u64 offset;
5961 u64 stripe_offset;
5962 u64 stripe_nr;
5963 u64 stripe_len;
5964 u64 raid56_full_stripe_start = (u64)-1;
5965 int data_stripes;
5966 int ret = 0;
5967
5968 ASSERT(op != BTRFS_MAP_DISCARD);
5969
5970 em = btrfs_get_chunk_map(fs_info, logical, len);
5971 if (IS_ERR(em))
5972 return PTR_ERR(em);
5973
5974 map = em->map_lookup;
5975 /* Offset of this logical address in the chunk */
5976 offset = logical - em->start;
5977 /* Len of a stripe in a chunk */
5978 stripe_len = map->stripe_len;
5979 /* Stripe where this block falls in */
5980 stripe_nr = div64_u64(offset, stripe_len);
5981 /* Offset of stripe in the chunk */
5982 stripe_offset = stripe_nr * stripe_len;
5983 if (offset < stripe_offset) {
5984 btrfs_crit(fs_info,
5985 "stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
5986 stripe_offset, offset, em->start, logical, stripe_len);
5987 ret = -EINVAL;
5988 goto out;
5989 }
5990
5991 /* stripe_offset is the offset of this block in its stripe */
5992 stripe_offset = offset - stripe_offset;
5993 data_stripes = nr_data_stripes(map);
5994
5995 if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
5996 u64 max_len = stripe_len - stripe_offset;
5997
5998 /*
5999 * In case of raid56, we need to know the stripe aligned start
6000 */
6001 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6002 unsigned long full_stripe_len = stripe_len * data_stripes;
6003 raid56_full_stripe_start = offset;
6004
6005 /*
6006 * Allow a write of a full stripe, but make sure we
6007 * don't allow straddling of stripes
6008 */
6009 raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
6010 full_stripe_len);
6011 raid56_full_stripe_start *= full_stripe_len;
6012
6013 /*
6014 * For writes to RAID[56], allow a full stripeset across
6015 * all disks. For other RAID types and for RAID[56]
6016 * reads, just allow a single stripe (on a single disk).
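 *
 * Hypothetical example (illustration only): RAID5 across 3 devices has
 * data_stripes = 2, so with stripe_len = 64K the full stripe is 128K. A
 * write at chunk offset 300K gets raid56_full_stripe_start = 256K and
 * may extend up to max_len = 128K - (300K - 256K) = 84K, i.e. to the
 * end of the full stripe.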
6017 */ 6018 if (op == BTRFS_MAP_WRITE) { 6019 max_len = stripe_len * data_stripes - 6020 (offset - raid56_full_stripe_start); 6021 } 6022 } 6023 len = min_t(u64, em->len - offset, max_len); 6024 } else { 6025 len = em->len - offset; 6026 } 6027 6028 io_geom->len = len; 6029 io_geom->offset = offset; 6030 io_geom->stripe_len = stripe_len; 6031 io_geom->stripe_nr = stripe_nr; 6032 io_geom->stripe_offset = stripe_offset; 6033 io_geom->raid56_stripe_offset = raid56_full_stripe_start; 6034 6035 out: 6036 /* once for us */ 6037 free_extent_map(em); 6038 return ret; 6039 } 6040 6041 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, 6042 enum btrfs_map_op op, 6043 u64 logical, u64 *length, 6044 struct btrfs_bio **bbio_ret, 6045 int mirror_num, int need_raid_map) 6046 { 6047 struct extent_map *em; 6048 struct map_lookup *map; 6049 u64 stripe_offset; 6050 u64 stripe_nr; 6051 u64 stripe_len; 6052 u32 stripe_index; 6053 int data_stripes; 6054 int i; 6055 int ret = 0; 6056 int num_stripes; 6057 int max_errors = 0; 6058 int tgtdev_indexes = 0; 6059 struct btrfs_bio *bbio = NULL; 6060 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; 6061 int dev_replace_is_ongoing = 0; 6062 int num_alloc_stripes; 6063 int patch_the_first_stripe_for_dev_replace = 0; 6064 u64 physical_to_patch_in_first_stripe = 0; 6065 u64 raid56_full_stripe_start = (u64)-1; 6066 struct btrfs_io_geometry geom; 6067 6068 ASSERT(bbio_ret); 6069 ASSERT(op != BTRFS_MAP_DISCARD); 6070 6071 ret = btrfs_get_io_geometry(fs_info, op, logical, *length, &geom); 6072 if (ret < 0) 6073 return ret; 6074 6075 em = btrfs_get_chunk_map(fs_info, logical, *length); 6076 ASSERT(!IS_ERR(em)); 6077 map = em->map_lookup; 6078 6079 *length = geom.len; 6080 stripe_len = geom.stripe_len; 6081 stripe_nr = geom.stripe_nr; 6082 stripe_offset = geom.stripe_offset; 6083 raid56_full_stripe_start = geom.raid56_stripe_offset; 6084 data_stripes = nr_data_stripes(map); 6085 6086 down_read(&dev_replace->rwsem); 6087 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace); 6088 /* 6089 * Hold the semaphore for read during the whole operation, write is 6090 * requested at commit time but must wait. 
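 * If no replace is running, the semaphore is dropped again right below;
 * otherwise it is held until the out label at the end of this function.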
6091 */ 6092 if (!dev_replace_is_ongoing) 6093 up_read(&dev_replace->rwsem); 6094 6095 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 && 6096 !need_full_stripe(op) && dev_replace->tgtdev != NULL) { 6097 ret = get_extra_mirror_from_replace(fs_info, logical, *length, 6098 dev_replace->srcdev->devid, 6099 &mirror_num, 6100 &physical_to_patch_in_first_stripe); 6101 if (ret) 6102 goto out; 6103 else 6104 patch_the_first_stripe_for_dev_replace = 1; 6105 } else if (mirror_num > map->num_stripes) { 6106 mirror_num = 0; 6107 } 6108 6109 num_stripes = 1; 6110 stripe_index = 0; 6111 if (map->type & BTRFS_BLOCK_GROUP_RAID0) { 6112 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6113 &stripe_index); 6114 if (!need_full_stripe(op)) 6115 mirror_num = 1; 6116 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) { 6117 if (need_full_stripe(op)) 6118 num_stripes = map->num_stripes; 6119 else if (mirror_num) 6120 stripe_index = mirror_num - 1; 6121 else { 6122 stripe_index = find_live_mirror(fs_info, map, 0, 6123 dev_replace_is_ongoing); 6124 mirror_num = stripe_index + 1; 6125 } 6126 6127 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { 6128 if (need_full_stripe(op)) { 6129 num_stripes = map->num_stripes; 6130 } else if (mirror_num) { 6131 stripe_index = mirror_num - 1; 6132 } else { 6133 mirror_num = 1; 6134 } 6135 6136 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { 6137 u32 factor = map->num_stripes / map->sub_stripes; 6138 6139 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 6140 stripe_index *= map->sub_stripes; 6141 6142 if (need_full_stripe(op)) 6143 num_stripes = map->sub_stripes; 6144 else if (mirror_num) 6145 stripe_index += mirror_num - 1; 6146 else { 6147 int old_stripe_index = stripe_index; 6148 stripe_index = find_live_mirror(fs_info, map, 6149 stripe_index, 6150 dev_replace_is_ongoing); 6151 mirror_num = stripe_index - old_stripe_index + 1; 6152 } 6153 6154 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6155 if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) { 6156 /* push stripe_nr back to the start of the full stripe */ 6157 stripe_nr = div64_u64(raid56_full_stripe_start, 6158 stripe_len * data_stripes); 6159 6160 /* RAID[56] write or recovery. Return all stripes */ 6161 num_stripes = map->num_stripes; 6162 max_errors = nr_parity_stripes(map); 6163 6164 *length = map->stripe_len; 6165 stripe_index = 0; 6166 stripe_offset = 0; 6167 } else { 6168 /* 6169 * Mirror #0 or #1 means the original data block. 6170 * Mirror #2 is RAID5 parity block. 6171 * Mirror #3 is RAID6 Q block. 
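 *
 * Hypothetical example (illustration only): on a 4-device RAID6 chunk,
 * data_stripes = 2, so mirror_num == 3 selects stripe_index =
 * data_stripes + 3 - 2 = 3, the Q stripe, before the parity rotation
 * below is applied.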
6172 */ 6173 stripe_nr = div_u64_rem(stripe_nr, 6174 data_stripes, &stripe_index); 6175 if (mirror_num > 1) 6176 stripe_index = data_stripes + mirror_num - 2; 6177 6178 /* We distribute the parity blocks across stripes */ 6179 div_u64_rem(stripe_nr + stripe_index, map->num_stripes, 6180 &stripe_index); 6181 if (!need_full_stripe(op) && mirror_num <= 1) 6182 mirror_num = 1; 6183 } 6184 } else { 6185 /* 6186 * after this, stripe_nr is the number of stripes on this 6187 * device we have to walk to find the data, and stripe_index is 6188 * the number of our device in the stripe array 6189 */ 6190 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6191 &stripe_index); 6192 mirror_num = stripe_index + 1; 6193 } 6194 if (stripe_index >= map->num_stripes) { 6195 btrfs_crit(fs_info, 6196 "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u", 6197 stripe_index, map->num_stripes); 6198 ret = -EINVAL; 6199 goto out; 6200 } 6201 6202 num_alloc_stripes = num_stripes; 6203 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) { 6204 if (op == BTRFS_MAP_WRITE) 6205 num_alloc_stripes <<= 1; 6206 if (op == BTRFS_MAP_GET_READ_MIRRORS) 6207 num_alloc_stripes++; 6208 tgtdev_indexes = num_stripes; 6209 } 6210 6211 bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes); 6212 if (!bbio) { 6213 ret = -ENOMEM; 6214 goto out; 6215 } 6216 6217 for (i = 0; i < num_stripes; i++) { 6218 bbio->stripes[i].physical = map->stripes[stripe_index].physical + 6219 stripe_offset + stripe_nr * map->stripe_len; 6220 bbio->stripes[i].dev = map->stripes[stripe_index].dev; 6221 stripe_index++; 6222 } 6223 6224 /* build raid_map */ 6225 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map && 6226 (need_full_stripe(op) || mirror_num > 1)) { 6227 u64 tmp; 6228 unsigned rot; 6229 6230 /* Work out the disk rotation on this stripe-set */ 6231 div_u64_rem(stripe_nr, num_stripes, &rot); 6232 6233 /* Fill in the logical address of each stripe */ 6234 tmp = stripe_nr * data_stripes; 6235 for (i = 0; i < data_stripes; i++) 6236 bbio->raid_map[(i+rot) % num_stripes] = 6237 em->start + (tmp + i) * map->stripe_len; 6238 6239 bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE; 6240 if (map->type & BTRFS_BLOCK_GROUP_RAID6) 6241 bbio->raid_map[(i+rot+1) % num_stripes] = 6242 RAID6_Q_STRIPE; 6243 6244 sort_parity_stripes(bbio, num_stripes); 6245 } 6246 6247 if (need_full_stripe(op)) 6248 max_errors = btrfs_chunk_max_errors(map); 6249 6250 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL && 6251 need_full_stripe(op)) { 6252 handle_ops_on_dev_replace(op, &bbio, dev_replace, &num_stripes, 6253 &max_errors); 6254 } 6255 6256 *bbio_ret = bbio; 6257 bbio->map_type = map->type; 6258 bbio->num_stripes = num_stripes; 6259 bbio->max_errors = max_errors; 6260 bbio->mirror_num = mirror_num; 6261 6262 /* 6263 * this is the case that REQ_READ && dev_replace_is_ongoing && 6264 * mirror_num == num_stripes + 1 && dev_replace target drive is 6265 * available as a mirror 6266 */ 6267 if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) { 6268 WARN_ON(num_stripes > 1); 6269 bbio->stripes[0].dev = dev_replace->tgtdev; 6270 bbio->stripes[0].physical = physical_to_patch_in_first_stripe; 6271 bbio->mirror_num = map->num_stripes + 1; 6272 } 6273 out: 6274 if (dev_replace_is_ongoing) { 6275 lockdep_assert_held(&dev_replace->rwsem); 6276 /* Unlock and let waiting writers proceed */ 6277 up_read(&dev_replace->rwsem); 6278 } 6279 free_extent_map(em); 6280 return ret; 6281 } 6282 6283 int btrfs_map_block(struct 
btrfs_fs_info *fs_info, enum btrfs_map_op op, 6284 u64 logical, u64 *length, 6285 struct btrfs_bio **bbio_ret, int mirror_num) 6286 { 6287 if (op == BTRFS_MAP_DISCARD) 6288 return __btrfs_map_block_for_discard(fs_info, logical, 6289 length, bbio_ret); 6290 6291 return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 6292 mirror_num, 0); 6293 } 6294 6295 /* For Scrub/replace */ 6296 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 6297 u64 logical, u64 *length, 6298 struct btrfs_bio **bbio_ret) 6299 { 6300 return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0, 1); 6301 } 6302 6303 static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio) 6304 { 6305 bio->bi_private = bbio->private; 6306 bio->bi_end_io = bbio->end_io; 6307 bio_endio(bio); 6308 6309 btrfs_put_bbio(bbio); 6310 } 6311 6312 static void btrfs_end_bio(struct bio *bio) 6313 { 6314 struct btrfs_bio *bbio = bio->bi_private; 6315 int is_orig_bio = 0; 6316 6317 if (bio->bi_status) { 6318 atomic_inc(&bbio->error); 6319 if (bio->bi_status == BLK_STS_IOERR || 6320 bio->bi_status == BLK_STS_TARGET) { 6321 struct btrfs_device *dev = btrfs_io_bio(bio)->device; 6322 6323 ASSERT(dev->bdev); 6324 if (bio_op(bio) == REQ_OP_WRITE) 6325 btrfs_dev_stat_inc_and_print(dev, 6326 BTRFS_DEV_STAT_WRITE_ERRS); 6327 else if (!(bio->bi_opf & REQ_RAHEAD)) 6328 btrfs_dev_stat_inc_and_print(dev, 6329 BTRFS_DEV_STAT_READ_ERRS); 6330 if (bio->bi_opf & REQ_PREFLUSH) 6331 btrfs_dev_stat_inc_and_print(dev, 6332 BTRFS_DEV_STAT_FLUSH_ERRS); 6333 } 6334 } 6335 6336 if (bio == bbio->orig_bio) 6337 is_orig_bio = 1; 6338 6339 btrfs_bio_counter_dec(bbio->fs_info); 6340 6341 if (atomic_dec_and_test(&bbio->stripes_pending)) { 6342 if (!is_orig_bio) { 6343 bio_put(bio); 6344 bio = bbio->orig_bio; 6345 } 6346 6347 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; 6348 /* only send an error to the higher layers if it is 6349 * beyond the tolerance of the btrfs bio 6350 */ 6351 if (atomic_read(&bbio->error) > bbio->max_errors) { 6352 bio->bi_status = BLK_STS_IOERR; 6353 } else { 6354 /* 6355 * this bio is actually up to date, we didn't 6356 * go over the max number of errors 6357 */ 6358 bio->bi_status = BLK_STS_OK; 6359 } 6360 6361 btrfs_end_bbio(bbio, bio); 6362 } else if (!is_orig_bio) { 6363 bio_put(bio); 6364 } 6365 } 6366 6367 static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio, 6368 u64 physical, struct btrfs_device *dev) 6369 { 6370 struct btrfs_fs_info *fs_info = bbio->fs_info; 6371 6372 bio->bi_private = bbio; 6373 btrfs_io_bio(bio)->device = dev; 6374 bio->bi_end_io = btrfs_end_bio; 6375 bio->bi_iter.bi_sector = physical >> 9; 6376 btrfs_debug_in_rcu(fs_info, 6377 "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u", 6378 bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector, 6379 (unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name), 6380 dev->devid, bio->bi_iter.bi_size); 6381 bio_set_dev(bio, dev->bdev); 6382 6383 btrfs_bio_counter_inc_noblocked(fs_info); 6384 6385 btrfsic_submit_bio(bio); 6386 } 6387 6388 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical) 6389 { 6390 atomic_inc(&bbio->error); 6391 if (atomic_dec_and_test(&bbio->stripes_pending)) { 6392 /* Should be the original bio. 
*/
6393 WARN_ON(bio != bbio->orig_bio);
6394
6395 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6396 bio->bi_iter.bi_sector = logical >> 9;
6397 if (atomic_read(&bbio->error) > bbio->max_errors)
6398 bio->bi_status = BLK_STS_IOERR;
6399 else
6400 bio->bi_status = BLK_STS_OK;
6401 btrfs_end_bbio(bbio, bio);
6402 }
6403 }
6404
6405 blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
6406 int mirror_num)
6407 {
6408 struct btrfs_device *dev;
6409 struct bio *first_bio = bio;
6410 u64 logical = bio->bi_iter.bi_sector << 9;
6411 u64 length = 0;
6412 u64 map_length;
6413 int ret;
6414 int dev_nr;
6415 int total_devs;
6416 struct btrfs_bio *bbio = NULL;
6417
6418 length = bio->bi_iter.bi_size;
6419 map_length = length;
6420
6421 btrfs_bio_counter_inc_blocked(fs_info);
6422 ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
6423 &map_length, &bbio, mirror_num, 1);
6424 if (ret) {
6425 btrfs_bio_counter_dec(fs_info);
6426 return errno_to_blk_status(ret);
6427 }
6428
6429 total_devs = bbio->num_stripes;
6430 bbio->orig_bio = first_bio;
6431 bbio->private = first_bio->bi_private;
6432 bbio->end_io = first_bio->bi_end_io;
6433 bbio->fs_info = fs_info;
6434 atomic_set(&bbio->stripes_pending, bbio->num_stripes);
6435
6436 if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
6437 ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) {
6438 /* In this case, map_length has been set to the length of
6439 a single stripe; not the whole write */
6440 if (bio_op(bio) == REQ_OP_WRITE) {
6441 ret = raid56_parity_write(fs_info, bio, bbio,
6442 map_length);
6443 } else {
6444 ret = raid56_parity_recover(fs_info, bio, bbio,
6445 map_length, mirror_num, 1);
6446 }
6447
6448 btrfs_bio_counter_dec(fs_info);
6449 return errno_to_blk_status(ret);
6450 }
6451
6452 if (map_length < length) {
6453 btrfs_crit(fs_info,
6454 "mapping failed logical %llu bio len %llu len %llu",
6455 logical, length, map_length);
6456 BUG();
6457 }
6458
6459 for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
6460 dev = bbio->stripes[dev_nr].dev;
6461 if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
6462 &dev->dev_state) ||
6463 (bio_op(first_bio) == REQ_OP_WRITE &&
6464 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
6465 bbio_error(bbio, first_bio, logical);
6466 continue;
6467 }
6468
6469 if (dev_nr < total_devs - 1)
6470 bio = btrfs_bio_clone(first_bio);
6471 else
6472 bio = first_bio;
6473
6474 submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical, dev);
6475 }
6476 btrfs_bio_counter_dec(fs_info);
6477 return BLK_STS_OK;
6478 }
6479
6480 /*
6481 * Find a device specified by @devid or @uuid in the list of @fs_devices, or
6482 * return NULL.
6483 *
6484 * If devid and uuid are both specified, the match must be exact, otherwise
6485 * only devid is used.
6486 *
6487 * Seed devices attached to @fs_devices are traversed as well.
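 *
 * For example (illustration only), btrfs_find_device(fs_devices, devid,
 * NULL, NULL) matches on devid alone and ignores both uuid and fsid.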
6488 */ 6489 struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices, 6490 u64 devid, u8 *uuid, u8 *fsid) 6491 { 6492 struct btrfs_device *device; 6493 struct btrfs_fs_devices *seed_devs; 6494 6495 if (!fsid || !memcmp(fs_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE)) { 6496 list_for_each_entry(device, &fs_devices->devices, dev_list) { 6497 if (device->devid == devid && 6498 (!uuid || memcmp(device->uuid, uuid, 6499 BTRFS_UUID_SIZE) == 0)) 6500 return device; 6501 } 6502 } 6503 6504 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 6505 if (!fsid || 6506 !memcmp(seed_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE)) { 6507 list_for_each_entry(device, &seed_devs->devices, 6508 dev_list) { 6509 if (device->devid == devid && 6510 (!uuid || memcmp(device->uuid, uuid, 6511 BTRFS_UUID_SIZE) == 0)) 6512 return device; 6513 } 6514 } 6515 } 6516 6517 return NULL; 6518 } 6519 6520 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices, 6521 u64 devid, u8 *dev_uuid) 6522 { 6523 struct btrfs_device *device; 6524 unsigned int nofs_flag; 6525 6526 /* 6527 * We call this under the chunk_mutex, so we want to use NOFS for this 6528 * allocation, however we don't want to change btrfs_alloc_device() to 6529 * always do NOFS because we use it in a lot of other GFP_KERNEL safe 6530 * places. 6531 */ 6532 nofs_flag = memalloc_nofs_save(); 6533 device = btrfs_alloc_device(NULL, &devid, dev_uuid); 6534 memalloc_nofs_restore(nofs_flag); 6535 if (IS_ERR(device)) 6536 return device; 6537 6538 list_add(&device->dev_list, &fs_devices->devices); 6539 device->fs_devices = fs_devices; 6540 fs_devices->num_devices++; 6541 6542 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 6543 fs_devices->missing_devices++; 6544 6545 return device; 6546 } 6547 6548 /** 6549 * btrfs_alloc_device - allocate struct btrfs_device 6550 * @fs_info: used only for generating a new devid, can be NULL if 6551 * devid is provided (i.e. @devid != NULL). 6552 * @devid: a pointer to devid for this device. If NULL a new devid 6553 * is generated. 6554 * @uuid: a pointer to UUID for this device. If NULL a new UUID 6555 * is generated. 6556 * 6557 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR() 6558 * on error. Returned struct is not linked onto any lists and must be 6559 * destroyed with btrfs_free_device. 
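 *
 * A minimal usage sketch (illustration only, error handling trimmed):
 *
 *	dev = btrfs_alloc_device(fs_info, NULL, NULL);
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 *	...
 *	btrfs_free_device(dev);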
6560 */ 6561 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, 6562 const u64 *devid, 6563 const u8 *uuid) 6564 { 6565 struct btrfs_device *dev; 6566 u64 tmp; 6567 6568 if (WARN_ON(!devid && !fs_info)) 6569 return ERR_PTR(-EINVAL); 6570 6571 dev = __alloc_device(fs_info); 6572 if (IS_ERR(dev)) 6573 return dev; 6574 6575 if (devid) 6576 tmp = *devid; 6577 else { 6578 int ret; 6579 6580 ret = find_next_devid(fs_info, &tmp); 6581 if (ret) { 6582 btrfs_free_device(dev); 6583 return ERR_PTR(ret); 6584 } 6585 } 6586 dev->devid = tmp; 6587 6588 if (uuid) 6589 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE); 6590 else 6591 generate_random_uuid(dev->uuid); 6592 6593 return dev; 6594 } 6595 6596 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info, 6597 u64 devid, u8 *uuid, bool error) 6598 { 6599 if (error) 6600 btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing", 6601 devid, uuid); 6602 else 6603 btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing", 6604 devid, uuid); 6605 } 6606 6607 static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes) 6608 { 6609 int index = btrfs_bg_flags_to_raid_index(type); 6610 int ncopies = btrfs_raid_array[index].ncopies; 6611 const int nparity = btrfs_raid_array[index].nparity; 6612 int data_stripes; 6613 6614 if (nparity) 6615 data_stripes = num_stripes - nparity; 6616 else 6617 data_stripes = num_stripes / ncopies; 6618 6619 return div_u64(chunk_len, data_stripes); 6620 } 6621 6622 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf, 6623 struct btrfs_chunk *chunk) 6624 { 6625 struct btrfs_fs_info *fs_info = leaf->fs_info; 6626 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 6627 struct map_lookup *map; 6628 struct extent_map *em; 6629 u64 logical; 6630 u64 length; 6631 u64 devid; 6632 u8 uuid[BTRFS_UUID_SIZE]; 6633 int num_stripes; 6634 int ret; 6635 int i; 6636 6637 logical = key->offset; 6638 length = btrfs_chunk_length(leaf, chunk); 6639 num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 6640 6641 /* 6642 * Only need to verify chunk item if we're reading from sys chunk array, 6643 * as chunk item in tree block is already verified by tree-checker. 6644 */ 6645 if (leaf->start == BTRFS_SUPER_INFO_OFFSET) { 6646 ret = btrfs_check_chunk_valid(leaf, chunk, logical); 6647 if (ret) 6648 return ret; 6649 } 6650 6651 read_lock(&map_tree->lock); 6652 em = lookup_extent_mapping(map_tree, logical, 1); 6653 read_unlock(&map_tree->lock); 6654 6655 /* already mapped? 
*/ 6656 if (em && em->start <= logical && em->start + em->len > logical) { 6657 free_extent_map(em); 6658 return 0; 6659 } else if (em) { 6660 free_extent_map(em); 6661 } 6662 6663 em = alloc_extent_map(); 6664 if (!em) 6665 return -ENOMEM; 6666 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); 6667 if (!map) { 6668 free_extent_map(em); 6669 return -ENOMEM; 6670 } 6671 6672 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 6673 em->map_lookup = map; 6674 em->start = logical; 6675 em->len = length; 6676 em->orig_start = 0; 6677 em->block_start = 0; 6678 em->block_len = em->len; 6679 6680 map->num_stripes = num_stripes; 6681 map->io_width = btrfs_chunk_io_width(leaf, chunk); 6682 map->io_align = btrfs_chunk_io_align(leaf, chunk); 6683 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk); 6684 map->type = btrfs_chunk_type(leaf, chunk); 6685 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); 6686 map->verified_stripes = 0; 6687 em->orig_block_len = calc_stripe_length(map->type, em->len, 6688 map->num_stripes); 6689 for (i = 0; i < num_stripes; i++) { 6690 map->stripes[i].physical = 6691 btrfs_stripe_offset_nr(leaf, chunk, i); 6692 devid = btrfs_stripe_devid_nr(leaf, chunk, i); 6693 read_extent_buffer(leaf, uuid, (unsigned long) 6694 btrfs_stripe_dev_uuid_nr(chunk, i), 6695 BTRFS_UUID_SIZE); 6696 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, 6697 devid, uuid, NULL); 6698 if (!map->stripes[i].dev && 6699 !btrfs_test_opt(fs_info, DEGRADED)) { 6700 free_extent_map(em); 6701 btrfs_report_missing_device(fs_info, devid, uuid, true); 6702 return -ENOENT; 6703 } 6704 if (!map->stripes[i].dev) { 6705 map->stripes[i].dev = 6706 add_missing_dev(fs_info->fs_devices, devid, 6707 uuid); 6708 if (IS_ERR(map->stripes[i].dev)) { 6709 free_extent_map(em); 6710 btrfs_err(fs_info, 6711 "failed to init missing dev %llu: %ld", 6712 devid, PTR_ERR(map->stripes[i].dev)); 6713 return PTR_ERR(map->stripes[i].dev); 6714 } 6715 btrfs_report_missing_device(fs_info, devid, uuid, false); 6716 } 6717 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 6718 &(map->stripes[i].dev->dev_state)); 6719 6720 } 6721 6722 write_lock(&map_tree->lock); 6723 ret = add_extent_mapping(map_tree, em, 0); 6724 write_unlock(&map_tree->lock); 6725 if (ret < 0) { 6726 btrfs_err(fs_info, 6727 "failed to add chunk map, start=%llu len=%llu: %d", 6728 em->start, em->len, ret); 6729 } 6730 free_extent_map(em); 6731 6732 return ret; 6733 } 6734 6735 static void fill_device_from_item(struct extent_buffer *leaf, 6736 struct btrfs_dev_item *dev_item, 6737 struct btrfs_device *device) 6738 { 6739 unsigned long ptr; 6740 6741 device->devid = btrfs_device_id(leaf, dev_item); 6742 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item); 6743 device->total_bytes = device->disk_total_bytes; 6744 device->commit_total_bytes = device->disk_total_bytes; 6745 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item); 6746 device->commit_bytes_used = device->bytes_used; 6747 device->type = btrfs_device_type(leaf, dev_item); 6748 device->io_align = btrfs_device_io_align(leaf, dev_item); 6749 device->io_width = btrfs_device_io_width(leaf, dev_item); 6750 device->sector_size = btrfs_device_sector_size(leaf, dev_item); 6751 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID); 6752 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 6753 6754 ptr = btrfs_device_uuid(dev_item); 6755 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 6756 } 6757 6758 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info, 
6759 u8 *fsid) 6760 { 6761 struct btrfs_fs_devices *fs_devices; 6762 int ret; 6763 6764 lockdep_assert_held(&uuid_mutex); 6765 ASSERT(fsid); 6766 6767 /* This will match only for multi-device seed fs */ 6768 list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list) 6769 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE)) 6770 return fs_devices; 6771 6772 6773 fs_devices = find_fsid(fsid, NULL); 6774 if (!fs_devices) { 6775 if (!btrfs_test_opt(fs_info, DEGRADED)) 6776 return ERR_PTR(-ENOENT); 6777 6778 fs_devices = alloc_fs_devices(fsid, NULL); 6779 if (IS_ERR(fs_devices)) 6780 return fs_devices; 6781 6782 fs_devices->seeding = true; 6783 fs_devices->opened = 1; 6784 return fs_devices; 6785 } 6786 6787 /* 6788 * Upon first call for a seed fs fsid, just create a private copy of the 6789 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list 6790 */ 6791 fs_devices = clone_fs_devices(fs_devices); 6792 if (IS_ERR(fs_devices)) 6793 return fs_devices; 6794 6795 ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder); 6796 if (ret) { 6797 free_fs_devices(fs_devices); 6798 return ERR_PTR(ret); 6799 } 6800 6801 if (!fs_devices->seeding) { 6802 close_fs_devices(fs_devices); 6803 free_fs_devices(fs_devices); 6804 return ERR_PTR(-EINVAL); 6805 } 6806 6807 list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list); 6808 6809 return fs_devices; 6810 } 6811 6812 static int read_one_dev(struct extent_buffer *leaf, 6813 struct btrfs_dev_item *dev_item) 6814 { 6815 struct btrfs_fs_info *fs_info = leaf->fs_info; 6816 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 6817 struct btrfs_device *device; 6818 u64 devid; 6819 int ret; 6820 u8 fs_uuid[BTRFS_FSID_SIZE]; 6821 u8 dev_uuid[BTRFS_UUID_SIZE]; 6822 6823 devid = btrfs_device_id(leaf, dev_item); 6824 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 6825 BTRFS_UUID_SIZE); 6826 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 6827 BTRFS_FSID_SIZE); 6828 6829 if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) { 6830 fs_devices = open_seed_devices(fs_info, fs_uuid); 6831 if (IS_ERR(fs_devices)) 6832 return PTR_ERR(fs_devices); 6833 } 6834 6835 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid, 6836 fs_uuid); 6837 if (!device) { 6838 if (!btrfs_test_opt(fs_info, DEGRADED)) { 6839 btrfs_report_missing_device(fs_info, devid, 6840 dev_uuid, true); 6841 return -ENOENT; 6842 } 6843 6844 device = add_missing_dev(fs_devices, devid, dev_uuid); 6845 if (IS_ERR(device)) { 6846 btrfs_err(fs_info, 6847 "failed to add missing dev %llu: %ld", 6848 devid, PTR_ERR(device)); 6849 return PTR_ERR(device); 6850 } 6851 btrfs_report_missing_device(fs_info, devid, dev_uuid, false); 6852 } else { 6853 if (!device->bdev) { 6854 if (!btrfs_test_opt(fs_info, DEGRADED)) { 6855 btrfs_report_missing_device(fs_info, 6856 devid, dev_uuid, true); 6857 return -ENOENT; 6858 } 6859 btrfs_report_missing_device(fs_info, devid, 6860 dev_uuid, false); 6861 } 6862 6863 if (!device->bdev && 6864 !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 6865 /* 6866 * this happens when a device that was properly setup 6867 * in the device info lists suddenly goes bad. 
6868 * device->bdev is NULL, and so we have to set
6869 * the BTRFS_DEV_STATE_MISSING bit here
6870 */
6871 device->fs_devices->missing_devices++;
6872 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6873 }
6874
6875 /* Move the device to its own fs_devices */
6876 if (device->fs_devices != fs_devices) {
6877 ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
6878 &device->dev_state));
6879
6880 list_move(&device->dev_list, &fs_devices->devices);
6881 device->fs_devices->num_devices--;
6882 fs_devices->num_devices++;
6883
6884 device->fs_devices->missing_devices--;
6885 fs_devices->missing_devices++;
6886
6887 device->fs_devices = fs_devices;
6888 }
6889 }
6890
6891 if (device->fs_devices != fs_info->fs_devices) {
6892 BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
6893 if (device->generation !=
6894 btrfs_device_generation(leaf, dev_item))
6895 return -EINVAL;
6896 }
6897
6898 fill_device_from_item(leaf, dev_item, device);
6899 if (device->bdev) {
6900 u64 max_total_bytes = i_size_read(device->bdev->bd_inode);
6901
6902 if (device->total_bytes > max_total_bytes) {
6903 btrfs_err(fs_info,
6904 "device total_bytes should be at most %llu but found %llu",
6905 max_total_bytes, device->total_bytes);
6906 return -EINVAL;
6907 }
6908 }
6909 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
6910 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
6911 !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
6912 device->fs_devices->total_rw_bytes += device->total_bytes;
6913 atomic64_add(device->total_bytes - device->bytes_used,
6914 &fs_info->free_chunk_space);
6915 }
6916 ret = 0;
6917 return ret;
6918 }
6919
6920 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
6921 {
6922 struct btrfs_root *root = fs_info->tree_root;
6923 struct btrfs_super_block *super_copy = fs_info->super_copy;
6924 struct extent_buffer *sb;
6925 struct btrfs_disk_key *disk_key;
6926 struct btrfs_chunk *chunk;
6927 u8 *array_ptr;
6928 unsigned long sb_array_offset;
6929 int ret = 0;
6930 u32 num_stripes;
6931 u32 array_size;
6932 u32 len = 0;
6933 u32 cur_offset;
6934 u64 type;
6935 struct btrfs_key key;
6936
6937 ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
6938 /*
6939 * This will create an extent buffer of nodesize, the superblock size is
6940 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
6941 * overallocate but we can keep it as-is, only the first page is used.
6942 */
6943 sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET,
6944 root->root_key.objectid, 0);
6945 if (IS_ERR(sb))
6946 return PTR_ERR(sb);
6947 set_extent_buffer_uptodate(sb);
6948 /*
6949 * The sb extent buffer is artificial and just used to read the system array.
6950 * The set_extent_buffer_uptodate() call does not properly mark all its
6951 * pages up-to-date when the page is larger: extent does not cover the
6952 * whole page and consequently check_page_uptodate does not find all
6953 * the page's extents up-to-date (the hole beyond sb),
6954 * write_extent_buffer then triggers a WARN_ON.
6955 *
6956 * Regular short extents go through the mark_extent_buffer_dirty/writeback cycle,
6957 * but sb spans only this function. Add an explicit SetPageUptodate call
6958 * to silence the warning e.g. on PowerPC 64.
6959 */ 6960 if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE) 6961 SetPageUptodate(sb->pages[0]); 6962 6963 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); 6964 array_size = btrfs_super_sys_array_size(super_copy); 6965 6966 array_ptr = super_copy->sys_chunk_array; 6967 sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array); 6968 cur_offset = 0; 6969 6970 while (cur_offset < array_size) { 6971 disk_key = (struct btrfs_disk_key *)array_ptr; 6972 len = sizeof(*disk_key); 6973 if (cur_offset + len > array_size) 6974 goto out_short_read; 6975 6976 btrfs_disk_key_to_cpu(&key, disk_key); 6977 6978 array_ptr += len; 6979 sb_array_offset += len; 6980 cur_offset += len; 6981 6982 if (key.type != BTRFS_CHUNK_ITEM_KEY) { 6983 btrfs_err(fs_info, 6984 "unexpected item type %u in sys_array at offset %u", 6985 (u32)key.type, cur_offset); 6986 ret = -EIO; 6987 break; 6988 } 6989 6990 chunk = (struct btrfs_chunk *)sb_array_offset; 6991 /* 6992 * At least one btrfs_chunk with one stripe must be present, 6993 * exact stripe count check comes afterwards 6994 */ 6995 len = btrfs_chunk_item_size(1); 6996 if (cur_offset + len > array_size) 6997 goto out_short_read; 6998 6999 num_stripes = btrfs_chunk_num_stripes(sb, chunk); 7000 if (!num_stripes) { 7001 btrfs_err(fs_info, 7002 "invalid number of stripes %u in sys_array at offset %u", 7003 num_stripes, cur_offset); 7004 ret = -EIO; 7005 break; 7006 } 7007 7008 type = btrfs_chunk_type(sb, chunk); 7009 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) { 7010 btrfs_err(fs_info, 7011 "invalid chunk type %llu in sys_array at offset %u", 7012 type, cur_offset); 7013 ret = -EIO; 7014 break; 7015 } 7016 7017 len = btrfs_chunk_item_size(num_stripes); 7018 if (cur_offset + len > array_size) 7019 goto out_short_read; 7020 7021 ret = read_one_chunk(&key, sb, chunk); 7022 if (ret) 7023 break; 7024 7025 array_ptr += len; 7026 sb_array_offset += len; 7027 cur_offset += len; 7028 } 7029 clear_extent_buffer_uptodate(sb); 7030 free_extent_buffer_stale(sb); 7031 return ret; 7032 7033 out_short_read: 7034 btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u", 7035 len, cur_offset); 7036 clear_extent_buffer_uptodate(sb); 7037 free_extent_buffer_stale(sb); 7038 return -EIO; 7039 } 7040 7041 /* 7042 * Check if all chunks in the fs are OK for read-write degraded mount 7043 * 7044 * If the @failing_dev is specified, it's accounted as missing. 7045 * 7046 * Return true if all chunks meet the minimal RW mount requirements. 7047 * Return false if any chunk doesn't meet the minimal RW mount requirements. 7048 */ 7049 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info, 7050 struct btrfs_device *failing_dev) 7051 { 7052 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 7053 struct extent_map *em; 7054 u64 next_start = 0; 7055 bool ret = true; 7056 7057 read_lock(&map_tree->lock); 7058 em = lookup_extent_mapping(map_tree, 0, (u64)-1); 7059 read_unlock(&map_tree->lock); 7060 /* No chunk at all? 
Return false anyway */ 7061 if (!em) { 7062 ret = false; 7063 goto out; 7064 } 7065 while (em) { 7066 struct map_lookup *map; 7067 int missing = 0; 7068 int max_tolerated; 7069 int i; 7070 7071 map = em->map_lookup; 7072 max_tolerated = 7073 btrfs_get_num_tolerated_disk_barrier_failures( 7074 map->type); 7075 for (i = 0; i < map->num_stripes; i++) { 7076 struct btrfs_device *dev = map->stripes[i].dev; 7077 7078 if (!dev || !dev->bdev || 7079 test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) || 7080 dev->last_flush_error) 7081 missing++; 7082 else if (failing_dev && failing_dev == dev) 7083 missing++; 7084 } 7085 if (missing > max_tolerated) { 7086 if (!failing_dev) 7087 btrfs_warn(fs_info, 7088 "chunk %llu missing %d devices, max tolerance is %d for writable mount", 7089 em->start, missing, max_tolerated); 7090 free_extent_map(em); 7091 ret = false; 7092 goto out; 7093 } 7094 next_start = extent_map_end(em); 7095 free_extent_map(em); 7096 7097 read_lock(&map_tree->lock); 7098 em = lookup_extent_mapping(map_tree, next_start, 7099 (u64)(-1) - next_start); 7100 read_unlock(&map_tree->lock); 7101 } 7102 out: 7103 return ret; 7104 } 7105 7106 static void readahead_tree_node_children(struct extent_buffer *node) 7107 { 7108 int i; 7109 const int nr_items = btrfs_header_nritems(node); 7110 7111 for (i = 0; i < nr_items; i++) 7112 btrfs_readahead_node_child(node, i); 7113 } 7114 7115 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) 7116 { 7117 struct btrfs_root *root = fs_info->chunk_root; 7118 struct btrfs_path *path; 7119 struct extent_buffer *leaf; 7120 struct btrfs_key key; 7121 struct btrfs_key found_key; 7122 int ret; 7123 int slot; 7124 u64 total_dev = 0; 7125 u64 last_ra_node = 0; 7126 7127 path = btrfs_alloc_path(); 7128 if (!path) 7129 return -ENOMEM; 7130 7131 /* 7132 * uuid_mutex is needed only if we are mounting a sprout FS 7133 * otherwise we don't need it. 7134 */ 7135 mutex_lock(&uuid_mutex); 7136 7137 /* 7138 * It is possible for mount and umount to race in such a way that 7139 * we execute this code path, but open_fs_devices failed to clear 7140 * total_rw_bytes. We certainly want it cleared before reading the 7141 * device items, so clear it here. 7142 */ 7143 fs_info->fs_devices->total_rw_bytes = 0; 7144 7145 /* 7146 * Read all device items, and then all the chunk items. All 7147 * device items are found before any chunk item (their object id 7148 * is smaller than the lowest possible object id for a chunk 7149 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID). 
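 * For example, all device items share objectid BTRFS_DEV_ITEMS_OBJECTID
 * (1), while chunk items use objectid BTRFS_FIRST_CHUNK_TREE_OBJECTID
 * (256), so one forward search visits every device item first.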
7150 */ 7151 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 7152 key.offset = 0; 7153 key.type = 0; 7154 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 7155 if (ret < 0) 7156 goto error; 7157 while (1) { 7158 struct extent_buffer *node; 7159 7160 leaf = path->nodes[0]; 7161 slot = path->slots[0]; 7162 if (slot >= btrfs_header_nritems(leaf)) { 7163 ret = btrfs_next_leaf(root, path); 7164 if (ret == 0) 7165 continue; 7166 if (ret < 0) 7167 goto error; 7168 break; 7169 } 7170 /* 7171 * The nodes on level 1 are not locked but we don't need to do 7172 * that during mount time as nothing else can access the tree 7173 */ 7174 node = path->nodes[1]; 7175 if (node) { 7176 if (last_ra_node != node->start) { 7177 readahead_tree_node_children(node); 7178 last_ra_node = node->start; 7179 } 7180 } 7181 btrfs_item_key_to_cpu(leaf, &found_key, slot); 7182 if (found_key.type == BTRFS_DEV_ITEM_KEY) { 7183 struct btrfs_dev_item *dev_item; 7184 dev_item = btrfs_item_ptr(leaf, slot, 7185 struct btrfs_dev_item); 7186 ret = read_one_dev(leaf, dev_item); 7187 if (ret) 7188 goto error; 7189 total_dev++; 7190 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { 7191 struct btrfs_chunk *chunk; 7192 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 7193 mutex_lock(&fs_info->chunk_mutex); 7194 ret = read_one_chunk(&found_key, leaf, chunk); 7195 mutex_unlock(&fs_info->chunk_mutex); 7196 if (ret) 7197 goto error; 7198 } 7199 path->slots[0]++; 7200 } 7201 7202 /* 7203 * After loading chunk tree, we've got all device information, 7204 * do another round of validation checks. 7205 */ 7206 if (total_dev != fs_info->fs_devices->total_devices) { 7207 btrfs_err(fs_info, 7208 "super_num_devices %llu mismatch with num_devices %llu found here", 7209 btrfs_super_num_devices(fs_info->super_copy), 7210 total_dev); 7211 ret = -EINVAL; 7212 goto error; 7213 } 7214 if (btrfs_super_total_bytes(fs_info->super_copy) < 7215 fs_info->fs_devices->total_rw_bytes) { 7216 btrfs_err(fs_info, 7217 "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu", 7218 btrfs_super_total_bytes(fs_info->super_copy), 7219 fs_info->fs_devices->total_rw_bytes); 7220 ret = -EINVAL; 7221 goto error; 7222 } 7223 ret = 0; 7224 error: 7225 mutex_unlock(&uuid_mutex); 7226 7227 btrfs_free_path(path); 7228 return ret; 7229 } 7230 7231 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info) 7232 { 7233 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs; 7234 struct btrfs_device *device; 7235 7236 fs_devices->fs_info = fs_info; 7237 7238 mutex_lock(&fs_devices->device_list_mutex); 7239 list_for_each_entry(device, &fs_devices->devices, dev_list) 7240 device->fs_info = fs_info; 7241 7242 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 7243 list_for_each_entry(device, &seed_devs->devices, dev_list) 7244 device->fs_info = fs_info; 7245 7246 seed_devs->fs_info = fs_info; 7247 } 7248 mutex_unlock(&fs_devices->device_list_mutex); 7249 } 7250 7251 static u64 btrfs_dev_stats_value(const struct extent_buffer *eb, 7252 const struct btrfs_dev_stats_item *ptr, 7253 int index) 7254 { 7255 u64 val; 7256 7257 read_extent_buffer(eb, &val, 7258 offsetof(struct btrfs_dev_stats_item, values) + 7259 ((unsigned long)ptr) + (index * sizeof(u64)), 7260 sizeof(val)); 7261 return val; 7262 } 7263 7264 static void btrfs_set_dev_stats_value(struct extent_buffer *eb, 7265 struct btrfs_dev_stats_item *ptr, 7266 int index, u64 val) 7267 { 7268 write_extent_buffer(eb, &val, 7269 offsetof(struct btrfs_dev_stats_item, values) + 7270 
((unsigned long)ptr) + (index * sizeof(u64)), 7271 sizeof(val)); 7272 } 7273 7274 static int btrfs_device_init_dev_stats(struct btrfs_device *device, 7275 struct btrfs_path *path) 7276 { 7277 struct btrfs_dev_stats_item *ptr; 7278 struct extent_buffer *eb; 7279 struct btrfs_key key; 7280 int item_size; 7281 int i, ret, slot; 7282 7283 key.objectid = BTRFS_DEV_STATS_OBJECTID; 7284 key.type = BTRFS_PERSISTENT_ITEM_KEY; 7285 key.offset = device->devid; 7286 ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0); 7287 if (ret) { 7288 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7289 btrfs_dev_stat_set(device, i, 0); 7290 device->dev_stats_valid = 1; 7291 btrfs_release_path(path); 7292 return ret < 0 ? ret : 0; 7293 } 7294 slot = path->slots[0]; 7295 eb = path->nodes[0]; 7296 item_size = btrfs_item_size_nr(eb, slot); 7297 7298 ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item); 7299 7300 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) { 7301 if (item_size >= (1 + i) * sizeof(__le64)) 7302 btrfs_dev_stat_set(device, i, 7303 btrfs_dev_stats_value(eb, ptr, i)); 7304 else 7305 btrfs_dev_stat_set(device, i, 0); 7306 } 7307 7308 device->dev_stats_valid = 1; 7309 btrfs_dev_stat_print_on_load(device); 7310 btrfs_release_path(path); 7311 7312 return 0; 7313 } 7314 7315 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info) 7316 { 7317 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs; 7318 struct btrfs_device *device; 7319 struct btrfs_path *path = NULL; 7320 int ret = 0; 7321 7322 path = btrfs_alloc_path(); 7323 if (!path) 7324 return -ENOMEM; 7325 7326 mutex_lock(&fs_devices->device_list_mutex); 7327 list_for_each_entry(device, &fs_devices->devices, dev_list) { 7328 ret = btrfs_device_init_dev_stats(device, path); 7329 if (ret) 7330 goto out; 7331 } 7332 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 7333 list_for_each_entry(device, &seed_devs->devices, dev_list) { 7334 ret = btrfs_device_init_dev_stats(device, path); 7335 if (ret) 7336 goto out; 7337 } 7338 } 7339 out: 7340 mutex_unlock(&fs_devices->device_list_mutex); 7341 7342 btrfs_free_path(path); 7343 return ret; 7344 } 7345 7346 static int update_dev_stat_item(struct btrfs_trans_handle *trans, 7347 struct btrfs_device *device) 7348 { 7349 struct btrfs_fs_info *fs_info = trans->fs_info; 7350 struct btrfs_root *dev_root = fs_info->dev_root; 7351 struct btrfs_path *path; 7352 struct btrfs_key key; 7353 struct extent_buffer *eb; 7354 struct btrfs_dev_stats_item *ptr; 7355 int ret; 7356 int i; 7357 7358 key.objectid = BTRFS_DEV_STATS_OBJECTID; 7359 key.type = BTRFS_PERSISTENT_ITEM_KEY; 7360 key.offset = device->devid; 7361 7362 path = btrfs_alloc_path(); 7363 if (!path) 7364 return -ENOMEM; 7365 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1); 7366 if (ret < 0) { 7367 btrfs_warn_in_rcu(fs_info, 7368 "error %d while searching for dev_stats item for device %s", 7369 ret, rcu_str_deref(device->name)); 7370 goto out; 7371 } 7372 7373 if (ret == 0 && 7374 btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) { 7375 /* need to delete old one and insert a new one */ 7376 ret = btrfs_del_item(trans, dev_root, path); 7377 if (ret != 0) { 7378 btrfs_warn_in_rcu(fs_info, 7379 "delete too small dev_stats item for device %s failed %d", 7380 rcu_str_deref(device->name), ret); 7381 goto out; 7382 } 7383 ret = 1; 7384 } 7385 7386 if (ret == 1) { 7387 /* need to insert a new item */ 7388 btrfs_release_path(path); 7389 ret = btrfs_insert_empty_item(trans, dev_root, 
path,
7390 &key, sizeof(*ptr));
7391 if (ret < 0) {
7392 btrfs_warn_in_rcu(fs_info,
7393 "insert dev_stats item for device %s failed %d",
7394 rcu_str_deref(device->name), ret);
7395 goto out;
7396 }
7397 }
7398
7399 eb = path->nodes[0];
7400 ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
7401 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7402 btrfs_set_dev_stats_value(eb, ptr, i,
7403 btrfs_dev_stat_read(device, i));
7404 btrfs_mark_buffer_dirty(eb);
7405
7406 out:
7407 btrfs_free_path(path);
7408 return ret;
7409 }
7410
7411 /*
7412 * Called from commit_transaction. Writes all changed device stats to disk.
7413 */
7414 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
7415 {
7416 struct btrfs_fs_info *fs_info = trans->fs_info;
7417 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7418 struct btrfs_device *device;
7419 int stats_cnt;
7420 int ret = 0;
7421
7422 mutex_lock(&fs_devices->device_list_mutex);
7423 list_for_each_entry(device, &fs_devices->devices, dev_list) {
7424 stats_cnt = atomic_read(&device->dev_stats_ccnt);
7425 if (!device->dev_stats_valid || stats_cnt == 0)
7426 continue;
7427
7428
7429 /*
7430 * There is a LOAD-LOAD control dependency between the value of
7431 * dev_stats_ccnt and updating the on-disk values which requires
7432 * reading the in-memory counters. Such control dependencies
7433 * require explicit read memory barriers.
7434 *
7435 * This memory barrier pairs with smp_mb__before_atomic in
7436 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
7437 * barrier implied by atomic_xchg in
7438 * btrfs_dev_stats_read_and_reset.
7439 */
7440 smp_rmb();
7441
7442 ret = update_dev_stat_item(trans, device);
7443 if (!ret)
7444 atomic_sub(stats_cnt, &device->dev_stats_ccnt);
7445 }
7446 mutex_unlock(&fs_devices->device_list_mutex);
7447
7448 return ret;
7449 }
7450
7451 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
7452 {
7453 btrfs_dev_stat_inc(dev, index);
7454 btrfs_dev_stat_print_on_error(dev);
7455 }
7456
7457 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
7458 {
7459 if (!dev->dev_stats_valid)
7460 return;
7461 btrfs_err_rl_in_rcu(dev->fs_info,
7462 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7463 rcu_str_deref(dev->name),
7464 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7465 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7466 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7467 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7468 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7469 }
7470
7471 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
7472 {
7473 int i;
7474
7475 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7476 if (btrfs_dev_stat_read(dev, i) != 0)
7477 break;
7478 if (i == BTRFS_DEV_STAT_VALUES_MAX)
7479 return; /* all values == 0, suppress message */
7480
7481 btrfs_info_in_rcu(dev->fs_info,
7482 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7483 rcu_str_deref(dev->name),
7484 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7485 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7486 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7487 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7488 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7489 }
7490
7491 int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
7492 struct btrfs_ioctl_get_dev_stats *stats)
7493 {
7494 struct btrfs_device *dev;
7495 struct btrfs_fs_devices *fs_devices =
fs_info->fs_devices; 7496 int i; 7497 7498 mutex_lock(&fs_devices->device_list_mutex); 7499 dev = btrfs_find_device(fs_info->fs_devices, stats->devid, NULL, NULL); 7500 mutex_unlock(&fs_devices->device_list_mutex); 7501 7502 if (!dev) { 7503 btrfs_warn(fs_info, "get dev_stats failed, device not found"); 7504 return -ENODEV; 7505 } else if (!dev->dev_stats_valid) { 7506 btrfs_warn(fs_info, "get dev_stats failed, not yet valid"); 7507 return -ENODEV; 7508 } else if (stats->flags & BTRFS_DEV_STATS_RESET) { 7509 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) { 7510 if (stats->nr_items > i) 7511 stats->values[i] = 7512 btrfs_dev_stat_read_and_reset(dev, i); 7513 else 7514 btrfs_dev_stat_set(dev, i, 0); 7515 } 7516 btrfs_info(fs_info, "device stats zeroed by %s (%d)", 7517 current->comm, task_pid_nr(current)); 7518 } else { 7519 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7520 if (stats->nr_items > i) 7521 stats->values[i] = btrfs_dev_stat_read(dev, i); 7522 } 7523 if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX) 7524 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX; 7525 return 0; 7526 } 7527 7528 /* 7529 * Update the size and bytes used for each device where it changed. This is 7530 * delayed since we would otherwise get errors while writing out the 7531 * superblocks. 7532 * 7533 * Must be invoked during transaction commit. 7534 */ 7535 void btrfs_commit_device_sizes(struct btrfs_transaction *trans) 7536 { 7537 struct btrfs_device *curr, *next; 7538 7539 ASSERT(trans->state == TRANS_STATE_COMMIT_DOING); 7540 7541 if (list_empty(&trans->dev_update_list)) 7542 return; 7543 7544 /* 7545 * We don't need the device_list_mutex here. This list is owned by the 7546 * transaction and the transaction must complete before the device is 7547 * released. 7548 */ 7549 mutex_lock(&trans->fs_info->chunk_mutex); 7550 list_for_each_entry_safe(curr, next, &trans->dev_update_list, 7551 post_commit_list) { 7552 list_del_init(&curr->post_commit_list); 7553 curr->commit_total_bytes = curr->disk_total_bytes; 7554 curr->commit_bytes_used = curr->bytes_used; 7555 } 7556 mutex_unlock(&trans->fs_info->chunk_mutex); 7557 } 7558 7559 /* 7560 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10. 
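 * E.g. btrfs_bg_type_to_factor(BTRFS_BLOCK_GROUP_RAID1) is 2, since RAID1
 * keeps two copies of each byte (the ncopies field of btrfs_raid_array).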
7561 */
7562 int btrfs_bg_type_to_factor(u64 flags)
7563 {
7564 const int index = btrfs_bg_flags_to_raid_index(flags);
7565
7566 return btrfs_raid_array[index].ncopies;
7567 }
7568
7569
7570
7571 static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
7572 u64 chunk_offset, u64 devid,
7573 u64 physical_offset, u64 physical_len)
7574 {
7575 struct extent_map_tree *em_tree = &fs_info->mapping_tree;
7576 struct extent_map *em;
7577 struct map_lookup *map;
7578 struct btrfs_device *dev;
7579 u64 stripe_len;
7580 bool found = false;
7581 int ret = 0;
7582 int i;
7583
7584 read_lock(&em_tree->lock);
7585 em = lookup_extent_mapping(em_tree, chunk_offset, 1);
7586 read_unlock(&em_tree->lock);
7587
7588 if (!em) {
7589 btrfs_err(fs_info,
7590 "dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
7591 physical_offset, devid);
7592 ret = -EUCLEAN;
7593 goto out;
7594 }
7595
7596 map = em->map_lookup;
7597 stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
7598 if (physical_len != stripe_len) {
7599 btrfs_err(fs_info,
7600 "dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
7601 physical_offset, devid, em->start, physical_len,
7602 stripe_len);
7603 ret = -EUCLEAN;
7604 goto out;
7605 }
7606
7607 for (i = 0; i < map->num_stripes; i++) {
7608 if (map->stripes[i].dev->devid == devid &&
7609 map->stripes[i].physical == physical_offset) {
7610 found = true;
7611 if (map->verified_stripes >= map->num_stripes) {
7612 btrfs_err(fs_info,
7613 "too many dev extents for chunk %llu found",
7614 em->start);
7615 ret = -EUCLEAN;
7616 goto out;
7617 }
7618 map->verified_stripes++;
7619 break;
7620 }
7621 }
7622 if (!found) {
7623 btrfs_err(fs_info,
7624 "dev extent physical offset %llu devid %llu has no corresponding chunk",
7625 physical_offset, devid);
7626 ret = -EUCLEAN;
7627 }
7628
7629 /* Make sure no dev extent is beyond device boundary */
7630 dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL);
7631 if (!dev) {
7632 btrfs_err(fs_info, "failed to find devid %llu", devid);
7633 ret = -EUCLEAN;
7634 goto out;
7635 }
7636
7637 if (physical_offset + physical_len > dev->disk_total_bytes) {
7638 btrfs_err(fs_info,
7639 "dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
7640 devid, physical_offset, physical_len,
7641 dev->disk_total_bytes);
7642 ret = -EUCLEAN;
7643 goto out;
7644 }
7645 out:
7646 free_extent_map(em);
7647 return ret;
7648 }
7649
7650 static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
7651 {
7652 struct extent_map_tree *em_tree = &fs_info->mapping_tree;
7653 struct extent_map *em;
7654 struct rb_node *node;
7655 int ret = 0;
7656
7657 read_lock(&em_tree->lock);
7658 for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
7659 em = rb_entry(node, struct extent_map, rb_node);
7660 if (em->map_lookup->num_stripes !=
7661 em->map_lookup->verified_stripes) {
7662 btrfs_err(fs_info,
7663 "chunk %llu has missing dev extent, have %d expect %d",
7664 em->start, em->map_lookup->verified_stripes,
7665 em->map_lookup->num_stripes);
7666 ret = -EUCLEAN;
7667 goto out;
7668 }
7669 }
7670 out:
7671 read_unlock(&em_tree->lock);
7672 return ret;
7673 }
7674
7675 /*
7676 * Ensure that all dev extents are mapped to the correct chunk, otherwise
7677 * later chunk allocation/free would cause unexpected behavior.
7678 * 7679 * NOTE: This will iterate through the whole device tree, which should be of 7680 * the same size level as the chunk tree. This slightly increases mount time. 7681 */ 7682 int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info) 7683 { 7684 struct btrfs_path *path; 7685 struct btrfs_root *root = fs_info->dev_root; 7686 struct btrfs_key key; 7687 u64 prev_devid = 0; 7688 u64 prev_dev_ext_end = 0; 7689 int ret = 0; 7690 7691 /* 7692 * We don't have a dev_root because we mounted with ignorebadroots and 7693 * failed to load the root, so we want to skip the verification in this 7694 * case for sure. 7695 * 7696 * However if the dev root is fine, but the tree itself is corrupted 7697 * we'd still fail to mount. This verification is only to make sure 7698 * writes can happen safely, so instead just bypass this check 7699 * completely in the case of IGNOREBADROOTS. 7700 */ 7701 if (btrfs_test_opt(fs_info, IGNOREBADROOTS)) 7702 return 0; 7703 7704 key.objectid = 1; 7705 key.type = BTRFS_DEV_EXTENT_KEY; 7706 key.offset = 0; 7707 7708 path = btrfs_alloc_path(); 7709 if (!path) 7710 return -ENOMEM; 7711 7712 path->reada = READA_FORWARD; 7713 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 7714 if (ret < 0) 7715 goto out; 7716 7717 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { 7718 ret = btrfs_next_item(root, path); 7719 if (ret < 0) 7720 goto out; 7721 /* No dev extents at all? Not good */ 7722 if (ret > 0) { 7723 ret = -EUCLEAN; 7724 goto out; 7725 } 7726 } 7727 while (1) { 7728 struct extent_buffer *leaf = path->nodes[0]; 7729 struct btrfs_dev_extent *dext; 7730 int slot = path->slots[0]; 7731 u64 chunk_offset; 7732 u64 physical_offset; 7733 u64 physical_len; 7734 u64 devid; 7735 7736 btrfs_item_key_to_cpu(leaf, &key, slot); 7737 if (key.type != BTRFS_DEV_EXTENT_KEY) 7738 break; 7739 devid = key.objectid; 7740 physical_offset = key.offset; 7741 7742 dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent); 7743 chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext); 7744 physical_len = btrfs_dev_extent_length(leaf, dext); 7745 7746 /* Check if this dev extent overlaps with the previous one */ 7747 if (devid == prev_devid && physical_offset < prev_dev_ext_end) { 7748 btrfs_err(fs_info, 7749 "dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu", 7750 devid, physical_offset, prev_dev_ext_end); 7751 ret = -EUCLEAN; 7752 goto out; 7753 } 7754 7755 ret = verify_one_dev_extent(fs_info, chunk_offset, devid, 7756 physical_offset, physical_len); 7757 if (ret < 0) 7758 goto out; 7759 prev_devid = devid; 7760 prev_dev_ext_end = physical_offset + physical_len; 7761 7762 ret = btrfs_next_item(root, path); 7763 if (ret < 0) 7764 goto out; 7765 if (ret > 0) { 7766 ret = 0; 7767 break; 7768 } 7769 } 7770 7771 /* Ensure all chunks have corresponding dev extents */ 7772 ret = verify_chunk_dev_extent_mapping(fs_info); 7773 out: 7774 btrfs_free_path(path); 7775 return ret; 7776 } 7777 7778 /* 7779 * Check whether the given block group or device is pinned by any inode being 7780 * used as a swapfile. 
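 *
 * Note that @ptr is compared purely by address against the pointers
 * stored in fs_info->swapfile_pins, so a caller must pass the very
 * block group or device pointer that was registered, not a copy.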
7781 */ 7782 bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr) 7783 { 7784 struct btrfs_swapfile_pin *sp; 7785 struct rb_node *node; 7786 7787 spin_lock(&fs_info->swapfile_pins_lock); 7788 node = fs_info->swapfile_pins.rb_node; 7789 while (node) { 7790 sp = rb_entry(node, struct btrfs_swapfile_pin, node); 7791 if (ptr < sp->ptr) 7792 node = node->rb_left; 7793 else if (ptr > sp->ptr) 7794 node = node->rb_right; 7795 else 7796 break; 7797 } 7798 spin_unlock(&fs_info->swapfile_pins_lock); 7799 return node != NULL; 7800 } 7801
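/*
 * Illustrative sketch, not part of the original file: how a caller could
 * use btrfs_map_block() to inspect the physical stripes backing a logical
 * range. The function name and the pr_info output are assumptions made up
 * for this example; the btrfs_map_block()/btrfs_put_bbio() calling
 * convention matches the callers above.
 */
static int __maybe_unused example_print_stripes(struct btrfs_fs_info *fs_info,
						u64 logical, u64 len)
{
	struct btrfs_bio *bbio = NULL;
	u64 length = len;
	int ret;
	int i;

	/* BTRFS_MAP_GET_READ_MIRRORS returns every readable copy */
	ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
			      &length, &bbio, 0);
	if (ret)
		return ret;

	/* length may have been clamped to the chunk and stripe geometry */
	for (i = 0; i < bbio->num_stripes; i++)
		pr_info("stripe %d: devid %llu physical %llu len %llu\n", i,
			bbio->stripes[i].dev->devid,
			bbio->stripes[i].physical, length);

	/* drop the reference taken by __btrfs_map_block() */
	btrfs_put_bbio(bbio);
	return 0;
}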