1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2007 Oracle. All rights reserved. 4 */ 5 6 #include <linux/sched.h> 7 #include <linux/sched/mm.h> 8 #include <linux/bio.h> 9 #include <linux/slab.h> 10 #include <linux/blkdev.h> 11 #include <linux/ratelimit.h> 12 #include <linux/kthread.h> 13 #include <linux/raid/pq.h> 14 #include <linux/semaphore.h> 15 #include <linux/uuid.h> 16 #include <linux/list_sort.h> 17 #include "misc.h" 18 #include "ctree.h" 19 #include "extent_map.h" 20 #include "disk-io.h" 21 #include "transaction.h" 22 #include "print-tree.h" 23 #include "volumes.h" 24 #include "raid56.h" 25 #include "async-thread.h" 26 #include "check-integrity.h" 27 #include "rcu-string.h" 28 #include "dev-replace.h" 29 #include "sysfs.h" 30 #include "tree-checker.h" 31 #include "space-info.h" 32 #include "block-group.h" 33 #include "discard.h" 34 35 const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = { 36 [BTRFS_RAID_RAID10] = { 37 .sub_stripes = 2, 38 .dev_stripes = 1, 39 .devs_max = 0, /* 0 == as many as possible */ 40 .devs_min = 4, 41 .tolerated_failures = 1, 42 .devs_increment = 2, 43 .ncopies = 2, 44 .nparity = 0, 45 .raid_name = "raid10", 46 .bg_flag = BTRFS_BLOCK_GROUP_RAID10, 47 .mindev_error = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET, 48 }, 49 [BTRFS_RAID_RAID1] = { 50 .sub_stripes = 1, 51 .dev_stripes = 1, 52 .devs_max = 2, 53 .devs_min = 2, 54 .tolerated_failures = 1, 55 .devs_increment = 2, 56 .ncopies = 2, 57 .nparity = 0, 58 .raid_name = "raid1", 59 .bg_flag = BTRFS_BLOCK_GROUP_RAID1, 60 .mindev_error = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET, 61 }, 62 [BTRFS_RAID_RAID1C3] = { 63 .sub_stripes = 1, 64 .dev_stripes = 1, 65 .devs_max = 3, 66 .devs_min = 3, 67 .tolerated_failures = 2, 68 .devs_increment = 3, 69 .ncopies = 3, 70 .nparity = 0, 71 .raid_name = "raid1c3", 72 .bg_flag = BTRFS_BLOCK_GROUP_RAID1C3, 73 .mindev_error = BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET, 74 }, 75 [BTRFS_RAID_RAID1C4] = { 76 .sub_stripes = 1, 77 .dev_stripes = 1, 78 .devs_max = 4, 79 .devs_min = 4, 80 .tolerated_failures = 3, 81 .devs_increment = 4, 82 .ncopies = 4, 83 .nparity = 0, 84 .raid_name = "raid1c4", 85 .bg_flag = BTRFS_BLOCK_GROUP_RAID1C4, 86 .mindev_error = BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET, 87 }, 88 [BTRFS_RAID_DUP] = { 89 .sub_stripes = 1, 90 .dev_stripes = 2, 91 .devs_max = 1, 92 .devs_min = 1, 93 .tolerated_failures = 0, 94 .devs_increment = 1, 95 .ncopies = 2, 96 .nparity = 0, 97 .raid_name = "dup", 98 .bg_flag = BTRFS_BLOCK_GROUP_DUP, 99 .mindev_error = 0, 100 }, 101 [BTRFS_RAID_RAID0] = { 102 .sub_stripes = 1, 103 .dev_stripes = 1, 104 .devs_max = 0, 105 .devs_min = 2, 106 .tolerated_failures = 0, 107 .devs_increment = 1, 108 .ncopies = 1, 109 .nparity = 0, 110 .raid_name = "raid0", 111 .bg_flag = BTRFS_BLOCK_GROUP_RAID0, 112 .mindev_error = 0, 113 }, 114 [BTRFS_RAID_SINGLE] = { 115 .sub_stripes = 1, 116 .dev_stripes = 1, 117 .devs_max = 1, 118 .devs_min = 1, 119 .tolerated_failures = 0, 120 .devs_increment = 1, 121 .ncopies = 1, 122 .nparity = 0, 123 .raid_name = "single", 124 .bg_flag = 0, 125 .mindev_error = 0, 126 }, 127 [BTRFS_RAID_RAID5] = { 128 .sub_stripes = 1, 129 .dev_stripes = 1, 130 .devs_max = 0, 131 .devs_min = 2, 132 .tolerated_failures = 1, 133 .devs_increment = 1, 134 .ncopies = 1, 135 .nparity = 1, 136 .raid_name = "raid5", 137 .bg_flag = BTRFS_BLOCK_GROUP_RAID5, 138 .mindev_error = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET, 139 }, 140 [BTRFS_RAID_RAID6] = { 141 .sub_stripes = 1, 142 .dev_stripes = 1, 143 .devs_max = 0, 144 .devs_min = 3, 145 .tolerated_failures = 2, 146 .devs_increment = 1, 147 .ncopies = 1, 148 .nparity = 2, 149 .raid_name = "raid6", 150 .bg_flag = BTRFS_BLOCK_GROUP_RAID6, 151 .mindev_error = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET, 152 }, 153 }; 154 155 const char *btrfs_bg_type_to_raid_name(u64 flags) 156 { 157 const int index = btrfs_bg_flags_to_raid_index(flags); 158 159 if (index >= BTRFS_NR_RAID_TYPES) 160 return NULL; 161 162 return btrfs_raid_array[index].raid_name; 163 } 164 165 /* 166 * Fill @buf with textual description of @bg_flags, no more than @size_buf 167 * bytes including terminating null byte. 168 */ 169 void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf) 170 { 171 int i; 172 int ret; 173 char *bp = buf; 174 u64 flags = bg_flags; 175 u32 size_bp = size_buf; 176 177 if (!flags) { 178 strcpy(bp, "NONE"); 179 return; 180 } 181 182 #define DESCRIBE_FLAG(flag, desc) \ 183 do { \ 184 if (flags & (flag)) { \ 185 ret = snprintf(bp, size_bp, "%s|", (desc)); \ 186 if (ret < 0 || ret >= size_bp) \ 187 goto out_overflow; \ 188 size_bp -= ret; \ 189 bp += ret; \ 190 flags &= ~(flag); \ 191 } \ 192 } while (0) 193 194 DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data"); 195 DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system"); 196 DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata"); 197 198 DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single"); 199 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) 200 DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag, 201 btrfs_raid_array[i].raid_name); 202 #undef DESCRIBE_FLAG 203 204 if (flags) { 205 ret = snprintf(bp, size_bp, "0x%llx|", flags); 206 size_bp -= ret; 207 } 208 209 if (size_bp < size_buf) 210 buf[size_buf - size_bp - 1] = '\0'; /* remove last | */ 211 212 /* 213 * The text is trimmed, it's up to the caller to provide sufficiently 214 * large buffer 215 */ 216 out_overflow:; 217 } 218 219 static int init_first_rw_device(struct btrfs_trans_handle *trans); 220 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info); 221 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev); 222 static void btrfs_dev_stat_print_on_load(struct btrfs_device *device); 223 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, 224 enum btrfs_map_op op, 225 u64 logical, u64 *length, 226 struct btrfs_bio **bbio_ret, 227 int mirror_num, int need_raid_map); 228 229 /* 230 * Device locking 231 * ============== 232 * 233 * There are several mutexes that protect manipulation of devices and low-level 234 * structures like chunks but not block groups, extents or files 235 * 236 * uuid_mutex (global lock) 237 * ------------------------ 238 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from 239 * the SCAN_DEV ioctl registration or from mount either implicitly (the first 240 * device) or requested by the device= mount option 241 * 242 * the mutex can be very coarse and can cover long-running operations 243 * 244 * protects: updates to fs_devices counters like missing devices, rw devices, 245 * seeding, structure cloning, opening/closing devices at mount/umount time 246 * 247 * global::fs_devs - add, remove, updates to the global list 248 * 249 * does not protect: manipulation of the fs_devices::devices list in general 250 * but in mount context it could be used to exclude list modifications by eg. 251 * scan ioctl 252 * 253 * btrfs_device::name - renames (write side), read is RCU 254 * 255 * fs_devices::device_list_mutex (per-fs, with RCU) 256 * ------------------------------------------------ 257 * protects updates to fs_devices::devices, ie. adding and deleting 258 * 259 * simple list traversal with read-only actions can be done with RCU protection 260 * 261 * may be used to exclude some operations from running concurrently without any 262 * modifications to the list (see write_all_supers) 263 * 264 * Is not required at mount and close times, because our device list is 265 * protected by the uuid_mutex at that point. 266 * 267 * balance_mutex 268 * ------------- 269 * protects balance structures (status, state) and context accessed from 270 * several places (internally, ioctl) 271 * 272 * chunk_mutex 273 * ----------- 274 * protects chunks, adding or removing during allocation, trim or when a new 275 * device is added/removed. Additionally it also protects post_commit_list of 276 * individual devices, since they can be added to the transaction's 277 * post_commit_list only with chunk_mutex held. 278 * 279 * cleaner_mutex 280 * ------------- 281 * a big lock that is held by the cleaner thread and prevents running subvolume 282 * cleaning together with relocation or delayed iputs 283 * 284 * 285 * Lock nesting 286 * ============ 287 * 288 * uuid_mutex 289 * device_list_mutex 290 * chunk_mutex 291 * balance_mutex 292 * 293 * 294 * Exclusive operations, BTRFS_FS_EXCL_OP 295 * ====================================== 296 * 297 * Maintains the exclusivity of the following operations that apply to the 298 * whole filesystem and cannot run in parallel. 299 * 300 * - Balance (*) 301 * - Device add 302 * - Device remove 303 * - Device replace (*) 304 * - Resize 305 * 306 * The device operations (as above) can be in one of the following states: 307 * 308 * - Running state 309 * - Paused state 310 * - Completed state 311 * 312 * Only device operations marked with (*) can go into the Paused state for the 313 * following reasons: 314 * 315 * - ioctl (only Balance can be Paused through ioctl) 316 * - filesystem remounted as read-only 317 * - filesystem unmounted and mounted as read-only 318 * - system power-cycle and filesystem mounted as read-only 319 * - filesystem or device errors leading to forced read-only 320 * 321 * BTRFS_FS_EXCL_OP flag is set and cleared using atomic operations. 322 * During the course of Paused state, the BTRFS_FS_EXCL_OP remains set. 323 * A device operation in Paused or Running state can be canceled or resumed 324 * either by ioctl (Balance only) or when remounted as read-write. 325 * BTRFS_FS_EXCL_OP flag is cleared when the device operation is canceled or 326 * completed. 327 */ 328 329 DEFINE_MUTEX(uuid_mutex); 330 static LIST_HEAD(fs_uuids); 331 struct list_head * __attribute_const__ btrfs_get_fs_uuids(void) 332 { 333 return &fs_uuids; 334 } 335 336 /* 337 * alloc_fs_devices - allocate struct btrfs_fs_devices 338 * @fsid: if not NULL, copy the UUID to fs_devices::fsid 339 * @metadata_fsid: if not NULL, copy the UUID to fs_devices::metadata_fsid 340 * 341 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR(). 342 * The returned struct is not linked onto any lists and can be destroyed with 343 * kfree() right away. 344 */ 345 static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid, 346 const u8 *metadata_fsid) 347 { 348 struct btrfs_fs_devices *fs_devs; 349 350 fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL); 351 if (!fs_devs) 352 return ERR_PTR(-ENOMEM); 353 354 mutex_init(&fs_devs->device_list_mutex); 355 356 INIT_LIST_HEAD(&fs_devs->devices); 357 INIT_LIST_HEAD(&fs_devs->alloc_list); 358 INIT_LIST_HEAD(&fs_devs->fs_list); 359 if (fsid) 360 memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE); 361 362 if (metadata_fsid) 363 memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE); 364 else if (fsid) 365 memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE); 366 367 return fs_devs; 368 } 369 370 void btrfs_free_device(struct btrfs_device *device) 371 { 372 WARN_ON(!list_empty(&device->post_commit_list)); 373 rcu_string_free(device->name); 374 extent_io_tree_release(&device->alloc_state); 375 bio_put(device->flush_bio); 376 kfree(device); 377 } 378 379 static void free_fs_devices(struct btrfs_fs_devices *fs_devices) 380 { 381 struct btrfs_device *device; 382 WARN_ON(fs_devices->opened); 383 while (!list_empty(&fs_devices->devices)) { 384 device = list_entry(fs_devices->devices.next, 385 struct btrfs_device, dev_list); 386 list_del(&device->dev_list); 387 btrfs_free_device(device); 388 } 389 kfree(fs_devices); 390 } 391 392 void __exit btrfs_cleanup_fs_uuids(void) 393 { 394 struct btrfs_fs_devices *fs_devices; 395 396 while (!list_empty(&fs_uuids)) { 397 fs_devices = list_entry(fs_uuids.next, 398 struct btrfs_fs_devices, fs_list); 399 list_del(&fs_devices->fs_list); 400 free_fs_devices(fs_devices); 401 } 402 } 403 404 /* 405 * Returns a pointer to a new btrfs_device on success; ERR_PTR() on error. 406 * Returned struct is not linked onto any lists and must be destroyed using 407 * btrfs_free_device. 408 */ 409 static struct btrfs_device *__alloc_device(void) 410 { 411 struct btrfs_device *dev; 412 413 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 414 if (!dev) 415 return ERR_PTR(-ENOMEM); 416 417 /* 418 * Preallocate a bio that's always going to be used for flushing device 419 * barriers and matches the device lifespan 420 */ 421 dev->flush_bio = bio_alloc_bioset(GFP_KERNEL, 0, NULL); 422 if (!dev->flush_bio) { 423 kfree(dev); 424 return ERR_PTR(-ENOMEM); 425 } 426 427 INIT_LIST_HEAD(&dev->dev_list); 428 INIT_LIST_HEAD(&dev->dev_alloc_list); 429 INIT_LIST_HEAD(&dev->post_commit_list); 430 431 atomic_set(&dev->reada_in_flight, 0); 432 atomic_set(&dev->dev_stats_ccnt, 0); 433 btrfs_device_data_ordered_init(dev); 434 INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM); 435 INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM); 436 extent_io_tree_init(NULL, &dev->alloc_state, 0, NULL); 437 438 return dev; 439 } 440 441 static noinline struct btrfs_fs_devices *find_fsid( 442 const u8 *fsid, const u8 *metadata_fsid) 443 { 444 struct btrfs_fs_devices *fs_devices; 445 446 ASSERT(fsid); 447 448 /* Handle non-split brain cases */ 449 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 450 if (metadata_fsid) { 451 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0 452 && memcmp(metadata_fsid, fs_devices->metadata_uuid, 453 BTRFS_FSID_SIZE) == 0) 454 return fs_devices; 455 } else { 456 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0) 457 return fs_devices; 458 } 459 } 460 return NULL; 461 } 462 463 static struct btrfs_fs_devices *find_fsid_with_metadata_uuid( 464 struct btrfs_super_block *disk_super) 465 { 466 467 struct btrfs_fs_devices *fs_devices; 468 469 /* 470 * Handle scanned device having completed its fsid change but 471 * belonging to a fs_devices that was created by first scanning 472 * a device which didn't have its fsid/metadata_uuid changed 473 * at all and the CHANGING_FSID_V2 flag set. 474 */ 475 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 476 if (fs_devices->fsid_change && 477 memcmp(disk_super->metadata_uuid, fs_devices->fsid, 478 BTRFS_FSID_SIZE) == 0 && 479 memcmp(fs_devices->fsid, fs_devices->metadata_uuid, 480 BTRFS_FSID_SIZE) == 0) { 481 return fs_devices; 482 } 483 } 484 /* 485 * Handle scanned device having completed its fsid change but 486 * belonging to a fs_devices that was created by a device that 487 * has an outdated pair of fsid/metadata_uuid and 488 * CHANGING_FSID_V2 flag set. 489 */ 490 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 491 if (fs_devices->fsid_change && 492 memcmp(fs_devices->metadata_uuid, 493 fs_devices->fsid, BTRFS_FSID_SIZE) != 0 && 494 memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid, 495 BTRFS_FSID_SIZE) == 0) { 496 return fs_devices; 497 } 498 } 499 500 return find_fsid(disk_super->fsid, disk_super->metadata_uuid); 501 } 502 503 504 static int 505 btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder, 506 int flush, struct block_device **bdev, 507 struct btrfs_super_block **disk_super) 508 { 509 int ret; 510 511 *bdev = blkdev_get_by_path(device_path, flags, holder); 512 513 if (IS_ERR(*bdev)) { 514 ret = PTR_ERR(*bdev); 515 goto error; 516 } 517 518 if (flush) 519 filemap_write_and_wait((*bdev)->bd_inode->i_mapping); 520 ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE); 521 if (ret) { 522 blkdev_put(*bdev, flags); 523 goto error; 524 } 525 invalidate_bdev(*bdev); 526 *disk_super = btrfs_read_dev_super(*bdev); 527 if (IS_ERR(*disk_super)) { 528 ret = PTR_ERR(*disk_super); 529 blkdev_put(*bdev, flags); 530 goto error; 531 } 532 533 return 0; 534 535 error: 536 *bdev = NULL; 537 return ret; 538 } 539 540 static bool device_path_matched(const char *path, struct btrfs_device *device) 541 { 542 int found; 543 544 rcu_read_lock(); 545 found = strcmp(rcu_str_deref(device->name), path); 546 rcu_read_unlock(); 547 548 return found == 0; 549 } 550 551 /* 552 * Search and remove all stale (devices which are not mounted) devices. 553 * When both inputs are NULL, it will search and release all stale devices. 554 * path: Optional. When provided will it release all unmounted devices 555 * matching this path only. 556 * skip_dev: Optional. Will skip this device when searching for the stale 557 * devices. 558 * Return: 0 for success or if @path is NULL. 559 * -EBUSY if @path is a mounted device. 560 * -ENOENT if @path does not match any device in the list. 561 */ 562 static int btrfs_free_stale_devices(const char *path, 563 struct btrfs_device *skip_device) 564 { 565 struct btrfs_fs_devices *fs_devices, *tmp_fs_devices; 566 struct btrfs_device *device, *tmp_device; 567 int ret = 0; 568 569 if (path) 570 ret = -ENOENT; 571 572 list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) { 573 574 mutex_lock(&fs_devices->device_list_mutex); 575 list_for_each_entry_safe(device, tmp_device, 576 &fs_devices->devices, dev_list) { 577 if (skip_device && skip_device == device) 578 continue; 579 if (path && !device->name) 580 continue; 581 if (path && !device_path_matched(path, device)) 582 continue; 583 if (fs_devices->opened) { 584 /* for an already deleted device return 0 */ 585 if (path && ret != 0) 586 ret = -EBUSY; 587 break; 588 } 589 590 /* delete the stale device */ 591 fs_devices->num_devices--; 592 list_del(&device->dev_list); 593 btrfs_free_device(device); 594 595 ret = 0; 596 if (fs_devices->num_devices == 0) 597 break; 598 } 599 mutex_unlock(&fs_devices->device_list_mutex); 600 601 if (fs_devices->num_devices == 0) { 602 btrfs_sysfs_remove_fsid(fs_devices); 603 list_del(&fs_devices->fs_list); 604 free_fs_devices(fs_devices); 605 } 606 } 607 608 return ret; 609 } 610 611 /* 612 * This is only used on mount, and we are protected from competing things 613 * messing with our fs_devices by the uuid_mutex, thus we do not need the 614 * fs_devices->device_list_mutex here. 615 */ 616 static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices, 617 struct btrfs_device *device, fmode_t flags, 618 void *holder) 619 { 620 struct request_queue *q; 621 struct block_device *bdev; 622 struct btrfs_super_block *disk_super; 623 u64 devid; 624 int ret; 625 626 if (device->bdev) 627 return -EINVAL; 628 if (!device->name) 629 return -EINVAL; 630 631 ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1, 632 &bdev, &disk_super); 633 if (ret) 634 return ret; 635 636 devid = btrfs_stack_device_id(&disk_super->dev_item); 637 if (devid != device->devid) 638 goto error_free_page; 639 640 if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE)) 641 goto error_free_page; 642 643 device->generation = btrfs_super_generation(disk_super); 644 645 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) { 646 if (btrfs_super_incompat_flags(disk_super) & 647 BTRFS_FEATURE_INCOMPAT_METADATA_UUID) { 648 pr_err( 649 "BTRFS: Invalid seeding and uuid-changed device detected\n"); 650 goto error_free_page; 651 } 652 653 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 654 fs_devices->seeding = true; 655 } else { 656 if (bdev_read_only(bdev)) 657 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 658 else 659 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 660 } 661 662 q = bdev_get_queue(bdev); 663 if (!blk_queue_nonrot(q)) 664 fs_devices->rotating = true; 665 666 device->bdev = bdev; 667 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 668 device->mode = flags; 669 670 fs_devices->open_devices++; 671 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 672 device->devid != BTRFS_DEV_REPLACE_DEVID) { 673 fs_devices->rw_devices++; 674 list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list); 675 } 676 btrfs_release_disk_super(disk_super); 677 678 return 0; 679 680 error_free_page: 681 btrfs_release_disk_super(disk_super); 682 blkdev_put(bdev, flags); 683 684 return -EINVAL; 685 } 686 687 /* 688 * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices 689 * being created with a disk that has already completed its fsid change. Such 690 * disk can belong to an fs which has its FSID changed or to one which doesn't. 691 * Handle both cases here. 692 */ 693 static struct btrfs_fs_devices *find_fsid_inprogress( 694 struct btrfs_super_block *disk_super) 695 { 696 struct btrfs_fs_devices *fs_devices; 697 698 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 699 if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid, 700 BTRFS_FSID_SIZE) != 0 && 701 memcmp(fs_devices->metadata_uuid, disk_super->fsid, 702 BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) { 703 return fs_devices; 704 } 705 } 706 707 return find_fsid(disk_super->fsid, NULL); 708 } 709 710 711 static struct btrfs_fs_devices *find_fsid_changed( 712 struct btrfs_super_block *disk_super) 713 { 714 struct btrfs_fs_devices *fs_devices; 715 716 /* 717 * Handles the case where scanned device is part of an fs that had 718 * multiple successful changes of FSID but curently device didn't 719 * observe it. Meaning our fsid will be different than theirs. We need 720 * to handle two subcases : 721 * 1 - The fs still continues to have different METADATA/FSID uuids. 722 * 2 - The fs is switched back to its original FSID (METADATA/FSID 723 * are equal). 724 */ 725 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 726 /* Changed UUIDs */ 727 if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid, 728 BTRFS_FSID_SIZE) != 0 && 729 memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid, 730 BTRFS_FSID_SIZE) == 0 && 731 memcmp(fs_devices->fsid, disk_super->fsid, 732 BTRFS_FSID_SIZE) != 0) 733 return fs_devices; 734 735 /* Unchanged UUIDs */ 736 if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid, 737 BTRFS_FSID_SIZE) == 0 && 738 memcmp(fs_devices->fsid, disk_super->metadata_uuid, 739 BTRFS_FSID_SIZE) == 0) 740 return fs_devices; 741 } 742 743 return NULL; 744 } 745 746 static struct btrfs_fs_devices *find_fsid_reverted_metadata( 747 struct btrfs_super_block *disk_super) 748 { 749 struct btrfs_fs_devices *fs_devices; 750 751 /* 752 * Handle the case where the scanned device is part of an fs whose last 753 * metadata UUID change reverted it to the original FSID. At the same 754 * time * fs_devices was first created by another constitutent device 755 * which didn't fully observe the operation. This results in an 756 * btrfs_fs_devices created with metadata/fsid different AND 757 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the 758 * fs_devices equal to the FSID of the disk. 759 */ 760 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 761 if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid, 762 BTRFS_FSID_SIZE) != 0 && 763 memcmp(fs_devices->metadata_uuid, disk_super->fsid, 764 BTRFS_FSID_SIZE) == 0 && 765 fs_devices->fsid_change) 766 return fs_devices; 767 } 768 769 return NULL; 770 } 771 /* 772 * Add new device to list of registered devices 773 * 774 * Returns: 775 * device pointer which was just added or updated when successful 776 * error pointer when failed 777 */ 778 static noinline struct btrfs_device *device_list_add(const char *path, 779 struct btrfs_super_block *disk_super, 780 bool *new_device_added) 781 { 782 struct btrfs_device *device; 783 struct btrfs_fs_devices *fs_devices = NULL; 784 struct rcu_string *name; 785 u64 found_transid = btrfs_super_generation(disk_super); 786 u64 devid = btrfs_stack_device_id(&disk_super->dev_item); 787 bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) & 788 BTRFS_FEATURE_INCOMPAT_METADATA_UUID); 789 bool fsid_change_in_progress = (btrfs_super_flags(disk_super) & 790 BTRFS_SUPER_FLAG_CHANGING_FSID_V2); 791 792 if (fsid_change_in_progress) { 793 if (!has_metadata_uuid) 794 fs_devices = find_fsid_inprogress(disk_super); 795 else 796 fs_devices = find_fsid_changed(disk_super); 797 } else if (has_metadata_uuid) { 798 fs_devices = find_fsid_with_metadata_uuid(disk_super); 799 } else { 800 fs_devices = find_fsid_reverted_metadata(disk_super); 801 if (!fs_devices) 802 fs_devices = find_fsid(disk_super->fsid, NULL); 803 } 804 805 806 if (!fs_devices) { 807 if (has_metadata_uuid) 808 fs_devices = alloc_fs_devices(disk_super->fsid, 809 disk_super->metadata_uuid); 810 else 811 fs_devices = alloc_fs_devices(disk_super->fsid, NULL); 812 813 if (IS_ERR(fs_devices)) 814 return ERR_CAST(fs_devices); 815 816 fs_devices->fsid_change = fsid_change_in_progress; 817 818 mutex_lock(&fs_devices->device_list_mutex); 819 list_add(&fs_devices->fs_list, &fs_uuids); 820 821 device = NULL; 822 } else { 823 mutex_lock(&fs_devices->device_list_mutex); 824 device = btrfs_find_device(fs_devices, devid, 825 disk_super->dev_item.uuid, NULL, false); 826 827 /* 828 * If this disk has been pulled into an fs devices created by 829 * a device which had the CHANGING_FSID_V2 flag then replace the 830 * metadata_uuid/fsid values of the fs_devices. 831 */ 832 if (fs_devices->fsid_change && 833 found_transid > fs_devices->latest_generation) { 834 memcpy(fs_devices->fsid, disk_super->fsid, 835 BTRFS_FSID_SIZE); 836 837 if (has_metadata_uuid) 838 memcpy(fs_devices->metadata_uuid, 839 disk_super->metadata_uuid, 840 BTRFS_FSID_SIZE); 841 else 842 memcpy(fs_devices->metadata_uuid, 843 disk_super->fsid, BTRFS_FSID_SIZE); 844 845 fs_devices->fsid_change = false; 846 } 847 } 848 849 if (!device) { 850 if (fs_devices->opened) { 851 mutex_unlock(&fs_devices->device_list_mutex); 852 return ERR_PTR(-EBUSY); 853 } 854 855 device = btrfs_alloc_device(NULL, &devid, 856 disk_super->dev_item.uuid); 857 if (IS_ERR(device)) { 858 mutex_unlock(&fs_devices->device_list_mutex); 859 /* we can safely leave the fs_devices entry around */ 860 return device; 861 } 862 863 name = rcu_string_strdup(path, GFP_NOFS); 864 if (!name) { 865 btrfs_free_device(device); 866 mutex_unlock(&fs_devices->device_list_mutex); 867 return ERR_PTR(-ENOMEM); 868 } 869 rcu_assign_pointer(device->name, name); 870 871 list_add_rcu(&device->dev_list, &fs_devices->devices); 872 fs_devices->num_devices++; 873 874 device->fs_devices = fs_devices; 875 *new_device_added = true; 876 877 if (disk_super->label[0]) 878 pr_info( 879 "BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n", 880 disk_super->label, devid, found_transid, path, 881 current->comm, task_pid_nr(current)); 882 else 883 pr_info( 884 "BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n", 885 disk_super->fsid, devid, found_transid, path, 886 current->comm, task_pid_nr(current)); 887 888 } else if (!device->name || strcmp(device->name->str, path)) { 889 /* 890 * When FS is already mounted. 891 * 1. If you are here and if the device->name is NULL that 892 * means this device was missing at time of FS mount. 893 * 2. If you are here and if the device->name is different 894 * from 'path' that means either 895 * a. The same device disappeared and reappeared with 896 * different name. or 897 * b. The missing-disk-which-was-replaced, has 898 * reappeared now. 899 * 900 * We must allow 1 and 2a above. But 2b would be a spurious 901 * and unintentional. 902 * 903 * Further in case of 1 and 2a above, the disk at 'path' 904 * would have missed some transaction when it was away and 905 * in case of 2a the stale bdev has to be updated as well. 906 * 2b must not be allowed at all time. 907 */ 908 909 /* 910 * For now, we do allow update to btrfs_fs_device through the 911 * btrfs dev scan cli after FS has been mounted. We're still 912 * tracking a problem where systems fail mount by subvolume id 913 * when we reject replacement on a mounted FS. 914 */ 915 if (!fs_devices->opened && found_transid < device->generation) { 916 /* 917 * That is if the FS is _not_ mounted and if you 918 * are here, that means there is more than one 919 * disk with same uuid and devid.We keep the one 920 * with larger generation number or the last-in if 921 * generation are equal. 922 */ 923 mutex_unlock(&fs_devices->device_list_mutex); 924 return ERR_PTR(-EEXIST); 925 } 926 927 /* 928 * We are going to replace the device path for a given devid, 929 * make sure it's the same device if the device is mounted 930 */ 931 if (device->bdev) { 932 struct block_device *path_bdev; 933 934 path_bdev = lookup_bdev(path); 935 if (IS_ERR(path_bdev)) { 936 mutex_unlock(&fs_devices->device_list_mutex); 937 return ERR_CAST(path_bdev); 938 } 939 940 if (device->bdev != path_bdev) { 941 bdput(path_bdev); 942 mutex_unlock(&fs_devices->device_list_mutex); 943 btrfs_warn_in_rcu(device->fs_info, 944 "duplicate device fsid:devid for %pU:%llu old:%s new:%s", 945 disk_super->fsid, devid, 946 rcu_str_deref(device->name), path); 947 return ERR_PTR(-EEXIST); 948 } 949 bdput(path_bdev); 950 btrfs_info_in_rcu(device->fs_info, 951 "device fsid %pU devid %llu moved old:%s new:%s", 952 disk_super->fsid, devid, 953 rcu_str_deref(device->name), path); 954 } 955 956 name = rcu_string_strdup(path, GFP_NOFS); 957 if (!name) { 958 mutex_unlock(&fs_devices->device_list_mutex); 959 return ERR_PTR(-ENOMEM); 960 } 961 rcu_string_free(device->name); 962 rcu_assign_pointer(device->name, name); 963 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 964 fs_devices->missing_devices--; 965 clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 966 } 967 } 968 969 /* 970 * Unmount does not free the btrfs_device struct but would zero 971 * generation along with most of the other members. So just update 972 * it back. We need it to pick the disk with largest generation 973 * (as above). 974 */ 975 if (!fs_devices->opened) { 976 device->generation = found_transid; 977 fs_devices->latest_generation = max_t(u64, found_transid, 978 fs_devices->latest_generation); 979 } 980 981 fs_devices->total_devices = btrfs_super_num_devices(disk_super); 982 983 mutex_unlock(&fs_devices->device_list_mutex); 984 return device; 985 } 986 987 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig) 988 { 989 struct btrfs_fs_devices *fs_devices; 990 struct btrfs_device *device; 991 struct btrfs_device *orig_dev; 992 int ret = 0; 993 994 fs_devices = alloc_fs_devices(orig->fsid, NULL); 995 if (IS_ERR(fs_devices)) 996 return fs_devices; 997 998 mutex_lock(&orig->device_list_mutex); 999 fs_devices->total_devices = orig->total_devices; 1000 1001 list_for_each_entry(orig_dev, &orig->devices, dev_list) { 1002 struct rcu_string *name; 1003 1004 device = btrfs_alloc_device(NULL, &orig_dev->devid, 1005 orig_dev->uuid); 1006 if (IS_ERR(device)) { 1007 ret = PTR_ERR(device); 1008 goto error; 1009 } 1010 1011 /* 1012 * This is ok to do without rcu read locked because we hold the 1013 * uuid mutex so nothing we touch in here is going to disappear. 1014 */ 1015 if (orig_dev->name) { 1016 name = rcu_string_strdup(orig_dev->name->str, 1017 GFP_KERNEL); 1018 if (!name) { 1019 btrfs_free_device(device); 1020 ret = -ENOMEM; 1021 goto error; 1022 } 1023 rcu_assign_pointer(device->name, name); 1024 } 1025 1026 list_add(&device->dev_list, &fs_devices->devices); 1027 device->fs_devices = fs_devices; 1028 fs_devices->num_devices++; 1029 } 1030 mutex_unlock(&orig->device_list_mutex); 1031 return fs_devices; 1032 error: 1033 mutex_unlock(&orig->device_list_mutex); 1034 free_fs_devices(fs_devices); 1035 return ERR_PTR(ret); 1036 } 1037 1038 /* 1039 * After we have read the system tree and know devids belonging to 1040 * this filesystem, remove the device which does not belong there. 1041 */ 1042 void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step) 1043 { 1044 struct btrfs_device *device, *next; 1045 struct btrfs_device *latest_dev = NULL; 1046 1047 mutex_lock(&uuid_mutex); 1048 again: 1049 /* This is the initialized path, it is safe to release the devices. */ 1050 list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) { 1051 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 1052 &device->dev_state)) { 1053 if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, 1054 &device->dev_state) && 1055 !test_bit(BTRFS_DEV_STATE_MISSING, 1056 &device->dev_state) && 1057 (!latest_dev || 1058 device->generation > latest_dev->generation)) { 1059 latest_dev = device; 1060 } 1061 continue; 1062 } 1063 1064 if (device->devid == BTRFS_DEV_REPLACE_DEVID) { 1065 /* 1066 * In the first step, keep the device which has 1067 * the correct fsid and the devid that is used 1068 * for the dev_replace procedure. 1069 * In the second step, the dev_replace state is 1070 * read from the device tree and it is known 1071 * whether the procedure is really active or 1072 * not, which means whether this device is 1073 * used or whether it should be removed. 1074 */ 1075 if (step == 0 || test_bit(BTRFS_DEV_STATE_REPLACE_TGT, 1076 &device->dev_state)) { 1077 continue; 1078 } 1079 } 1080 if (device->bdev) { 1081 blkdev_put(device->bdev, device->mode); 1082 device->bdev = NULL; 1083 fs_devices->open_devices--; 1084 } 1085 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 1086 list_del_init(&device->dev_alloc_list); 1087 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 1088 if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, 1089 &device->dev_state)) 1090 fs_devices->rw_devices--; 1091 } 1092 list_del_init(&device->dev_list); 1093 fs_devices->num_devices--; 1094 btrfs_free_device(device); 1095 } 1096 1097 if (fs_devices->seed) { 1098 fs_devices = fs_devices->seed; 1099 goto again; 1100 } 1101 1102 fs_devices->latest_bdev = latest_dev->bdev; 1103 1104 mutex_unlock(&uuid_mutex); 1105 } 1106 1107 static void btrfs_close_bdev(struct btrfs_device *device) 1108 { 1109 if (!device->bdev) 1110 return; 1111 1112 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 1113 sync_blockdev(device->bdev); 1114 invalidate_bdev(device->bdev); 1115 } 1116 1117 blkdev_put(device->bdev, device->mode); 1118 } 1119 1120 static void btrfs_close_one_device(struct btrfs_device *device) 1121 { 1122 struct btrfs_fs_devices *fs_devices = device->fs_devices; 1123 1124 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 1125 device->devid != BTRFS_DEV_REPLACE_DEVID) { 1126 list_del_init(&device->dev_alloc_list); 1127 fs_devices->rw_devices--; 1128 } 1129 1130 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) 1131 fs_devices->missing_devices--; 1132 1133 btrfs_close_bdev(device); 1134 if (device->bdev) { 1135 fs_devices->open_devices--; 1136 device->bdev = NULL; 1137 } 1138 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 1139 1140 device->fs_info = NULL; 1141 atomic_set(&device->dev_stats_ccnt, 0); 1142 extent_io_tree_release(&device->alloc_state); 1143 1144 /* Verify the device is back in a pristine state */ 1145 ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state)); 1146 ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)); 1147 ASSERT(list_empty(&device->dev_alloc_list)); 1148 ASSERT(list_empty(&device->post_commit_list)); 1149 ASSERT(atomic_read(&device->reada_in_flight) == 0); 1150 } 1151 1152 static int close_fs_devices(struct btrfs_fs_devices *fs_devices) 1153 { 1154 struct btrfs_device *device, *tmp; 1155 1156 if (--fs_devices->opened > 0) 1157 return 0; 1158 1159 mutex_lock(&fs_devices->device_list_mutex); 1160 list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) { 1161 btrfs_close_one_device(device); 1162 } 1163 mutex_unlock(&fs_devices->device_list_mutex); 1164 1165 WARN_ON(fs_devices->open_devices); 1166 WARN_ON(fs_devices->rw_devices); 1167 fs_devices->opened = 0; 1168 fs_devices->seeding = false; 1169 1170 return 0; 1171 } 1172 1173 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices) 1174 { 1175 struct btrfs_fs_devices *seed_devices = NULL; 1176 int ret; 1177 1178 mutex_lock(&uuid_mutex); 1179 ret = close_fs_devices(fs_devices); 1180 if (!fs_devices->opened) { 1181 seed_devices = fs_devices->seed; 1182 fs_devices->seed = NULL; 1183 } 1184 mutex_unlock(&uuid_mutex); 1185 1186 while (seed_devices) { 1187 fs_devices = seed_devices; 1188 seed_devices = fs_devices->seed; 1189 close_fs_devices(fs_devices); 1190 free_fs_devices(fs_devices); 1191 } 1192 return ret; 1193 } 1194 1195 static int open_fs_devices(struct btrfs_fs_devices *fs_devices, 1196 fmode_t flags, void *holder) 1197 { 1198 struct btrfs_device *device; 1199 struct btrfs_device *latest_dev = NULL; 1200 1201 flags |= FMODE_EXCL; 1202 1203 list_for_each_entry(device, &fs_devices->devices, dev_list) { 1204 /* Just open everything we can; ignore failures here */ 1205 if (btrfs_open_one_device(fs_devices, device, flags, holder)) 1206 continue; 1207 1208 if (!latest_dev || 1209 device->generation > latest_dev->generation) 1210 latest_dev = device; 1211 } 1212 if (fs_devices->open_devices == 0) 1213 return -EINVAL; 1214 1215 fs_devices->opened = 1; 1216 fs_devices->latest_bdev = latest_dev->bdev; 1217 fs_devices->total_rw_bytes = 0; 1218 fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR; 1219 1220 return 0; 1221 } 1222 1223 static int devid_cmp(void *priv, struct list_head *a, struct list_head *b) 1224 { 1225 struct btrfs_device *dev1, *dev2; 1226 1227 dev1 = list_entry(a, struct btrfs_device, dev_list); 1228 dev2 = list_entry(b, struct btrfs_device, dev_list); 1229 1230 if (dev1->devid < dev2->devid) 1231 return -1; 1232 else if (dev1->devid > dev2->devid) 1233 return 1; 1234 return 0; 1235 } 1236 1237 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, 1238 fmode_t flags, void *holder) 1239 { 1240 int ret; 1241 1242 lockdep_assert_held(&uuid_mutex); 1243 /* 1244 * The device_list_mutex cannot be taken here in case opening the 1245 * underlying device takes further locks like bd_mutex. 1246 * 1247 * We also don't need the lock here as this is called during mount and 1248 * exclusion is provided by uuid_mutex 1249 */ 1250 1251 if (fs_devices->opened) { 1252 fs_devices->opened++; 1253 ret = 0; 1254 } else { 1255 list_sort(NULL, &fs_devices->devices, devid_cmp); 1256 ret = open_fs_devices(fs_devices, flags, holder); 1257 } 1258 1259 return ret; 1260 } 1261 1262 void btrfs_release_disk_super(struct btrfs_super_block *super) 1263 { 1264 struct page *page = virt_to_page(super); 1265 1266 put_page(page); 1267 } 1268 1269 static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev, 1270 u64 bytenr) 1271 { 1272 struct btrfs_super_block *disk_super; 1273 struct page *page; 1274 void *p; 1275 pgoff_t index; 1276 1277 /* make sure our super fits in the device */ 1278 if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode)) 1279 return ERR_PTR(-EINVAL); 1280 1281 /* make sure our super fits in the page */ 1282 if (sizeof(*disk_super) > PAGE_SIZE) 1283 return ERR_PTR(-EINVAL); 1284 1285 /* make sure our super doesn't straddle pages on disk */ 1286 index = bytenr >> PAGE_SHIFT; 1287 if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index) 1288 return ERR_PTR(-EINVAL); 1289 1290 /* pull in the page with our super */ 1291 page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL); 1292 1293 if (IS_ERR(page)) 1294 return ERR_CAST(page); 1295 1296 p = page_address(page); 1297 1298 /* align our pointer to the offset of the super block */ 1299 disk_super = p + offset_in_page(bytenr); 1300 1301 if (btrfs_super_bytenr(disk_super) != bytenr || 1302 btrfs_super_magic(disk_super) != BTRFS_MAGIC) { 1303 btrfs_release_disk_super(p); 1304 return ERR_PTR(-EINVAL); 1305 } 1306 1307 if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1]) 1308 disk_super->label[BTRFS_LABEL_SIZE - 1] = 0; 1309 1310 return disk_super; 1311 } 1312 1313 int btrfs_forget_devices(const char *path) 1314 { 1315 int ret; 1316 1317 mutex_lock(&uuid_mutex); 1318 ret = btrfs_free_stale_devices(strlen(path) ? path : NULL, NULL); 1319 mutex_unlock(&uuid_mutex); 1320 1321 return ret; 1322 } 1323 1324 /* 1325 * Look for a btrfs signature on a device. This may be called out of the mount path 1326 * and we are not allowed to call set_blocksize during the scan. The superblock 1327 * is read via pagecache 1328 */ 1329 struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags, 1330 void *holder) 1331 { 1332 struct btrfs_super_block *disk_super; 1333 bool new_device_added = false; 1334 struct btrfs_device *device = NULL; 1335 struct block_device *bdev; 1336 u64 bytenr; 1337 1338 lockdep_assert_held(&uuid_mutex); 1339 1340 /* 1341 * we would like to check all the supers, but that would make 1342 * a btrfs mount succeed after a mkfs from a different FS. 1343 * So, we need to add a special mount option to scan for 1344 * later supers, using BTRFS_SUPER_MIRROR_MAX instead 1345 */ 1346 bytenr = btrfs_sb_offset(0); 1347 flags |= FMODE_EXCL; 1348 1349 bdev = blkdev_get_by_path(path, flags, holder); 1350 if (IS_ERR(bdev)) 1351 return ERR_CAST(bdev); 1352 1353 disk_super = btrfs_read_disk_super(bdev, bytenr); 1354 if (IS_ERR(disk_super)) { 1355 device = ERR_CAST(disk_super); 1356 goto error_bdev_put; 1357 } 1358 1359 device = device_list_add(path, disk_super, &new_device_added); 1360 if (!IS_ERR(device)) { 1361 if (new_device_added) 1362 btrfs_free_stale_devices(path, device); 1363 } 1364 1365 btrfs_release_disk_super(disk_super); 1366 1367 error_bdev_put: 1368 blkdev_put(bdev, flags); 1369 1370 return device; 1371 } 1372 1373 /* 1374 * Try to find a chunk that intersects [start, start + len] range and when one 1375 * such is found, record the end of it in *start 1376 */ 1377 static bool contains_pending_extent(struct btrfs_device *device, u64 *start, 1378 u64 len) 1379 { 1380 u64 physical_start, physical_end; 1381 1382 lockdep_assert_held(&device->fs_info->chunk_mutex); 1383 1384 if (!find_first_extent_bit(&device->alloc_state, *start, 1385 &physical_start, &physical_end, 1386 CHUNK_ALLOCATED, NULL)) { 1387 1388 if (in_range(physical_start, *start, len) || 1389 in_range(*start, physical_start, 1390 physical_end - physical_start)) { 1391 *start = physical_end + 1; 1392 return true; 1393 } 1394 } 1395 return false; 1396 } 1397 1398 static u64 dev_extent_search_start(struct btrfs_device *device, u64 start) 1399 { 1400 switch (device->fs_devices->chunk_alloc_policy) { 1401 case BTRFS_CHUNK_ALLOC_REGULAR: 1402 /* 1403 * We don't want to overwrite the superblock on the drive nor 1404 * any area used by the boot loader (grub for example), so we 1405 * make sure to start at an offset of at least 1MB. 1406 */ 1407 return max_t(u64, start, SZ_1M); 1408 default: 1409 BUG(); 1410 } 1411 } 1412 1413 /** 1414 * dev_extent_hole_check - check if specified hole is suitable for allocation 1415 * @device: the device which we have the hole 1416 * @hole_start: starting position of the hole 1417 * @hole_size: the size of the hole 1418 * @num_bytes: the size of the free space that we need 1419 * 1420 * This function may modify @hole_start and @hole_end to reflect the suitable 1421 * position for allocation. Returns 1 if hole position is updated, 0 otherwise. 1422 */ 1423 static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start, 1424 u64 *hole_size, u64 num_bytes) 1425 { 1426 bool changed = false; 1427 u64 hole_end = *hole_start + *hole_size; 1428 1429 /* 1430 * Check before we set max_hole_start, otherwise we could end up 1431 * sending back this offset anyway. 1432 */ 1433 if (contains_pending_extent(device, hole_start, *hole_size)) { 1434 if (hole_end >= *hole_start) 1435 *hole_size = hole_end - *hole_start; 1436 else 1437 *hole_size = 0; 1438 changed = true; 1439 } 1440 1441 switch (device->fs_devices->chunk_alloc_policy) { 1442 case BTRFS_CHUNK_ALLOC_REGULAR: 1443 /* No extra check */ 1444 break; 1445 default: 1446 BUG(); 1447 } 1448 1449 return changed; 1450 } 1451 1452 /* 1453 * find_free_dev_extent_start - find free space in the specified device 1454 * @device: the device which we search the free space in 1455 * @num_bytes: the size of the free space that we need 1456 * @search_start: the position from which to begin the search 1457 * @start: store the start of the free space. 1458 * @len: the size of the free space. that we find, or the size 1459 * of the max free space if we don't find suitable free space 1460 * 1461 * this uses a pretty simple search, the expectation is that it is 1462 * called very infrequently and that a given device has a small number 1463 * of extents 1464 * 1465 * @start is used to store the start of the free space if we find. But if we 1466 * don't find suitable free space, it will be used to store the start position 1467 * of the max free space. 1468 * 1469 * @len is used to store the size of the free space that we find. 1470 * But if we don't find suitable free space, it is used to store the size of 1471 * the max free space. 1472 * 1473 * NOTE: This function will search *commit* root of device tree, and does extra 1474 * check to ensure dev extents are not double allocated. 1475 * This makes the function safe to allocate dev extents but may not report 1476 * correct usable device space, as device extent freed in current transaction 1477 * is not reported as avaiable. 1478 */ 1479 static int find_free_dev_extent_start(struct btrfs_device *device, 1480 u64 num_bytes, u64 search_start, u64 *start, 1481 u64 *len) 1482 { 1483 struct btrfs_fs_info *fs_info = device->fs_info; 1484 struct btrfs_root *root = fs_info->dev_root; 1485 struct btrfs_key key; 1486 struct btrfs_dev_extent *dev_extent; 1487 struct btrfs_path *path; 1488 u64 hole_size; 1489 u64 max_hole_start; 1490 u64 max_hole_size; 1491 u64 extent_end; 1492 u64 search_end = device->total_bytes; 1493 int ret; 1494 int slot; 1495 struct extent_buffer *l; 1496 1497 search_start = dev_extent_search_start(device, search_start); 1498 1499 path = btrfs_alloc_path(); 1500 if (!path) 1501 return -ENOMEM; 1502 1503 max_hole_start = search_start; 1504 max_hole_size = 0; 1505 1506 again: 1507 if (search_start >= search_end || 1508 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 1509 ret = -ENOSPC; 1510 goto out; 1511 } 1512 1513 path->reada = READA_FORWARD; 1514 path->search_commit_root = 1; 1515 path->skip_locking = 1; 1516 1517 key.objectid = device->devid; 1518 key.offset = search_start; 1519 key.type = BTRFS_DEV_EXTENT_KEY; 1520 1521 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 1522 if (ret < 0) 1523 goto out; 1524 if (ret > 0) { 1525 ret = btrfs_previous_item(root, path, key.objectid, key.type); 1526 if (ret < 0) 1527 goto out; 1528 } 1529 1530 while (1) { 1531 l = path->nodes[0]; 1532 slot = path->slots[0]; 1533 if (slot >= btrfs_header_nritems(l)) { 1534 ret = btrfs_next_leaf(root, path); 1535 if (ret == 0) 1536 continue; 1537 if (ret < 0) 1538 goto out; 1539 1540 break; 1541 } 1542 btrfs_item_key_to_cpu(l, &key, slot); 1543 1544 if (key.objectid < device->devid) 1545 goto next; 1546 1547 if (key.objectid > device->devid) 1548 break; 1549 1550 if (key.type != BTRFS_DEV_EXTENT_KEY) 1551 goto next; 1552 1553 if (key.offset > search_start) { 1554 hole_size = key.offset - search_start; 1555 dev_extent_hole_check(device, &search_start, &hole_size, 1556 num_bytes); 1557 1558 if (hole_size > max_hole_size) { 1559 max_hole_start = search_start; 1560 max_hole_size = hole_size; 1561 } 1562 1563 /* 1564 * If this free space is greater than which we need, 1565 * it must be the max free space that we have found 1566 * until now, so max_hole_start must point to the start 1567 * of this free space and the length of this free space 1568 * is stored in max_hole_size. Thus, we return 1569 * max_hole_start and max_hole_size and go back to the 1570 * caller. 1571 */ 1572 if (hole_size >= num_bytes) { 1573 ret = 0; 1574 goto out; 1575 } 1576 } 1577 1578 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 1579 extent_end = key.offset + btrfs_dev_extent_length(l, 1580 dev_extent); 1581 if (extent_end > search_start) 1582 search_start = extent_end; 1583 next: 1584 path->slots[0]++; 1585 cond_resched(); 1586 } 1587 1588 /* 1589 * At this point, search_start should be the end of 1590 * allocated dev extents, and when shrinking the device, 1591 * search_end may be smaller than search_start. 1592 */ 1593 if (search_end > search_start) { 1594 hole_size = search_end - search_start; 1595 if (dev_extent_hole_check(device, &search_start, &hole_size, 1596 num_bytes)) { 1597 btrfs_release_path(path); 1598 goto again; 1599 } 1600 1601 if (hole_size > max_hole_size) { 1602 max_hole_start = search_start; 1603 max_hole_size = hole_size; 1604 } 1605 } 1606 1607 /* See above. */ 1608 if (max_hole_size < num_bytes) 1609 ret = -ENOSPC; 1610 else 1611 ret = 0; 1612 1613 out: 1614 btrfs_free_path(path); 1615 *start = max_hole_start; 1616 if (len) 1617 *len = max_hole_size; 1618 return ret; 1619 } 1620 1621 int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes, 1622 u64 *start, u64 *len) 1623 { 1624 /* FIXME use last free of some kind */ 1625 return find_free_dev_extent_start(device, num_bytes, 0, start, len); 1626 } 1627 1628 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans, 1629 struct btrfs_device *device, 1630 u64 start, u64 *dev_extent_len) 1631 { 1632 struct btrfs_fs_info *fs_info = device->fs_info; 1633 struct btrfs_root *root = fs_info->dev_root; 1634 int ret; 1635 struct btrfs_path *path; 1636 struct btrfs_key key; 1637 struct btrfs_key found_key; 1638 struct extent_buffer *leaf = NULL; 1639 struct btrfs_dev_extent *extent = NULL; 1640 1641 path = btrfs_alloc_path(); 1642 if (!path) 1643 return -ENOMEM; 1644 1645 key.objectid = device->devid; 1646 key.offset = start; 1647 key.type = BTRFS_DEV_EXTENT_KEY; 1648 again: 1649 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1650 if (ret > 0) { 1651 ret = btrfs_previous_item(root, path, key.objectid, 1652 BTRFS_DEV_EXTENT_KEY); 1653 if (ret) 1654 goto out; 1655 leaf = path->nodes[0]; 1656 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 1657 extent = btrfs_item_ptr(leaf, path->slots[0], 1658 struct btrfs_dev_extent); 1659 BUG_ON(found_key.offset > start || found_key.offset + 1660 btrfs_dev_extent_length(leaf, extent) < start); 1661 key = found_key; 1662 btrfs_release_path(path); 1663 goto again; 1664 } else if (ret == 0) { 1665 leaf = path->nodes[0]; 1666 extent = btrfs_item_ptr(leaf, path->slots[0], 1667 struct btrfs_dev_extent); 1668 } else { 1669 btrfs_handle_fs_error(fs_info, ret, "Slot search failed"); 1670 goto out; 1671 } 1672 1673 *dev_extent_len = btrfs_dev_extent_length(leaf, extent); 1674 1675 ret = btrfs_del_item(trans, root, path); 1676 if (ret) { 1677 btrfs_handle_fs_error(fs_info, ret, 1678 "Failed to remove dev extent item"); 1679 } else { 1680 set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags); 1681 } 1682 out: 1683 btrfs_free_path(path); 1684 return ret; 1685 } 1686 1687 static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans, 1688 struct btrfs_device *device, 1689 u64 chunk_offset, u64 start, u64 num_bytes) 1690 { 1691 int ret; 1692 struct btrfs_path *path; 1693 struct btrfs_fs_info *fs_info = device->fs_info; 1694 struct btrfs_root *root = fs_info->dev_root; 1695 struct btrfs_dev_extent *extent; 1696 struct extent_buffer *leaf; 1697 struct btrfs_key key; 1698 1699 WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)); 1700 WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)); 1701 path = btrfs_alloc_path(); 1702 if (!path) 1703 return -ENOMEM; 1704 1705 key.objectid = device->devid; 1706 key.offset = start; 1707 key.type = BTRFS_DEV_EXTENT_KEY; 1708 ret = btrfs_insert_empty_item(trans, root, path, &key, 1709 sizeof(*extent)); 1710 if (ret) 1711 goto out; 1712 1713 leaf = path->nodes[0]; 1714 extent = btrfs_item_ptr(leaf, path->slots[0], 1715 struct btrfs_dev_extent); 1716 btrfs_set_dev_extent_chunk_tree(leaf, extent, 1717 BTRFS_CHUNK_TREE_OBJECTID); 1718 btrfs_set_dev_extent_chunk_objectid(leaf, extent, 1719 BTRFS_FIRST_CHUNK_TREE_OBJECTID); 1720 btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset); 1721 1722 btrfs_set_dev_extent_length(leaf, extent, num_bytes); 1723 btrfs_mark_buffer_dirty(leaf); 1724 out: 1725 btrfs_free_path(path); 1726 return ret; 1727 } 1728 1729 static u64 find_next_chunk(struct btrfs_fs_info *fs_info) 1730 { 1731 struct extent_map_tree *em_tree; 1732 struct extent_map *em; 1733 struct rb_node *n; 1734 u64 ret = 0; 1735 1736 em_tree = &fs_info->mapping_tree; 1737 read_lock(&em_tree->lock); 1738 n = rb_last(&em_tree->map.rb_root); 1739 if (n) { 1740 em = rb_entry(n, struct extent_map, rb_node); 1741 ret = em->start + em->len; 1742 } 1743 read_unlock(&em_tree->lock); 1744 1745 return ret; 1746 } 1747 1748 static noinline int find_next_devid(struct btrfs_fs_info *fs_info, 1749 u64 *devid_ret) 1750 { 1751 int ret; 1752 struct btrfs_key key; 1753 struct btrfs_key found_key; 1754 struct btrfs_path *path; 1755 1756 path = btrfs_alloc_path(); 1757 if (!path) 1758 return -ENOMEM; 1759 1760 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1761 key.type = BTRFS_DEV_ITEM_KEY; 1762 key.offset = (u64)-1; 1763 1764 ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0); 1765 if (ret < 0) 1766 goto error; 1767 1768 if (ret == 0) { 1769 /* Corruption */ 1770 btrfs_err(fs_info, "corrupted chunk tree devid -1 matched"); 1771 ret = -EUCLEAN; 1772 goto error; 1773 } 1774 1775 ret = btrfs_previous_item(fs_info->chunk_root, path, 1776 BTRFS_DEV_ITEMS_OBJECTID, 1777 BTRFS_DEV_ITEM_KEY); 1778 if (ret) { 1779 *devid_ret = 1; 1780 } else { 1781 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 1782 path->slots[0]); 1783 *devid_ret = found_key.offset + 1; 1784 } 1785 ret = 0; 1786 error: 1787 btrfs_free_path(path); 1788 return ret; 1789 } 1790 1791 /* 1792 * the device information is stored in the chunk root 1793 * the btrfs_device struct should be fully filled in 1794 */ 1795 static int btrfs_add_dev_item(struct btrfs_trans_handle *trans, 1796 struct btrfs_device *device) 1797 { 1798 int ret; 1799 struct btrfs_path *path; 1800 struct btrfs_dev_item *dev_item; 1801 struct extent_buffer *leaf; 1802 struct btrfs_key key; 1803 unsigned long ptr; 1804 1805 path = btrfs_alloc_path(); 1806 if (!path) 1807 return -ENOMEM; 1808 1809 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1810 key.type = BTRFS_DEV_ITEM_KEY; 1811 key.offset = device->devid; 1812 1813 ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path, 1814 &key, sizeof(*dev_item)); 1815 if (ret) 1816 goto out; 1817 1818 leaf = path->nodes[0]; 1819 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 1820 1821 btrfs_set_device_id(leaf, dev_item, device->devid); 1822 btrfs_set_device_generation(leaf, dev_item, 0); 1823 btrfs_set_device_type(leaf, dev_item, device->type); 1824 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 1825 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 1826 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 1827 btrfs_set_device_total_bytes(leaf, dev_item, 1828 btrfs_device_get_disk_total_bytes(device)); 1829 btrfs_set_device_bytes_used(leaf, dev_item, 1830 btrfs_device_get_bytes_used(device)); 1831 btrfs_set_device_group(leaf, dev_item, 0); 1832 btrfs_set_device_seek_speed(leaf, dev_item, 0); 1833 btrfs_set_device_bandwidth(leaf, dev_item, 0); 1834 btrfs_set_device_start_offset(leaf, dev_item, 0); 1835 1836 ptr = btrfs_device_uuid(dev_item); 1837 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 1838 ptr = btrfs_device_fsid(dev_item); 1839 write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid, 1840 ptr, BTRFS_FSID_SIZE); 1841 btrfs_mark_buffer_dirty(leaf); 1842 1843 ret = 0; 1844 out: 1845 btrfs_free_path(path); 1846 return ret; 1847 } 1848 1849 /* 1850 * Function to update ctime/mtime for a given device path. 1851 * Mainly used for ctime/mtime based probe like libblkid. 1852 */ 1853 static void update_dev_time(const char *path_name) 1854 { 1855 struct file *filp; 1856 1857 filp = filp_open(path_name, O_RDWR, 0); 1858 if (IS_ERR(filp)) 1859 return; 1860 file_update_time(filp); 1861 filp_close(filp, NULL); 1862 } 1863 1864 static int btrfs_rm_dev_item(struct btrfs_device *device) 1865 { 1866 struct btrfs_root *root = device->fs_info->chunk_root; 1867 int ret; 1868 struct btrfs_path *path; 1869 struct btrfs_key key; 1870 struct btrfs_trans_handle *trans; 1871 1872 path = btrfs_alloc_path(); 1873 if (!path) 1874 return -ENOMEM; 1875 1876 trans = btrfs_start_transaction(root, 0); 1877 if (IS_ERR(trans)) { 1878 btrfs_free_path(path); 1879 return PTR_ERR(trans); 1880 } 1881 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1882 key.type = BTRFS_DEV_ITEM_KEY; 1883 key.offset = device->devid; 1884 1885 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1886 if (ret) { 1887 if (ret > 0) 1888 ret = -ENOENT; 1889 btrfs_abort_transaction(trans, ret); 1890 btrfs_end_transaction(trans); 1891 goto out; 1892 } 1893 1894 ret = btrfs_del_item(trans, root, path); 1895 if (ret) { 1896 btrfs_abort_transaction(trans, ret); 1897 btrfs_end_transaction(trans); 1898 } 1899 1900 out: 1901 btrfs_free_path(path); 1902 if (!ret) 1903 ret = btrfs_commit_transaction(trans); 1904 return ret; 1905 } 1906 1907 /* 1908 * Verify that @num_devices satisfies the RAID profile constraints in the whole 1909 * filesystem. It's up to the caller to adjust that number regarding eg. device 1910 * replace. 1911 */ 1912 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info, 1913 u64 num_devices) 1914 { 1915 u64 all_avail; 1916 unsigned seq; 1917 int i; 1918 1919 do { 1920 seq = read_seqbegin(&fs_info->profiles_lock); 1921 1922 all_avail = fs_info->avail_data_alloc_bits | 1923 fs_info->avail_system_alloc_bits | 1924 fs_info->avail_metadata_alloc_bits; 1925 } while (read_seqretry(&fs_info->profiles_lock, seq)); 1926 1927 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { 1928 if (!(all_avail & btrfs_raid_array[i].bg_flag)) 1929 continue; 1930 1931 if (num_devices < btrfs_raid_array[i].devs_min) { 1932 int ret = btrfs_raid_array[i].mindev_error; 1933 1934 if (ret) 1935 return ret; 1936 } 1937 } 1938 1939 return 0; 1940 } 1941 1942 static struct btrfs_device * btrfs_find_next_active_device( 1943 struct btrfs_fs_devices *fs_devs, struct btrfs_device *device) 1944 { 1945 struct btrfs_device *next_device; 1946 1947 list_for_each_entry(next_device, &fs_devs->devices, dev_list) { 1948 if (next_device != device && 1949 !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state) 1950 && next_device->bdev) 1951 return next_device; 1952 } 1953 1954 return NULL; 1955 } 1956 1957 /* 1958 * Helper function to check if the given device is part of s_bdev / latest_bdev 1959 * and replace it with the provided or the next active device, in the context 1960 * where this function called, there should be always be another device (or 1961 * this_dev) which is active. 1962 */ 1963 void __cold btrfs_assign_next_active_device(struct btrfs_device *device, 1964 struct btrfs_device *this_dev) 1965 { 1966 struct btrfs_fs_info *fs_info = device->fs_info; 1967 struct btrfs_device *next_device; 1968 1969 if (this_dev) 1970 next_device = this_dev; 1971 else 1972 next_device = btrfs_find_next_active_device(fs_info->fs_devices, 1973 device); 1974 ASSERT(next_device); 1975 1976 if (fs_info->sb->s_bdev && 1977 (fs_info->sb->s_bdev == device->bdev)) 1978 fs_info->sb->s_bdev = next_device->bdev; 1979 1980 if (fs_info->fs_devices->latest_bdev == device->bdev) 1981 fs_info->fs_devices->latest_bdev = next_device->bdev; 1982 } 1983 1984 /* 1985 * Return btrfs_fs_devices::num_devices excluding the device that's being 1986 * currently replaced. 1987 */ 1988 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info) 1989 { 1990 u64 num_devices = fs_info->fs_devices->num_devices; 1991 1992 down_read(&fs_info->dev_replace.rwsem); 1993 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) { 1994 ASSERT(num_devices > 1); 1995 num_devices--; 1996 } 1997 up_read(&fs_info->dev_replace.rwsem); 1998 1999 return num_devices; 2000 } 2001 2002 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info, 2003 struct block_device *bdev, 2004 const char *device_path) 2005 { 2006 struct btrfs_super_block *disk_super; 2007 int copy_num; 2008 2009 if (!bdev) 2010 return; 2011 2012 for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) { 2013 struct page *page; 2014 int ret; 2015 2016 disk_super = btrfs_read_dev_one_super(bdev, copy_num); 2017 if (IS_ERR(disk_super)) 2018 continue; 2019 2020 memset(&disk_super->magic, 0, sizeof(disk_super->magic)); 2021 2022 page = virt_to_page(disk_super); 2023 set_page_dirty(page); 2024 lock_page(page); 2025 /* write_on_page() unlocks the page */ 2026 ret = write_one_page(page); 2027 if (ret) 2028 btrfs_warn(fs_info, 2029 "error clearing superblock number %d (%d)", 2030 copy_num, ret); 2031 btrfs_release_disk_super(disk_super); 2032 2033 } 2034 2035 /* Notify udev that device has changed */ 2036 btrfs_kobject_uevent(bdev, KOBJ_CHANGE); 2037 2038 /* Update ctime/mtime for device path for libblkid */ 2039 update_dev_time(device_path); 2040 } 2041 2042 int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path, 2043 u64 devid) 2044 { 2045 struct btrfs_device *device; 2046 struct btrfs_fs_devices *cur_devices; 2047 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2048 u64 num_devices; 2049 int ret = 0; 2050 2051 mutex_lock(&uuid_mutex); 2052 2053 num_devices = btrfs_num_devices(fs_info); 2054 2055 ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1); 2056 if (ret) 2057 goto out; 2058 2059 device = btrfs_find_device_by_devspec(fs_info, devid, device_path); 2060 2061 if (IS_ERR(device)) { 2062 if (PTR_ERR(device) == -ENOENT && 2063 strcmp(device_path, "missing") == 0) 2064 ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND; 2065 else 2066 ret = PTR_ERR(device); 2067 goto out; 2068 } 2069 2070 if (btrfs_pinned_by_swapfile(fs_info, device)) { 2071 btrfs_warn_in_rcu(fs_info, 2072 "cannot remove device %s (devid %llu) due to active swapfile", 2073 rcu_str_deref(device->name), device->devid); 2074 ret = -ETXTBSY; 2075 goto out; 2076 } 2077 2078 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 2079 ret = BTRFS_ERROR_DEV_TGT_REPLACE; 2080 goto out; 2081 } 2082 2083 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 2084 fs_info->fs_devices->rw_devices == 1) { 2085 ret = BTRFS_ERROR_DEV_ONLY_WRITABLE; 2086 goto out; 2087 } 2088 2089 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2090 mutex_lock(&fs_info->chunk_mutex); 2091 list_del_init(&device->dev_alloc_list); 2092 device->fs_devices->rw_devices--; 2093 mutex_unlock(&fs_info->chunk_mutex); 2094 } 2095 2096 mutex_unlock(&uuid_mutex); 2097 ret = btrfs_shrink_device(device, 0); 2098 mutex_lock(&uuid_mutex); 2099 if (ret) 2100 goto error_undo; 2101 2102 /* 2103 * TODO: the superblock still includes this device in its num_devices 2104 * counter although write_all_supers() is not locked out. This 2105 * could give a filesystem state which requires a degraded mount. 2106 */ 2107 ret = btrfs_rm_dev_item(device); 2108 if (ret) 2109 goto error_undo; 2110 2111 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2112 btrfs_scrub_cancel_dev(device); 2113 2114 /* 2115 * the device list mutex makes sure that we don't change 2116 * the device list while someone else is writing out all 2117 * the device supers. Whoever is writing all supers, should 2118 * lock the device list mutex before getting the number of 2119 * devices in the super block (super_copy). Conversely, 2120 * whoever updates the number of devices in the super block 2121 * (super_copy) should hold the device list mutex. 2122 */ 2123 2124 /* 2125 * In normal cases the cur_devices == fs_devices. But in case 2126 * of deleting a seed device, the cur_devices should point to 2127 * its own fs_devices listed under the fs_devices->seed. 2128 */ 2129 cur_devices = device->fs_devices; 2130 mutex_lock(&fs_devices->device_list_mutex); 2131 list_del_rcu(&device->dev_list); 2132 2133 cur_devices->num_devices--; 2134 cur_devices->total_devices--; 2135 /* Update total_devices of the parent fs_devices if it's seed */ 2136 if (cur_devices != fs_devices) 2137 fs_devices->total_devices--; 2138 2139 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) 2140 cur_devices->missing_devices--; 2141 2142 btrfs_assign_next_active_device(device, NULL); 2143 2144 if (device->bdev) { 2145 cur_devices->open_devices--; 2146 /* remove sysfs entry */ 2147 btrfs_sysfs_remove_devices_dir(fs_devices, device); 2148 } 2149 2150 num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1; 2151 btrfs_set_super_num_devices(fs_info->super_copy, num_devices); 2152 mutex_unlock(&fs_devices->device_list_mutex); 2153 2154 /* 2155 * at this point, the device is zero sized and detached from 2156 * the devices list. All that's left is to zero out the old 2157 * supers and free the device. 2158 */ 2159 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 2160 btrfs_scratch_superblocks(fs_info, device->bdev, 2161 device->name->str); 2162 2163 btrfs_close_bdev(device); 2164 synchronize_rcu(); 2165 btrfs_free_device(device); 2166 2167 if (cur_devices->open_devices == 0) { 2168 while (fs_devices) { 2169 if (fs_devices->seed == cur_devices) { 2170 fs_devices->seed = cur_devices->seed; 2171 break; 2172 } 2173 fs_devices = fs_devices->seed; 2174 } 2175 cur_devices->seed = NULL; 2176 close_fs_devices(cur_devices); 2177 free_fs_devices(cur_devices); 2178 } 2179 2180 out: 2181 mutex_unlock(&uuid_mutex); 2182 return ret; 2183 2184 error_undo: 2185 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2186 mutex_lock(&fs_info->chunk_mutex); 2187 list_add(&device->dev_alloc_list, 2188 &fs_devices->alloc_list); 2189 device->fs_devices->rw_devices++; 2190 mutex_unlock(&fs_info->chunk_mutex); 2191 } 2192 goto out; 2193 } 2194 2195 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev) 2196 { 2197 struct btrfs_fs_devices *fs_devices; 2198 2199 lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex); 2200 2201 /* 2202 * in case of fs with no seed, srcdev->fs_devices will point 2203 * to fs_devices of fs_info. However when the dev being replaced is 2204 * a seed dev it will point to the seed's local fs_devices. In short 2205 * srcdev will have its correct fs_devices in both the cases. 2206 */ 2207 fs_devices = srcdev->fs_devices; 2208 2209 list_del_rcu(&srcdev->dev_list); 2210 list_del(&srcdev->dev_alloc_list); 2211 fs_devices->num_devices--; 2212 if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state)) 2213 fs_devices->missing_devices--; 2214 2215 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) 2216 fs_devices->rw_devices--; 2217 2218 if (srcdev->bdev) 2219 fs_devices->open_devices--; 2220 } 2221 2222 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev) 2223 { 2224 struct btrfs_fs_info *fs_info = srcdev->fs_info; 2225 struct btrfs_fs_devices *fs_devices = srcdev->fs_devices; 2226 2227 mutex_lock(&uuid_mutex); 2228 2229 btrfs_close_bdev(srcdev); 2230 synchronize_rcu(); 2231 btrfs_free_device(srcdev); 2232 2233 /* if this is no devs we rather delete the fs_devices */ 2234 if (!fs_devices->num_devices) { 2235 struct btrfs_fs_devices *tmp_fs_devices; 2236 2237 /* 2238 * On a mounted FS, num_devices can't be zero unless it's a 2239 * seed. In case of a seed device being replaced, the replace 2240 * target added to the sprout FS, so there will be no more 2241 * device left under the seed FS. 2242 */ 2243 ASSERT(fs_devices->seeding); 2244 2245 tmp_fs_devices = fs_info->fs_devices; 2246 while (tmp_fs_devices) { 2247 if (tmp_fs_devices->seed == fs_devices) { 2248 tmp_fs_devices->seed = fs_devices->seed; 2249 break; 2250 } 2251 tmp_fs_devices = tmp_fs_devices->seed; 2252 } 2253 fs_devices->seed = NULL; 2254 close_fs_devices(fs_devices); 2255 free_fs_devices(fs_devices); 2256 } 2257 mutex_unlock(&uuid_mutex); 2258 } 2259 2260 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev) 2261 { 2262 struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices; 2263 2264 mutex_lock(&fs_devices->device_list_mutex); 2265 2266 btrfs_sysfs_remove_devices_dir(fs_devices, tgtdev); 2267 2268 if (tgtdev->bdev) 2269 fs_devices->open_devices--; 2270 2271 fs_devices->num_devices--; 2272 2273 btrfs_assign_next_active_device(tgtdev, NULL); 2274 2275 list_del_rcu(&tgtdev->dev_list); 2276 2277 mutex_unlock(&fs_devices->device_list_mutex); 2278 2279 /* 2280 * The update_dev_time() with in btrfs_scratch_superblocks() 2281 * may lead to a call to btrfs_show_devname() which will try 2282 * to hold device_list_mutex. And here this device 2283 * is already out of device list, so we don't have to hold 2284 * the device_list_mutex lock. 2285 */ 2286 btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev, 2287 tgtdev->name->str); 2288 2289 btrfs_close_bdev(tgtdev); 2290 synchronize_rcu(); 2291 btrfs_free_device(tgtdev); 2292 } 2293 2294 static struct btrfs_device *btrfs_find_device_by_path( 2295 struct btrfs_fs_info *fs_info, const char *device_path) 2296 { 2297 int ret = 0; 2298 struct btrfs_super_block *disk_super; 2299 u64 devid; 2300 u8 *dev_uuid; 2301 struct block_device *bdev; 2302 struct btrfs_device *device; 2303 2304 ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ, 2305 fs_info->bdev_holder, 0, &bdev, &disk_super); 2306 if (ret) 2307 return ERR_PTR(ret); 2308 2309 devid = btrfs_stack_device_id(&disk_super->dev_item); 2310 dev_uuid = disk_super->dev_item.uuid; 2311 if (btrfs_fs_incompat(fs_info, METADATA_UUID)) 2312 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid, 2313 disk_super->metadata_uuid, true); 2314 else 2315 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid, 2316 disk_super->fsid, true); 2317 2318 btrfs_release_disk_super(disk_super); 2319 if (!device) 2320 device = ERR_PTR(-ENOENT); 2321 blkdev_put(bdev, FMODE_READ); 2322 return device; 2323 } 2324 2325 /* 2326 * Lookup a device given by device id, or the path if the id is 0. 2327 */ 2328 struct btrfs_device *btrfs_find_device_by_devspec( 2329 struct btrfs_fs_info *fs_info, u64 devid, 2330 const char *device_path) 2331 { 2332 struct btrfs_device *device; 2333 2334 if (devid) { 2335 device = btrfs_find_device(fs_info->fs_devices, devid, NULL, 2336 NULL, true); 2337 if (!device) 2338 return ERR_PTR(-ENOENT); 2339 return device; 2340 } 2341 2342 if (!device_path || !device_path[0]) 2343 return ERR_PTR(-EINVAL); 2344 2345 if (strcmp(device_path, "missing") == 0) { 2346 /* Find first missing device */ 2347 list_for_each_entry(device, &fs_info->fs_devices->devices, 2348 dev_list) { 2349 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 2350 &device->dev_state) && !device->bdev) 2351 return device; 2352 } 2353 return ERR_PTR(-ENOENT); 2354 } 2355 2356 return btrfs_find_device_by_path(fs_info, device_path); 2357 } 2358 2359 /* 2360 * does all the dirty work required for changing file system's UUID. 2361 */ 2362 static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info) 2363 { 2364 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2365 struct btrfs_fs_devices *old_devices; 2366 struct btrfs_fs_devices *seed_devices; 2367 struct btrfs_super_block *disk_super = fs_info->super_copy; 2368 struct btrfs_device *device; 2369 u64 super_flags; 2370 2371 lockdep_assert_held(&uuid_mutex); 2372 if (!fs_devices->seeding) 2373 return -EINVAL; 2374 2375 seed_devices = alloc_fs_devices(NULL, NULL); 2376 if (IS_ERR(seed_devices)) 2377 return PTR_ERR(seed_devices); 2378 2379 old_devices = clone_fs_devices(fs_devices); 2380 if (IS_ERR(old_devices)) { 2381 kfree(seed_devices); 2382 return PTR_ERR(old_devices); 2383 } 2384 2385 list_add(&old_devices->fs_list, &fs_uuids); 2386 2387 memcpy(seed_devices, fs_devices, sizeof(*seed_devices)); 2388 seed_devices->opened = 1; 2389 INIT_LIST_HEAD(&seed_devices->devices); 2390 INIT_LIST_HEAD(&seed_devices->alloc_list); 2391 mutex_init(&seed_devices->device_list_mutex); 2392 2393 mutex_lock(&fs_devices->device_list_mutex); 2394 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices, 2395 synchronize_rcu); 2396 list_for_each_entry(device, &seed_devices->devices, dev_list) 2397 device->fs_devices = seed_devices; 2398 2399 mutex_lock(&fs_info->chunk_mutex); 2400 list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list); 2401 mutex_unlock(&fs_info->chunk_mutex); 2402 2403 fs_devices->seeding = false; 2404 fs_devices->num_devices = 0; 2405 fs_devices->open_devices = 0; 2406 fs_devices->missing_devices = 0; 2407 fs_devices->rotating = false; 2408 fs_devices->seed = seed_devices; 2409 2410 generate_random_uuid(fs_devices->fsid); 2411 memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE); 2412 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); 2413 mutex_unlock(&fs_devices->device_list_mutex); 2414 2415 super_flags = btrfs_super_flags(disk_super) & 2416 ~BTRFS_SUPER_FLAG_SEEDING; 2417 btrfs_set_super_flags(disk_super, super_flags); 2418 2419 return 0; 2420 } 2421 2422 /* 2423 * Store the expected generation for seed devices in device items. 2424 */ 2425 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans) 2426 { 2427 struct btrfs_fs_info *fs_info = trans->fs_info; 2428 struct btrfs_root *root = fs_info->chunk_root; 2429 struct btrfs_path *path; 2430 struct extent_buffer *leaf; 2431 struct btrfs_dev_item *dev_item; 2432 struct btrfs_device *device; 2433 struct btrfs_key key; 2434 u8 fs_uuid[BTRFS_FSID_SIZE]; 2435 u8 dev_uuid[BTRFS_UUID_SIZE]; 2436 u64 devid; 2437 int ret; 2438 2439 path = btrfs_alloc_path(); 2440 if (!path) 2441 return -ENOMEM; 2442 2443 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2444 key.offset = 0; 2445 key.type = BTRFS_DEV_ITEM_KEY; 2446 2447 while (1) { 2448 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2449 if (ret < 0) 2450 goto error; 2451 2452 leaf = path->nodes[0]; 2453 next_slot: 2454 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 2455 ret = btrfs_next_leaf(root, path); 2456 if (ret > 0) 2457 break; 2458 if (ret < 0) 2459 goto error; 2460 leaf = path->nodes[0]; 2461 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2462 btrfs_release_path(path); 2463 continue; 2464 } 2465 2466 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2467 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID || 2468 key.type != BTRFS_DEV_ITEM_KEY) 2469 break; 2470 2471 dev_item = btrfs_item_ptr(leaf, path->slots[0], 2472 struct btrfs_dev_item); 2473 devid = btrfs_device_id(leaf, dev_item); 2474 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 2475 BTRFS_UUID_SIZE); 2476 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 2477 BTRFS_FSID_SIZE); 2478 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid, 2479 fs_uuid, true); 2480 BUG_ON(!device); /* Logic error */ 2481 2482 if (device->fs_devices->seeding) { 2483 btrfs_set_device_generation(leaf, dev_item, 2484 device->generation); 2485 btrfs_mark_buffer_dirty(leaf); 2486 } 2487 2488 path->slots[0]++; 2489 goto next_slot; 2490 } 2491 ret = 0; 2492 error: 2493 btrfs_free_path(path); 2494 return ret; 2495 } 2496 2497 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path) 2498 { 2499 struct btrfs_root *root = fs_info->dev_root; 2500 struct request_queue *q; 2501 struct btrfs_trans_handle *trans; 2502 struct btrfs_device *device; 2503 struct block_device *bdev; 2504 struct super_block *sb = fs_info->sb; 2505 struct rcu_string *name; 2506 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2507 u64 orig_super_total_bytes; 2508 u64 orig_super_num_devices; 2509 int seeding_dev = 0; 2510 int ret = 0; 2511 bool unlocked = false; 2512 2513 if (sb_rdonly(sb) && !fs_devices->seeding) 2514 return -EROFS; 2515 2516 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, 2517 fs_info->bdev_holder); 2518 if (IS_ERR(bdev)) 2519 return PTR_ERR(bdev); 2520 2521 if (fs_devices->seeding) { 2522 seeding_dev = 1; 2523 down_write(&sb->s_umount); 2524 mutex_lock(&uuid_mutex); 2525 } 2526 2527 filemap_write_and_wait(bdev->bd_inode->i_mapping); 2528 2529 mutex_lock(&fs_devices->device_list_mutex); 2530 list_for_each_entry(device, &fs_devices->devices, dev_list) { 2531 if (device->bdev == bdev) { 2532 ret = -EEXIST; 2533 mutex_unlock( 2534 &fs_devices->device_list_mutex); 2535 goto error; 2536 } 2537 } 2538 mutex_unlock(&fs_devices->device_list_mutex); 2539 2540 device = btrfs_alloc_device(fs_info, NULL, NULL); 2541 if (IS_ERR(device)) { 2542 /* we can safely leave the fs_devices entry around */ 2543 ret = PTR_ERR(device); 2544 goto error; 2545 } 2546 2547 name = rcu_string_strdup(device_path, GFP_KERNEL); 2548 if (!name) { 2549 ret = -ENOMEM; 2550 goto error_free_device; 2551 } 2552 rcu_assign_pointer(device->name, name); 2553 2554 trans = btrfs_start_transaction(root, 0); 2555 if (IS_ERR(trans)) { 2556 ret = PTR_ERR(trans); 2557 goto error_free_device; 2558 } 2559 2560 q = bdev_get_queue(bdev); 2561 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 2562 device->generation = trans->transid; 2563 device->io_width = fs_info->sectorsize; 2564 device->io_align = fs_info->sectorsize; 2565 device->sector_size = fs_info->sectorsize; 2566 device->total_bytes = round_down(i_size_read(bdev->bd_inode), 2567 fs_info->sectorsize); 2568 device->disk_total_bytes = device->total_bytes; 2569 device->commit_total_bytes = device->total_bytes; 2570 device->fs_info = fs_info; 2571 device->bdev = bdev; 2572 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2573 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 2574 device->mode = FMODE_EXCL; 2575 device->dev_stats_valid = 1; 2576 set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE); 2577 2578 if (seeding_dev) { 2579 sb->s_flags &= ~SB_RDONLY; 2580 ret = btrfs_prepare_sprout(fs_info); 2581 if (ret) { 2582 btrfs_abort_transaction(trans, ret); 2583 goto error_trans; 2584 } 2585 } 2586 2587 device->fs_devices = fs_devices; 2588 2589 mutex_lock(&fs_devices->device_list_mutex); 2590 mutex_lock(&fs_info->chunk_mutex); 2591 list_add_rcu(&device->dev_list, &fs_devices->devices); 2592 list_add(&device->dev_alloc_list, &fs_devices->alloc_list); 2593 fs_devices->num_devices++; 2594 fs_devices->open_devices++; 2595 fs_devices->rw_devices++; 2596 fs_devices->total_devices++; 2597 fs_devices->total_rw_bytes += device->total_bytes; 2598 2599 atomic64_add(device->total_bytes, &fs_info->free_chunk_space); 2600 2601 if (!blk_queue_nonrot(q)) 2602 fs_devices->rotating = true; 2603 2604 orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy); 2605 btrfs_set_super_total_bytes(fs_info->super_copy, 2606 round_down(orig_super_total_bytes + device->total_bytes, 2607 fs_info->sectorsize)); 2608 2609 orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy); 2610 btrfs_set_super_num_devices(fs_info->super_copy, 2611 orig_super_num_devices + 1); 2612 2613 /* add sysfs device entry */ 2614 btrfs_sysfs_add_devices_dir(fs_devices, device); 2615 2616 /* 2617 * we've got more storage, clear any full flags on the space 2618 * infos 2619 */ 2620 btrfs_clear_space_info_full(fs_info); 2621 2622 mutex_unlock(&fs_info->chunk_mutex); 2623 mutex_unlock(&fs_devices->device_list_mutex); 2624 2625 if (seeding_dev) { 2626 mutex_lock(&fs_info->chunk_mutex); 2627 ret = init_first_rw_device(trans); 2628 mutex_unlock(&fs_info->chunk_mutex); 2629 if (ret) { 2630 btrfs_abort_transaction(trans, ret); 2631 goto error_sysfs; 2632 } 2633 } 2634 2635 ret = btrfs_add_dev_item(trans, device); 2636 if (ret) { 2637 btrfs_abort_transaction(trans, ret); 2638 goto error_sysfs; 2639 } 2640 2641 if (seeding_dev) { 2642 ret = btrfs_finish_sprout(trans); 2643 if (ret) { 2644 btrfs_abort_transaction(trans, ret); 2645 goto error_sysfs; 2646 } 2647 2648 btrfs_sysfs_update_sprout_fsid(fs_devices, 2649 fs_info->fs_devices->fsid); 2650 } 2651 2652 ret = btrfs_commit_transaction(trans); 2653 2654 if (seeding_dev) { 2655 mutex_unlock(&uuid_mutex); 2656 up_write(&sb->s_umount); 2657 unlocked = true; 2658 2659 if (ret) /* transaction commit */ 2660 return ret; 2661 2662 ret = btrfs_relocate_sys_chunks(fs_info); 2663 if (ret < 0) 2664 btrfs_handle_fs_error(fs_info, ret, 2665 "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command."); 2666 trans = btrfs_attach_transaction(root); 2667 if (IS_ERR(trans)) { 2668 if (PTR_ERR(trans) == -ENOENT) 2669 return 0; 2670 ret = PTR_ERR(trans); 2671 trans = NULL; 2672 goto error_sysfs; 2673 } 2674 ret = btrfs_commit_transaction(trans); 2675 } 2676 2677 /* 2678 * Now that we have written a new super block to this device, check all 2679 * other fs_devices list if device_path alienates any other scanned 2680 * device. 2681 * We can ignore the return value as it typically returns -EINVAL and 2682 * only succeeds if the device was an alien. 2683 */ 2684 btrfs_forget_devices(device_path); 2685 2686 /* Update ctime/mtime for blkid or udev */ 2687 update_dev_time(device_path); 2688 2689 return ret; 2690 2691 error_sysfs: 2692 btrfs_sysfs_remove_devices_dir(fs_devices, device); 2693 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2694 mutex_lock(&fs_info->chunk_mutex); 2695 list_del_rcu(&device->dev_list); 2696 list_del(&device->dev_alloc_list); 2697 fs_info->fs_devices->num_devices--; 2698 fs_info->fs_devices->open_devices--; 2699 fs_info->fs_devices->rw_devices--; 2700 fs_info->fs_devices->total_devices--; 2701 fs_info->fs_devices->total_rw_bytes -= device->total_bytes; 2702 atomic64_sub(device->total_bytes, &fs_info->free_chunk_space); 2703 btrfs_set_super_total_bytes(fs_info->super_copy, 2704 orig_super_total_bytes); 2705 btrfs_set_super_num_devices(fs_info->super_copy, 2706 orig_super_num_devices); 2707 mutex_unlock(&fs_info->chunk_mutex); 2708 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2709 error_trans: 2710 if (seeding_dev) 2711 sb->s_flags |= SB_RDONLY; 2712 if (trans) 2713 btrfs_end_transaction(trans); 2714 error_free_device: 2715 btrfs_free_device(device); 2716 error: 2717 blkdev_put(bdev, FMODE_EXCL); 2718 if (seeding_dev && !unlocked) { 2719 mutex_unlock(&uuid_mutex); 2720 up_write(&sb->s_umount); 2721 } 2722 return ret; 2723 } 2724 2725 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans, 2726 struct btrfs_device *device) 2727 { 2728 int ret; 2729 struct btrfs_path *path; 2730 struct btrfs_root *root = device->fs_info->chunk_root; 2731 struct btrfs_dev_item *dev_item; 2732 struct extent_buffer *leaf; 2733 struct btrfs_key key; 2734 2735 path = btrfs_alloc_path(); 2736 if (!path) 2737 return -ENOMEM; 2738 2739 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2740 key.type = BTRFS_DEV_ITEM_KEY; 2741 key.offset = device->devid; 2742 2743 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2744 if (ret < 0) 2745 goto out; 2746 2747 if (ret > 0) { 2748 ret = -ENOENT; 2749 goto out; 2750 } 2751 2752 leaf = path->nodes[0]; 2753 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 2754 2755 btrfs_set_device_id(leaf, dev_item, device->devid); 2756 btrfs_set_device_type(leaf, dev_item, device->type); 2757 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 2758 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 2759 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 2760 btrfs_set_device_total_bytes(leaf, dev_item, 2761 btrfs_device_get_disk_total_bytes(device)); 2762 btrfs_set_device_bytes_used(leaf, dev_item, 2763 btrfs_device_get_bytes_used(device)); 2764 btrfs_mark_buffer_dirty(leaf); 2765 2766 out: 2767 btrfs_free_path(path); 2768 return ret; 2769 } 2770 2771 int btrfs_grow_device(struct btrfs_trans_handle *trans, 2772 struct btrfs_device *device, u64 new_size) 2773 { 2774 struct btrfs_fs_info *fs_info = device->fs_info; 2775 struct btrfs_super_block *super_copy = fs_info->super_copy; 2776 u64 old_total; 2777 u64 diff; 2778 2779 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 2780 return -EACCES; 2781 2782 new_size = round_down(new_size, fs_info->sectorsize); 2783 2784 mutex_lock(&fs_info->chunk_mutex); 2785 old_total = btrfs_super_total_bytes(super_copy); 2786 diff = round_down(new_size - device->total_bytes, fs_info->sectorsize); 2787 2788 if (new_size <= device->total_bytes || 2789 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 2790 mutex_unlock(&fs_info->chunk_mutex); 2791 return -EINVAL; 2792 } 2793 2794 btrfs_set_super_total_bytes(super_copy, 2795 round_down(old_total + diff, fs_info->sectorsize)); 2796 device->fs_devices->total_rw_bytes += diff; 2797 2798 btrfs_device_set_total_bytes(device, new_size); 2799 btrfs_device_set_disk_total_bytes(device, new_size); 2800 btrfs_clear_space_info_full(device->fs_info); 2801 if (list_empty(&device->post_commit_list)) 2802 list_add_tail(&device->post_commit_list, 2803 &trans->transaction->dev_update_list); 2804 mutex_unlock(&fs_info->chunk_mutex); 2805 2806 return btrfs_update_device(trans, device); 2807 } 2808 2809 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 2810 { 2811 struct btrfs_fs_info *fs_info = trans->fs_info; 2812 struct btrfs_root *root = fs_info->chunk_root; 2813 int ret; 2814 struct btrfs_path *path; 2815 struct btrfs_key key; 2816 2817 path = btrfs_alloc_path(); 2818 if (!path) 2819 return -ENOMEM; 2820 2821 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2822 key.offset = chunk_offset; 2823 key.type = BTRFS_CHUNK_ITEM_KEY; 2824 2825 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 2826 if (ret < 0) 2827 goto out; 2828 else if (ret > 0) { /* Logic error or corruption */ 2829 btrfs_handle_fs_error(fs_info, -ENOENT, 2830 "Failed lookup while freeing chunk."); 2831 ret = -ENOENT; 2832 goto out; 2833 } 2834 2835 ret = btrfs_del_item(trans, root, path); 2836 if (ret < 0) 2837 btrfs_handle_fs_error(fs_info, ret, 2838 "Failed to delete chunk item."); 2839 out: 2840 btrfs_free_path(path); 2841 return ret; 2842 } 2843 2844 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 2845 { 2846 struct btrfs_super_block *super_copy = fs_info->super_copy; 2847 struct btrfs_disk_key *disk_key; 2848 struct btrfs_chunk *chunk; 2849 u8 *ptr; 2850 int ret = 0; 2851 u32 num_stripes; 2852 u32 array_size; 2853 u32 len = 0; 2854 u32 cur; 2855 struct btrfs_key key; 2856 2857 mutex_lock(&fs_info->chunk_mutex); 2858 array_size = btrfs_super_sys_array_size(super_copy); 2859 2860 ptr = super_copy->sys_chunk_array; 2861 cur = 0; 2862 2863 while (cur < array_size) { 2864 disk_key = (struct btrfs_disk_key *)ptr; 2865 btrfs_disk_key_to_cpu(&key, disk_key); 2866 2867 len = sizeof(*disk_key); 2868 2869 if (key.type == BTRFS_CHUNK_ITEM_KEY) { 2870 chunk = (struct btrfs_chunk *)(ptr + len); 2871 num_stripes = btrfs_stack_chunk_num_stripes(chunk); 2872 len += btrfs_chunk_item_size(num_stripes); 2873 } else { 2874 ret = -EIO; 2875 break; 2876 } 2877 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID && 2878 key.offset == chunk_offset) { 2879 memmove(ptr, ptr + len, array_size - (cur + len)); 2880 array_size -= len; 2881 btrfs_set_super_sys_array_size(super_copy, array_size); 2882 } else { 2883 ptr += len; 2884 cur += len; 2885 } 2886 } 2887 mutex_unlock(&fs_info->chunk_mutex); 2888 return ret; 2889 } 2890 2891 /* 2892 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent. 2893 * @logical: Logical block offset in bytes. 2894 * @length: Length of extent in bytes. 2895 * 2896 * Return: Chunk mapping or ERR_PTR. 2897 */ 2898 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info, 2899 u64 logical, u64 length) 2900 { 2901 struct extent_map_tree *em_tree; 2902 struct extent_map *em; 2903 2904 em_tree = &fs_info->mapping_tree; 2905 read_lock(&em_tree->lock); 2906 em = lookup_extent_mapping(em_tree, logical, length); 2907 read_unlock(&em_tree->lock); 2908 2909 if (!em) { 2910 btrfs_crit(fs_info, "unable to find logical %llu length %llu", 2911 logical, length); 2912 return ERR_PTR(-EINVAL); 2913 } 2914 2915 if (em->start > logical || em->start + em->len < logical) { 2916 btrfs_crit(fs_info, 2917 "found a bad mapping, wanted %llu-%llu, found %llu-%llu", 2918 logical, length, em->start, em->start + em->len); 2919 free_extent_map(em); 2920 return ERR_PTR(-EINVAL); 2921 } 2922 2923 /* callers are responsible for dropping em's ref. */ 2924 return em; 2925 } 2926 2927 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 2928 { 2929 struct btrfs_fs_info *fs_info = trans->fs_info; 2930 struct extent_map *em; 2931 struct map_lookup *map; 2932 u64 dev_extent_len = 0; 2933 int i, ret = 0; 2934 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2935 2936 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 2937 if (IS_ERR(em)) { 2938 /* 2939 * This is a logic error, but we don't want to just rely on the 2940 * user having built with ASSERT enabled, so if ASSERT doesn't 2941 * do anything we still error out. 2942 */ 2943 ASSERT(0); 2944 return PTR_ERR(em); 2945 } 2946 map = em->map_lookup; 2947 mutex_lock(&fs_info->chunk_mutex); 2948 check_system_chunk(trans, map->type); 2949 mutex_unlock(&fs_info->chunk_mutex); 2950 2951 /* 2952 * Take the device list mutex to prevent races with the final phase of 2953 * a device replace operation that replaces the device object associated 2954 * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()). 2955 */ 2956 mutex_lock(&fs_devices->device_list_mutex); 2957 for (i = 0; i < map->num_stripes; i++) { 2958 struct btrfs_device *device = map->stripes[i].dev; 2959 ret = btrfs_free_dev_extent(trans, device, 2960 map->stripes[i].physical, 2961 &dev_extent_len); 2962 if (ret) { 2963 mutex_unlock(&fs_devices->device_list_mutex); 2964 btrfs_abort_transaction(trans, ret); 2965 goto out; 2966 } 2967 2968 if (device->bytes_used > 0) { 2969 mutex_lock(&fs_info->chunk_mutex); 2970 btrfs_device_set_bytes_used(device, 2971 device->bytes_used - dev_extent_len); 2972 atomic64_add(dev_extent_len, &fs_info->free_chunk_space); 2973 btrfs_clear_space_info_full(fs_info); 2974 mutex_unlock(&fs_info->chunk_mutex); 2975 } 2976 2977 ret = btrfs_update_device(trans, device); 2978 if (ret) { 2979 mutex_unlock(&fs_devices->device_list_mutex); 2980 btrfs_abort_transaction(trans, ret); 2981 goto out; 2982 } 2983 } 2984 mutex_unlock(&fs_devices->device_list_mutex); 2985 2986 ret = btrfs_free_chunk(trans, chunk_offset); 2987 if (ret) { 2988 btrfs_abort_transaction(trans, ret); 2989 goto out; 2990 } 2991 2992 trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len); 2993 2994 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 2995 ret = btrfs_del_sys_chunk(fs_info, chunk_offset); 2996 if (ret) { 2997 btrfs_abort_transaction(trans, ret); 2998 goto out; 2999 } 3000 } 3001 3002 ret = btrfs_remove_block_group(trans, chunk_offset, em); 3003 if (ret) { 3004 btrfs_abort_transaction(trans, ret); 3005 goto out; 3006 } 3007 3008 out: 3009 /* once for us */ 3010 free_extent_map(em); 3011 return ret; 3012 } 3013 3014 static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 3015 { 3016 struct btrfs_root *root = fs_info->chunk_root; 3017 struct btrfs_trans_handle *trans; 3018 struct btrfs_block_group *block_group; 3019 int ret; 3020 3021 /* 3022 * Prevent races with automatic removal of unused block groups. 3023 * After we relocate and before we remove the chunk with offset 3024 * chunk_offset, automatic removal of the block group can kick in, 3025 * resulting in a failure when calling btrfs_remove_chunk() below. 3026 * 3027 * Make sure to acquire this mutex before doing a tree search (dev 3028 * or chunk trees) to find chunks. Otherwise the cleaner kthread might 3029 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after 3030 * we release the path used to search the chunk/dev tree and before 3031 * the current task acquires this mutex and calls us. 3032 */ 3033 lockdep_assert_held(&fs_info->delete_unused_bgs_mutex); 3034 3035 /* step one, relocate all the extents inside this chunk */ 3036 btrfs_scrub_pause(fs_info); 3037 ret = btrfs_relocate_block_group(fs_info, chunk_offset); 3038 btrfs_scrub_continue(fs_info); 3039 if (ret) 3040 return ret; 3041 3042 block_group = btrfs_lookup_block_group(fs_info, chunk_offset); 3043 if (!block_group) 3044 return -ENOENT; 3045 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); 3046 btrfs_put_block_group(block_group); 3047 3048 trans = btrfs_start_trans_remove_block_group(root->fs_info, 3049 chunk_offset); 3050 if (IS_ERR(trans)) { 3051 ret = PTR_ERR(trans); 3052 btrfs_handle_fs_error(root->fs_info, ret, NULL); 3053 return ret; 3054 } 3055 3056 /* 3057 * step two, delete the device extents and the 3058 * chunk tree entries 3059 */ 3060 ret = btrfs_remove_chunk(trans, chunk_offset); 3061 btrfs_end_transaction(trans); 3062 return ret; 3063 } 3064 3065 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info) 3066 { 3067 struct btrfs_root *chunk_root = fs_info->chunk_root; 3068 struct btrfs_path *path; 3069 struct extent_buffer *leaf; 3070 struct btrfs_chunk *chunk; 3071 struct btrfs_key key; 3072 struct btrfs_key found_key; 3073 u64 chunk_type; 3074 bool retried = false; 3075 int failed = 0; 3076 int ret; 3077 3078 path = btrfs_alloc_path(); 3079 if (!path) 3080 return -ENOMEM; 3081 3082 again: 3083 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3084 key.offset = (u64)-1; 3085 key.type = BTRFS_CHUNK_ITEM_KEY; 3086 3087 while (1) { 3088 mutex_lock(&fs_info->delete_unused_bgs_mutex); 3089 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3090 if (ret < 0) { 3091 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3092 goto error; 3093 } 3094 BUG_ON(ret == 0); /* Corruption */ 3095 3096 ret = btrfs_previous_item(chunk_root, path, key.objectid, 3097 key.type); 3098 if (ret) 3099 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3100 if (ret < 0) 3101 goto error; 3102 if (ret > 0) 3103 break; 3104 3105 leaf = path->nodes[0]; 3106 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3107 3108 chunk = btrfs_item_ptr(leaf, path->slots[0], 3109 struct btrfs_chunk); 3110 chunk_type = btrfs_chunk_type(leaf, chunk); 3111 btrfs_release_path(path); 3112 3113 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { 3114 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3115 if (ret == -ENOSPC) 3116 failed++; 3117 else 3118 BUG_ON(ret); 3119 } 3120 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3121 3122 if (found_key.offset == 0) 3123 break; 3124 key.offset = found_key.offset - 1; 3125 } 3126 ret = 0; 3127 if (failed && !retried) { 3128 failed = 0; 3129 retried = true; 3130 goto again; 3131 } else if (WARN_ON(failed && retried)) { 3132 ret = -ENOSPC; 3133 } 3134 error: 3135 btrfs_free_path(path); 3136 return ret; 3137 } 3138 3139 /* 3140 * return 1 : allocate a data chunk successfully, 3141 * return <0: errors during allocating a data chunk, 3142 * return 0 : no need to allocate a data chunk. 3143 */ 3144 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info, 3145 u64 chunk_offset) 3146 { 3147 struct btrfs_block_group *cache; 3148 u64 bytes_used; 3149 u64 chunk_type; 3150 3151 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3152 ASSERT(cache); 3153 chunk_type = cache->flags; 3154 btrfs_put_block_group(cache); 3155 3156 if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA)) 3157 return 0; 3158 3159 spin_lock(&fs_info->data_sinfo->lock); 3160 bytes_used = fs_info->data_sinfo->bytes_used; 3161 spin_unlock(&fs_info->data_sinfo->lock); 3162 3163 if (!bytes_used) { 3164 struct btrfs_trans_handle *trans; 3165 int ret; 3166 3167 trans = btrfs_join_transaction(fs_info->tree_root); 3168 if (IS_ERR(trans)) 3169 return PTR_ERR(trans); 3170 3171 ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA); 3172 btrfs_end_transaction(trans); 3173 if (ret < 0) 3174 return ret; 3175 return 1; 3176 } 3177 3178 return 0; 3179 } 3180 3181 static int insert_balance_item(struct btrfs_fs_info *fs_info, 3182 struct btrfs_balance_control *bctl) 3183 { 3184 struct btrfs_root *root = fs_info->tree_root; 3185 struct btrfs_trans_handle *trans; 3186 struct btrfs_balance_item *item; 3187 struct btrfs_disk_balance_args disk_bargs; 3188 struct btrfs_path *path; 3189 struct extent_buffer *leaf; 3190 struct btrfs_key key; 3191 int ret, err; 3192 3193 path = btrfs_alloc_path(); 3194 if (!path) 3195 return -ENOMEM; 3196 3197 trans = btrfs_start_transaction(root, 0); 3198 if (IS_ERR(trans)) { 3199 btrfs_free_path(path); 3200 return PTR_ERR(trans); 3201 } 3202 3203 key.objectid = BTRFS_BALANCE_OBJECTID; 3204 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3205 key.offset = 0; 3206 3207 ret = btrfs_insert_empty_item(trans, root, path, &key, 3208 sizeof(*item)); 3209 if (ret) 3210 goto out; 3211 3212 leaf = path->nodes[0]; 3213 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 3214 3215 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item)); 3216 3217 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data); 3218 btrfs_set_balance_data(leaf, item, &disk_bargs); 3219 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta); 3220 btrfs_set_balance_meta(leaf, item, &disk_bargs); 3221 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys); 3222 btrfs_set_balance_sys(leaf, item, &disk_bargs); 3223 3224 btrfs_set_balance_flags(leaf, item, bctl->flags); 3225 3226 btrfs_mark_buffer_dirty(leaf); 3227 out: 3228 btrfs_free_path(path); 3229 err = btrfs_commit_transaction(trans); 3230 if (err && !ret) 3231 ret = err; 3232 return ret; 3233 } 3234 3235 static int del_balance_item(struct btrfs_fs_info *fs_info) 3236 { 3237 struct btrfs_root *root = fs_info->tree_root; 3238 struct btrfs_trans_handle *trans; 3239 struct btrfs_path *path; 3240 struct btrfs_key key; 3241 int ret, err; 3242 3243 path = btrfs_alloc_path(); 3244 if (!path) 3245 return -ENOMEM; 3246 3247 trans = btrfs_start_transaction_fallback_global_rsv(root, 0); 3248 if (IS_ERR(trans)) { 3249 btrfs_free_path(path); 3250 return PTR_ERR(trans); 3251 } 3252 3253 key.objectid = BTRFS_BALANCE_OBJECTID; 3254 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3255 key.offset = 0; 3256 3257 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 3258 if (ret < 0) 3259 goto out; 3260 if (ret > 0) { 3261 ret = -ENOENT; 3262 goto out; 3263 } 3264 3265 ret = btrfs_del_item(trans, root, path); 3266 out: 3267 btrfs_free_path(path); 3268 err = btrfs_commit_transaction(trans); 3269 if (err && !ret) 3270 ret = err; 3271 return ret; 3272 } 3273 3274 /* 3275 * This is a heuristic used to reduce the number of chunks balanced on 3276 * resume after balance was interrupted. 3277 */ 3278 static void update_balance_args(struct btrfs_balance_control *bctl) 3279 { 3280 /* 3281 * Turn on soft mode for chunk types that were being converted. 3282 */ 3283 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) 3284 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT; 3285 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) 3286 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT; 3287 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) 3288 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT; 3289 3290 /* 3291 * Turn on usage filter if is not already used. The idea is 3292 * that chunks that we have already balanced should be 3293 * reasonably full. Don't do it for chunks that are being 3294 * converted - that will keep us from relocating unconverted 3295 * (albeit full) chunks. 3296 */ 3297 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) && 3298 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3299 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3300 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE; 3301 bctl->data.usage = 90; 3302 } 3303 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) && 3304 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3305 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3306 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE; 3307 bctl->sys.usage = 90; 3308 } 3309 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) && 3310 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3311 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3312 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE; 3313 bctl->meta.usage = 90; 3314 } 3315 } 3316 3317 /* 3318 * Clear the balance status in fs_info and delete the balance item from disk. 3319 */ 3320 static void reset_balance_state(struct btrfs_fs_info *fs_info) 3321 { 3322 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3323 int ret; 3324 3325 BUG_ON(!fs_info->balance_ctl); 3326 3327 spin_lock(&fs_info->balance_lock); 3328 fs_info->balance_ctl = NULL; 3329 spin_unlock(&fs_info->balance_lock); 3330 3331 kfree(bctl); 3332 ret = del_balance_item(fs_info); 3333 if (ret) 3334 btrfs_handle_fs_error(fs_info, ret, NULL); 3335 } 3336 3337 /* 3338 * Balance filters. Return 1 if chunk should be filtered out 3339 * (should not be balanced). 3340 */ 3341 static int chunk_profiles_filter(u64 chunk_type, 3342 struct btrfs_balance_args *bargs) 3343 { 3344 chunk_type = chunk_to_extended(chunk_type) & 3345 BTRFS_EXTENDED_PROFILE_MASK; 3346 3347 if (bargs->profiles & chunk_type) 3348 return 0; 3349 3350 return 1; 3351 } 3352 3353 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, 3354 struct btrfs_balance_args *bargs) 3355 { 3356 struct btrfs_block_group *cache; 3357 u64 chunk_used; 3358 u64 user_thresh_min; 3359 u64 user_thresh_max; 3360 int ret = 1; 3361 3362 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3363 chunk_used = cache->used; 3364 3365 if (bargs->usage_min == 0) 3366 user_thresh_min = 0; 3367 else 3368 user_thresh_min = div_factor_fine(cache->length, 3369 bargs->usage_min); 3370 3371 if (bargs->usage_max == 0) 3372 user_thresh_max = 1; 3373 else if (bargs->usage_max > 100) 3374 user_thresh_max = cache->length; 3375 else 3376 user_thresh_max = div_factor_fine(cache->length, 3377 bargs->usage_max); 3378 3379 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max) 3380 ret = 0; 3381 3382 btrfs_put_block_group(cache); 3383 return ret; 3384 } 3385 3386 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, 3387 u64 chunk_offset, struct btrfs_balance_args *bargs) 3388 { 3389 struct btrfs_block_group *cache; 3390 u64 chunk_used, user_thresh; 3391 int ret = 1; 3392 3393 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3394 chunk_used = cache->used; 3395 3396 if (bargs->usage_min == 0) 3397 user_thresh = 1; 3398 else if (bargs->usage > 100) 3399 user_thresh = cache->length; 3400 else 3401 user_thresh = div_factor_fine(cache->length, bargs->usage); 3402 3403 if (chunk_used < user_thresh) 3404 ret = 0; 3405 3406 btrfs_put_block_group(cache); 3407 return ret; 3408 } 3409 3410 static int chunk_devid_filter(struct extent_buffer *leaf, 3411 struct btrfs_chunk *chunk, 3412 struct btrfs_balance_args *bargs) 3413 { 3414 struct btrfs_stripe *stripe; 3415 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3416 int i; 3417 3418 for (i = 0; i < num_stripes; i++) { 3419 stripe = btrfs_stripe_nr(chunk, i); 3420 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid) 3421 return 0; 3422 } 3423 3424 return 1; 3425 } 3426 3427 static u64 calc_data_stripes(u64 type, int num_stripes) 3428 { 3429 const int index = btrfs_bg_flags_to_raid_index(type); 3430 const int ncopies = btrfs_raid_array[index].ncopies; 3431 const int nparity = btrfs_raid_array[index].nparity; 3432 3433 if (nparity) 3434 return num_stripes - nparity; 3435 else 3436 return num_stripes / ncopies; 3437 } 3438 3439 /* [pstart, pend) */ 3440 static int chunk_drange_filter(struct extent_buffer *leaf, 3441 struct btrfs_chunk *chunk, 3442 struct btrfs_balance_args *bargs) 3443 { 3444 struct btrfs_stripe *stripe; 3445 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3446 u64 stripe_offset; 3447 u64 stripe_length; 3448 u64 type; 3449 int factor; 3450 int i; 3451 3452 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID)) 3453 return 0; 3454 3455 type = btrfs_chunk_type(leaf, chunk); 3456 factor = calc_data_stripes(type, num_stripes); 3457 3458 for (i = 0; i < num_stripes; i++) { 3459 stripe = btrfs_stripe_nr(chunk, i); 3460 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid) 3461 continue; 3462 3463 stripe_offset = btrfs_stripe_offset(leaf, stripe); 3464 stripe_length = btrfs_chunk_length(leaf, chunk); 3465 stripe_length = div_u64(stripe_length, factor); 3466 3467 if (stripe_offset < bargs->pend && 3468 stripe_offset + stripe_length > bargs->pstart) 3469 return 0; 3470 } 3471 3472 return 1; 3473 } 3474 3475 /* [vstart, vend) */ 3476 static int chunk_vrange_filter(struct extent_buffer *leaf, 3477 struct btrfs_chunk *chunk, 3478 u64 chunk_offset, 3479 struct btrfs_balance_args *bargs) 3480 { 3481 if (chunk_offset < bargs->vend && 3482 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart) 3483 /* at least part of the chunk is inside this vrange */ 3484 return 0; 3485 3486 return 1; 3487 } 3488 3489 static int chunk_stripes_range_filter(struct extent_buffer *leaf, 3490 struct btrfs_chunk *chunk, 3491 struct btrfs_balance_args *bargs) 3492 { 3493 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3494 3495 if (bargs->stripes_min <= num_stripes 3496 && num_stripes <= bargs->stripes_max) 3497 return 0; 3498 3499 return 1; 3500 } 3501 3502 static int chunk_soft_convert_filter(u64 chunk_type, 3503 struct btrfs_balance_args *bargs) 3504 { 3505 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 3506 return 0; 3507 3508 chunk_type = chunk_to_extended(chunk_type) & 3509 BTRFS_EXTENDED_PROFILE_MASK; 3510 3511 if (bargs->target == chunk_type) 3512 return 1; 3513 3514 return 0; 3515 } 3516 3517 static int should_balance_chunk(struct extent_buffer *leaf, 3518 struct btrfs_chunk *chunk, u64 chunk_offset) 3519 { 3520 struct btrfs_fs_info *fs_info = leaf->fs_info; 3521 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3522 struct btrfs_balance_args *bargs = NULL; 3523 u64 chunk_type = btrfs_chunk_type(leaf, chunk); 3524 3525 /* type filter */ 3526 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) & 3527 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) { 3528 return 0; 3529 } 3530 3531 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3532 bargs = &bctl->data; 3533 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3534 bargs = &bctl->sys; 3535 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3536 bargs = &bctl->meta; 3537 3538 /* profiles filter */ 3539 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) && 3540 chunk_profiles_filter(chunk_type, bargs)) { 3541 return 0; 3542 } 3543 3544 /* usage filter */ 3545 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) && 3546 chunk_usage_filter(fs_info, chunk_offset, bargs)) { 3547 return 0; 3548 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3549 chunk_usage_range_filter(fs_info, chunk_offset, bargs)) { 3550 return 0; 3551 } 3552 3553 /* devid filter */ 3554 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) && 3555 chunk_devid_filter(leaf, chunk, bargs)) { 3556 return 0; 3557 } 3558 3559 /* drange filter, makes sense only with devid filter */ 3560 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) && 3561 chunk_drange_filter(leaf, chunk, bargs)) { 3562 return 0; 3563 } 3564 3565 /* vrange filter */ 3566 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) && 3567 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) { 3568 return 0; 3569 } 3570 3571 /* stripes filter */ 3572 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) && 3573 chunk_stripes_range_filter(leaf, chunk, bargs)) { 3574 return 0; 3575 } 3576 3577 /* soft profile changing mode */ 3578 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) && 3579 chunk_soft_convert_filter(chunk_type, bargs)) { 3580 return 0; 3581 } 3582 3583 /* 3584 * limited by count, must be the last filter 3585 */ 3586 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) { 3587 if (bargs->limit == 0) 3588 return 0; 3589 else 3590 bargs->limit--; 3591 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) { 3592 /* 3593 * Same logic as the 'limit' filter; the minimum cannot be 3594 * determined here because we do not have the global information 3595 * about the count of all chunks that satisfy the filters. 3596 */ 3597 if (bargs->limit_max == 0) 3598 return 0; 3599 else 3600 bargs->limit_max--; 3601 } 3602 3603 return 1; 3604 } 3605 3606 static int __btrfs_balance(struct btrfs_fs_info *fs_info) 3607 { 3608 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3609 struct btrfs_root *chunk_root = fs_info->chunk_root; 3610 u64 chunk_type; 3611 struct btrfs_chunk *chunk; 3612 struct btrfs_path *path = NULL; 3613 struct btrfs_key key; 3614 struct btrfs_key found_key; 3615 struct extent_buffer *leaf; 3616 int slot; 3617 int ret; 3618 int enospc_errors = 0; 3619 bool counting = true; 3620 /* The single value limit and min/max limits use the same bytes in the */ 3621 u64 limit_data = bctl->data.limit; 3622 u64 limit_meta = bctl->meta.limit; 3623 u64 limit_sys = bctl->sys.limit; 3624 u32 count_data = 0; 3625 u32 count_meta = 0; 3626 u32 count_sys = 0; 3627 int chunk_reserved = 0; 3628 3629 path = btrfs_alloc_path(); 3630 if (!path) { 3631 ret = -ENOMEM; 3632 goto error; 3633 } 3634 3635 /* zero out stat counters */ 3636 spin_lock(&fs_info->balance_lock); 3637 memset(&bctl->stat, 0, sizeof(bctl->stat)); 3638 spin_unlock(&fs_info->balance_lock); 3639 again: 3640 if (!counting) { 3641 /* 3642 * The single value limit and min/max limits use the same bytes 3643 * in the 3644 */ 3645 bctl->data.limit = limit_data; 3646 bctl->meta.limit = limit_meta; 3647 bctl->sys.limit = limit_sys; 3648 } 3649 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3650 key.offset = (u64)-1; 3651 key.type = BTRFS_CHUNK_ITEM_KEY; 3652 3653 while (1) { 3654 if ((!counting && atomic_read(&fs_info->balance_pause_req)) || 3655 atomic_read(&fs_info->balance_cancel_req)) { 3656 ret = -ECANCELED; 3657 goto error; 3658 } 3659 3660 mutex_lock(&fs_info->delete_unused_bgs_mutex); 3661 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3662 if (ret < 0) { 3663 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3664 goto error; 3665 } 3666 3667 /* 3668 * this shouldn't happen, it means the last relocate 3669 * failed 3670 */ 3671 if (ret == 0) 3672 BUG(); /* FIXME break ? */ 3673 3674 ret = btrfs_previous_item(chunk_root, path, 0, 3675 BTRFS_CHUNK_ITEM_KEY); 3676 if (ret) { 3677 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3678 ret = 0; 3679 break; 3680 } 3681 3682 leaf = path->nodes[0]; 3683 slot = path->slots[0]; 3684 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3685 3686 if (found_key.objectid != key.objectid) { 3687 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3688 break; 3689 } 3690 3691 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 3692 chunk_type = btrfs_chunk_type(leaf, chunk); 3693 3694 if (!counting) { 3695 spin_lock(&fs_info->balance_lock); 3696 bctl->stat.considered++; 3697 spin_unlock(&fs_info->balance_lock); 3698 } 3699 3700 ret = should_balance_chunk(leaf, chunk, found_key.offset); 3701 3702 btrfs_release_path(path); 3703 if (!ret) { 3704 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3705 goto loop; 3706 } 3707 3708 if (counting) { 3709 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3710 spin_lock(&fs_info->balance_lock); 3711 bctl->stat.expected++; 3712 spin_unlock(&fs_info->balance_lock); 3713 3714 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3715 count_data++; 3716 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3717 count_sys++; 3718 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3719 count_meta++; 3720 3721 goto loop; 3722 } 3723 3724 /* 3725 * Apply limit_min filter, no need to check if the LIMITS 3726 * filter is used, limit_min is 0 by default 3727 */ 3728 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) && 3729 count_data < bctl->data.limit_min) 3730 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) && 3731 count_meta < bctl->meta.limit_min) 3732 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) && 3733 count_sys < bctl->sys.limit_min)) { 3734 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3735 goto loop; 3736 } 3737 3738 if (!chunk_reserved) { 3739 /* 3740 * We may be relocating the only data chunk we have, 3741 * which could potentially end up with losing data's 3742 * raid profile, so lets allocate an empty one in 3743 * advance. 3744 */ 3745 ret = btrfs_may_alloc_data_chunk(fs_info, 3746 found_key.offset); 3747 if (ret < 0) { 3748 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3749 goto error; 3750 } else if (ret == 1) { 3751 chunk_reserved = 1; 3752 } 3753 } 3754 3755 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3756 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3757 if (ret == -ENOSPC) { 3758 enospc_errors++; 3759 } else if (ret == -ETXTBSY) { 3760 btrfs_info(fs_info, 3761 "skipping relocation of block group %llu due to active swapfile", 3762 found_key.offset); 3763 ret = 0; 3764 } else if (ret) { 3765 goto error; 3766 } else { 3767 spin_lock(&fs_info->balance_lock); 3768 bctl->stat.completed++; 3769 spin_unlock(&fs_info->balance_lock); 3770 } 3771 loop: 3772 if (found_key.offset == 0) 3773 break; 3774 key.offset = found_key.offset - 1; 3775 } 3776 3777 if (counting) { 3778 btrfs_release_path(path); 3779 counting = false; 3780 goto again; 3781 } 3782 error: 3783 btrfs_free_path(path); 3784 if (enospc_errors) { 3785 btrfs_info(fs_info, "%d enospc errors during balance", 3786 enospc_errors); 3787 if (!ret) 3788 ret = -ENOSPC; 3789 } 3790 3791 return ret; 3792 } 3793 3794 /** 3795 * alloc_profile_is_valid - see if a given profile is valid and reduced 3796 * @flags: profile to validate 3797 * @extended: if true @flags is treated as an extended profile 3798 */ 3799 static int alloc_profile_is_valid(u64 flags, int extended) 3800 { 3801 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK : 3802 BTRFS_BLOCK_GROUP_PROFILE_MASK); 3803 3804 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK; 3805 3806 /* 1) check that all other bits are zeroed */ 3807 if (flags & ~mask) 3808 return 0; 3809 3810 /* 2) see if profile is reduced */ 3811 if (flags == 0) 3812 return !extended; /* "0" is valid for usual profiles */ 3813 3814 return has_single_bit_set(flags); 3815 } 3816 3817 static inline int balance_need_close(struct btrfs_fs_info *fs_info) 3818 { 3819 /* cancel requested || normal exit path */ 3820 return atomic_read(&fs_info->balance_cancel_req) || 3821 (atomic_read(&fs_info->balance_pause_req) == 0 && 3822 atomic_read(&fs_info->balance_cancel_req) == 0); 3823 } 3824 3825 /* 3826 * Validate target profile against allowed profiles and return true if it's OK. 3827 * Otherwise print the error message and return false. 3828 */ 3829 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info, 3830 const struct btrfs_balance_args *bargs, 3831 u64 allowed, const char *type) 3832 { 3833 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 3834 return true; 3835 3836 /* Profile is valid and does not have bits outside of the allowed set */ 3837 if (alloc_profile_is_valid(bargs->target, 1) && 3838 (bargs->target & ~allowed) == 0) 3839 return true; 3840 3841 btrfs_err(fs_info, "balance: invalid convert %s profile %s", 3842 type, btrfs_bg_type_to_raid_name(bargs->target)); 3843 return false; 3844 } 3845 3846 /* 3847 * Fill @buf with textual description of balance filter flags @bargs, up to 3848 * @size_buf including the terminating null. The output may be trimmed if it 3849 * does not fit into the provided buffer. 3850 */ 3851 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf, 3852 u32 size_buf) 3853 { 3854 int ret; 3855 u32 size_bp = size_buf; 3856 char *bp = buf; 3857 u64 flags = bargs->flags; 3858 char tmp_buf[128] = {'\0'}; 3859 3860 if (!flags) 3861 return; 3862 3863 #define CHECK_APPEND_NOARG(a) \ 3864 do { \ 3865 ret = snprintf(bp, size_bp, (a)); \ 3866 if (ret < 0 || ret >= size_bp) \ 3867 goto out_overflow; \ 3868 size_bp -= ret; \ 3869 bp += ret; \ 3870 } while (0) 3871 3872 #define CHECK_APPEND_1ARG(a, v1) \ 3873 do { \ 3874 ret = snprintf(bp, size_bp, (a), (v1)); \ 3875 if (ret < 0 || ret >= size_bp) \ 3876 goto out_overflow; \ 3877 size_bp -= ret; \ 3878 bp += ret; \ 3879 } while (0) 3880 3881 #define CHECK_APPEND_2ARG(a, v1, v2) \ 3882 do { \ 3883 ret = snprintf(bp, size_bp, (a), (v1), (v2)); \ 3884 if (ret < 0 || ret >= size_bp) \ 3885 goto out_overflow; \ 3886 size_bp -= ret; \ 3887 bp += ret; \ 3888 } while (0) 3889 3890 if (flags & BTRFS_BALANCE_ARGS_CONVERT) 3891 CHECK_APPEND_1ARG("convert=%s,", 3892 btrfs_bg_type_to_raid_name(bargs->target)); 3893 3894 if (flags & BTRFS_BALANCE_ARGS_SOFT) 3895 CHECK_APPEND_NOARG("soft,"); 3896 3897 if (flags & BTRFS_BALANCE_ARGS_PROFILES) { 3898 btrfs_describe_block_groups(bargs->profiles, tmp_buf, 3899 sizeof(tmp_buf)); 3900 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf); 3901 } 3902 3903 if (flags & BTRFS_BALANCE_ARGS_USAGE) 3904 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage); 3905 3906 if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) 3907 CHECK_APPEND_2ARG("usage=%u..%u,", 3908 bargs->usage_min, bargs->usage_max); 3909 3910 if (flags & BTRFS_BALANCE_ARGS_DEVID) 3911 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid); 3912 3913 if (flags & BTRFS_BALANCE_ARGS_DRANGE) 3914 CHECK_APPEND_2ARG("drange=%llu..%llu,", 3915 bargs->pstart, bargs->pend); 3916 3917 if (flags & BTRFS_BALANCE_ARGS_VRANGE) 3918 CHECK_APPEND_2ARG("vrange=%llu..%llu,", 3919 bargs->vstart, bargs->vend); 3920 3921 if (flags & BTRFS_BALANCE_ARGS_LIMIT) 3922 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit); 3923 3924 if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE) 3925 CHECK_APPEND_2ARG("limit=%u..%u,", 3926 bargs->limit_min, bargs->limit_max); 3927 3928 if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) 3929 CHECK_APPEND_2ARG("stripes=%u..%u,", 3930 bargs->stripes_min, bargs->stripes_max); 3931 3932 #undef CHECK_APPEND_2ARG 3933 #undef CHECK_APPEND_1ARG 3934 #undef CHECK_APPEND_NOARG 3935 3936 out_overflow: 3937 3938 if (size_bp < size_buf) 3939 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */ 3940 else 3941 buf[0] = '\0'; 3942 } 3943 3944 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info) 3945 { 3946 u32 size_buf = 1024; 3947 char tmp_buf[192] = {'\0'}; 3948 char *buf; 3949 char *bp; 3950 u32 size_bp = size_buf; 3951 int ret; 3952 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3953 3954 buf = kzalloc(size_buf, GFP_KERNEL); 3955 if (!buf) 3956 return; 3957 3958 bp = buf; 3959 3960 #define CHECK_APPEND_1ARG(a, v1) \ 3961 do { \ 3962 ret = snprintf(bp, size_bp, (a), (v1)); \ 3963 if (ret < 0 || ret >= size_bp) \ 3964 goto out_overflow; \ 3965 size_bp -= ret; \ 3966 bp += ret; \ 3967 } while (0) 3968 3969 if (bctl->flags & BTRFS_BALANCE_FORCE) 3970 CHECK_APPEND_1ARG("%s", "-f "); 3971 3972 if (bctl->flags & BTRFS_BALANCE_DATA) { 3973 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf)); 3974 CHECK_APPEND_1ARG("-d%s ", tmp_buf); 3975 } 3976 3977 if (bctl->flags & BTRFS_BALANCE_METADATA) { 3978 describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf)); 3979 CHECK_APPEND_1ARG("-m%s ", tmp_buf); 3980 } 3981 3982 if (bctl->flags & BTRFS_BALANCE_SYSTEM) { 3983 describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf)); 3984 CHECK_APPEND_1ARG("-s%s ", tmp_buf); 3985 } 3986 3987 #undef CHECK_APPEND_1ARG 3988 3989 out_overflow: 3990 3991 if (size_bp < size_buf) 3992 buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */ 3993 btrfs_info(fs_info, "balance: %s %s", 3994 (bctl->flags & BTRFS_BALANCE_RESUME) ? 3995 "resume" : "start", buf); 3996 3997 kfree(buf); 3998 } 3999 4000 /* 4001 * Should be called with balance mutexe held 4002 */ 4003 int btrfs_balance(struct btrfs_fs_info *fs_info, 4004 struct btrfs_balance_control *bctl, 4005 struct btrfs_ioctl_balance_args *bargs) 4006 { 4007 u64 meta_target, data_target; 4008 u64 allowed; 4009 int mixed = 0; 4010 int ret; 4011 u64 num_devices; 4012 unsigned seq; 4013 bool reducing_redundancy; 4014 int i; 4015 4016 if (btrfs_fs_closing(fs_info) || 4017 atomic_read(&fs_info->balance_pause_req) || 4018 btrfs_should_cancel_balance(fs_info)) { 4019 ret = -EINVAL; 4020 goto out; 4021 } 4022 4023 allowed = btrfs_super_incompat_flags(fs_info->super_copy); 4024 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) 4025 mixed = 1; 4026 4027 /* 4028 * In case of mixed groups both data and meta should be picked, 4029 * and identical options should be given for both of them. 4030 */ 4031 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA; 4032 if (mixed && (bctl->flags & allowed)) { 4033 if (!(bctl->flags & BTRFS_BALANCE_DATA) || 4034 !(bctl->flags & BTRFS_BALANCE_METADATA) || 4035 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) { 4036 btrfs_err(fs_info, 4037 "balance: mixed groups data and metadata options must be the same"); 4038 ret = -EINVAL; 4039 goto out; 4040 } 4041 } 4042 4043 /* 4044 * rw_devices will not change at the moment, device add/delete/replace 4045 * are excluded by EXCL_OP 4046 */ 4047 num_devices = fs_info->fs_devices->rw_devices; 4048 4049 /* 4050 * SINGLE profile on-disk has no profile bit, but in-memory we have a 4051 * special bit for it, to make it easier to distinguish. Thus we need 4052 * to set it manually, or balance would refuse the profile. 4053 */ 4054 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE; 4055 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) 4056 if (num_devices >= btrfs_raid_array[i].devs_min) 4057 allowed |= btrfs_raid_array[i].bg_flag; 4058 4059 if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") || 4060 !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") || 4061 !validate_convert_profile(fs_info, &bctl->sys, allowed, "system")) { 4062 ret = -EINVAL; 4063 goto out; 4064 } 4065 4066 /* 4067 * Allow to reduce metadata or system integrity only if force set for 4068 * profiles with redundancy (copies, parity) 4069 */ 4070 allowed = 0; 4071 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) { 4072 if (btrfs_raid_array[i].ncopies >= 2 || 4073 btrfs_raid_array[i].tolerated_failures >= 1) 4074 allowed |= btrfs_raid_array[i].bg_flag; 4075 } 4076 do { 4077 seq = read_seqbegin(&fs_info->profiles_lock); 4078 4079 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4080 (fs_info->avail_system_alloc_bits & allowed) && 4081 !(bctl->sys.target & allowed)) || 4082 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4083 (fs_info->avail_metadata_alloc_bits & allowed) && 4084 !(bctl->meta.target & allowed))) 4085 reducing_redundancy = true; 4086 else 4087 reducing_redundancy = false; 4088 4089 /* if we're not converting, the target field is uninitialized */ 4090 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4091 bctl->meta.target : fs_info->avail_metadata_alloc_bits; 4092 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4093 bctl->data.target : fs_info->avail_data_alloc_bits; 4094 } while (read_seqretry(&fs_info->profiles_lock, seq)); 4095 4096 if (reducing_redundancy) { 4097 if (bctl->flags & BTRFS_BALANCE_FORCE) { 4098 btrfs_info(fs_info, 4099 "balance: force reducing metadata redundancy"); 4100 } else { 4101 btrfs_err(fs_info, 4102 "balance: reduces metadata redundancy, use --force if you want this"); 4103 ret = -EINVAL; 4104 goto out; 4105 } 4106 } 4107 4108 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) < 4109 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) { 4110 btrfs_warn(fs_info, 4111 "balance: metadata profile %s has lower redundancy than data profile %s", 4112 btrfs_bg_type_to_raid_name(meta_target), 4113 btrfs_bg_type_to_raid_name(data_target)); 4114 } 4115 4116 if (fs_info->send_in_progress) { 4117 btrfs_warn_rl(fs_info, 4118 "cannot run balance while send operations are in progress (%d in progress)", 4119 fs_info->send_in_progress); 4120 ret = -EAGAIN; 4121 goto out; 4122 } 4123 4124 ret = insert_balance_item(fs_info, bctl); 4125 if (ret && ret != -EEXIST) 4126 goto out; 4127 4128 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) { 4129 BUG_ON(ret == -EEXIST); 4130 BUG_ON(fs_info->balance_ctl); 4131 spin_lock(&fs_info->balance_lock); 4132 fs_info->balance_ctl = bctl; 4133 spin_unlock(&fs_info->balance_lock); 4134 } else { 4135 BUG_ON(ret != -EEXIST); 4136 spin_lock(&fs_info->balance_lock); 4137 update_balance_args(bctl); 4138 spin_unlock(&fs_info->balance_lock); 4139 } 4140 4141 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4142 set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4143 describe_balance_start_or_resume(fs_info); 4144 mutex_unlock(&fs_info->balance_mutex); 4145 4146 ret = __btrfs_balance(fs_info); 4147 4148 mutex_lock(&fs_info->balance_mutex); 4149 if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) 4150 btrfs_info(fs_info, "balance: paused"); 4151 /* 4152 * Balance can be canceled by: 4153 * 4154 * - Regular cancel request 4155 * Then ret == -ECANCELED and balance_cancel_req > 0 4156 * 4157 * - Fatal signal to "btrfs" process 4158 * Either the signal caught by wait_reserve_ticket() and callers 4159 * got -EINTR, or caught by btrfs_should_cancel_balance() and 4160 * got -ECANCELED. 4161 * Either way, in this case balance_cancel_req = 0, and 4162 * ret == -EINTR or ret == -ECANCELED. 4163 * 4164 * So here we only check the return value to catch canceled balance. 4165 */ 4166 else if (ret == -ECANCELED || ret == -EINTR) 4167 btrfs_info(fs_info, "balance: canceled"); 4168 else 4169 btrfs_info(fs_info, "balance: ended with status: %d", ret); 4170 4171 clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4172 4173 if (bargs) { 4174 memset(bargs, 0, sizeof(*bargs)); 4175 btrfs_update_ioctl_balance_args(fs_info, bargs); 4176 } 4177 4178 if ((ret && ret != -ECANCELED && ret != -ENOSPC) || 4179 balance_need_close(fs_info)) { 4180 reset_balance_state(fs_info); 4181 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags); 4182 } 4183 4184 wake_up(&fs_info->balance_wait_q); 4185 4186 return ret; 4187 out: 4188 if (bctl->flags & BTRFS_BALANCE_RESUME) 4189 reset_balance_state(fs_info); 4190 else 4191 kfree(bctl); 4192 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags); 4193 4194 return ret; 4195 } 4196 4197 static int balance_kthread(void *data) 4198 { 4199 struct btrfs_fs_info *fs_info = data; 4200 int ret = 0; 4201 4202 mutex_lock(&fs_info->balance_mutex); 4203 if (fs_info->balance_ctl) 4204 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL); 4205 mutex_unlock(&fs_info->balance_mutex); 4206 4207 return ret; 4208 } 4209 4210 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info) 4211 { 4212 struct task_struct *tsk; 4213 4214 mutex_lock(&fs_info->balance_mutex); 4215 if (!fs_info->balance_ctl) { 4216 mutex_unlock(&fs_info->balance_mutex); 4217 return 0; 4218 } 4219 mutex_unlock(&fs_info->balance_mutex); 4220 4221 if (btrfs_test_opt(fs_info, SKIP_BALANCE)) { 4222 btrfs_info(fs_info, "balance: resume skipped"); 4223 return 0; 4224 } 4225 4226 /* 4227 * A ro->rw remount sequence should continue with the paused balance 4228 * regardless of who pauses it, system or the user as of now, so set 4229 * the resume flag. 4230 */ 4231 spin_lock(&fs_info->balance_lock); 4232 fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME; 4233 spin_unlock(&fs_info->balance_lock); 4234 4235 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); 4236 return PTR_ERR_OR_ZERO(tsk); 4237 } 4238 4239 int btrfs_recover_balance(struct btrfs_fs_info *fs_info) 4240 { 4241 struct btrfs_balance_control *bctl; 4242 struct btrfs_balance_item *item; 4243 struct btrfs_disk_balance_args disk_bargs; 4244 struct btrfs_path *path; 4245 struct extent_buffer *leaf; 4246 struct btrfs_key key; 4247 int ret; 4248 4249 path = btrfs_alloc_path(); 4250 if (!path) 4251 return -ENOMEM; 4252 4253 key.objectid = BTRFS_BALANCE_OBJECTID; 4254 key.type = BTRFS_TEMPORARY_ITEM_KEY; 4255 key.offset = 0; 4256 4257 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4258 if (ret < 0) 4259 goto out; 4260 if (ret > 0) { /* ret = -ENOENT; */ 4261 ret = 0; 4262 goto out; 4263 } 4264 4265 bctl = kzalloc(sizeof(*bctl), GFP_NOFS); 4266 if (!bctl) { 4267 ret = -ENOMEM; 4268 goto out; 4269 } 4270 4271 leaf = path->nodes[0]; 4272 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 4273 4274 bctl->flags = btrfs_balance_flags(leaf, item); 4275 bctl->flags |= BTRFS_BALANCE_RESUME; 4276 4277 btrfs_balance_data(leaf, item, &disk_bargs); 4278 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs); 4279 btrfs_balance_meta(leaf, item, &disk_bargs); 4280 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs); 4281 btrfs_balance_sys(leaf, item, &disk_bargs); 4282 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs); 4283 4284 /* 4285 * This should never happen, as the paused balance state is recovered 4286 * during mount without any chance of other exclusive ops to collide. 4287 * 4288 * This gives the exclusive op status to balance and keeps in paused 4289 * state until user intervention (cancel or umount). If the ownership 4290 * cannot be assigned, show a message but do not fail. The balance 4291 * is in a paused state and must have fs_info::balance_ctl properly 4292 * set up. 4293 */ 4294 if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) 4295 btrfs_warn(fs_info, 4296 "balance: cannot set exclusive op status, resume manually"); 4297 4298 mutex_lock(&fs_info->balance_mutex); 4299 BUG_ON(fs_info->balance_ctl); 4300 spin_lock(&fs_info->balance_lock); 4301 fs_info->balance_ctl = bctl; 4302 spin_unlock(&fs_info->balance_lock); 4303 mutex_unlock(&fs_info->balance_mutex); 4304 out: 4305 btrfs_free_path(path); 4306 return ret; 4307 } 4308 4309 int btrfs_pause_balance(struct btrfs_fs_info *fs_info) 4310 { 4311 int ret = 0; 4312 4313 mutex_lock(&fs_info->balance_mutex); 4314 if (!fs_info->balance_ctl) { 4315 mutex_unlock(&fs_info->balance_mutex); 4316 return -ENOTCONN; 4317 } 4318 4319 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4320 atomic_inc(&fs_info->balance_pause_req); 4321 mutex_unlock(&fs_info->balance_mutex); 4322 4323 wait_event(fs_info->balance_wait_q, 4324 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4325 4326 mutex_lock(&fs_info->balance_mutex); 4327 /* we are good with balance_ctl ripped off from under us */ 4328 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4329 atomic_dec(&fs_info->balance_pause_req); 4330 } else { 4331 ret = -ENOTCONN; 4332 } 4333 4334 mutex_unlock(&fs_info->balance_mutex); 4335 return ret; 4336 } 4337 4338 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) 4339 { 4340 mutex_lock(&fs_info->balance_mutex); 4341 if (!fs_info->balance_ctl) { 4342 mutex_unlock(&fs_info->balance_mutex); 4343 return -ENOTCONN; 4344 } 4345 4346 /* 4347 * A paused balance with the item stored on disk can be resumed at 4348 * mount time if the mount is read-write. Otherwise it's still paused 4349 * and we must not allow cancelling as it deletes the item. 4350 */ 4351 if (sb_rdonly(fs_info->sb)) { 4352 mutex_unlock(&fs_info->balance_mutex); 4353 return -EROFS; 4354 } 4355 4356 atomic_inc(&fs_info->balance_cancel_req); 4357 /* 4358 * if we are running just wait and return, balance item is 4359 * deleted in btrfs_balance in this case 4360 */ 4361 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4362 mutex_unlock(&fs_info->balance_mutex); 4363 wait_event(fs_info->balance_wait_q, 4364 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4365 mutex_lock(&fs_info->balance_mutex); 4366 } else { 4367 mutex_unlock(&fs_info->balance_mutex); 4368 /* 4369 * Lock released to allow other waiters to continue, we'll 4370 * reexamine the status again. 4371 */ 4372 mutex_lock(&fs_info->balance_mutex); 4373 4374 if (fs_info->balance_ctl) { 4375 reset_balance_state(fs_info); 4376 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags); 4377 btrfs_info(fs_info, "balance: canceled"); 4378 } 4379 } 4380 4381 BUG_ON(fs_info->balance_ctl || 4382 test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4383 atomic_dec(&fs_info->balance_cancel_req); 4384 mutex_unlock(&fs_info->balance_mutex); 4385 return 0; 4386 } 4387 4388 int btrfs_uuid_scan_kthread(void *data) 4389 { 4390 struct btrfs_fs_info *fs_info = data; 4391 struct btrfs_root *root = fs_info->tree_root; 4392 struct btrfs_key key; 4393 struct btrfs_path *path = NULL; 4394 int ret = 0; 4395 struct extent_buffer *eb; 4396 int slot; 4397 struct btrfs_root_item root_item; 4398 u32 item_size; 4399 struct btrfs_trans_handle *trans = NULL; 4400 bool closing = false; 4401 4402 path = btrfs_alloc_path(); 4403 if (!path) { 4404 ret = -ENOMEM; 4405 goto out; 4406 } 4407 4408 key.objectid = 0; 4409 key.type = BTRFS_ROOT_ITEM_KEY; 4410 key.offset = 0; 4411 4412 while (1) { 4413 if (btrfs_fs_closing(fs_info)) { 4414 closing = true; 4415 break; 4416 } 4417 ret = btrfs_search_forward(root, &key, path, 4418 BTRFS_OLDEST_GENERATION); 4419 if (ret) { 4420 if (ret > 0) 4421 ret = 0; 4422 break; 4423 } 4424 4425 if (key.type != BTRFS_ROOT_ITEM_KEY || 4426 (key.objectid < BTRFS_FIRST_FREE_OBJECTID && 4427 key.objectid != BTRFS_FS_TREE_OBJECTID) || 4428 key.objectid > BTRFS_LAST_FREE_OBJECTID) 4429 goto skip; 4430 4431 eb = path->nodes[0]; 4432 slot = path->slots[0]; 4433 item_size = btrfs_item_size_nr(eb, slot); 4434 if (item_size < sizeof(root_item)) 4435 goto skip; 4436 4437 read_extent_buffer(eb, &root_item, 4438 btrfs_item_ptr_offset(eb, slot), 4439 (int)sizeof(root_item)); 4440 if (btrfs_root_refs(&root_item) == 0) 4441 goto skip; 4442 4443 if (!btrfs_is_empty_uuid(root_item.uuid) || 4444 !btrfs_is_empty_uuid(root_item.received_uuid)) { 4445 if (trans) 4446 goto update_tree; 4447 4448 btrfs_release_path(path); 4449 /* 4450 * 1 - subvol uuid item 4451 * 1 - received_subvol uuid item 4452 */ 4453 trans = btrfs_start_transaction(fs_info->uuid_root, 2); 4454 if (IS_ERR(trans)) { 4455 ret = PTR_ERR(trans); 4456 break; 4457 } 4458 continue; 4459 } else { 4460 goto skip; 4461 } 4462 update_tree: 4463 btrfs_release_path(path); 4464 if (!btrfs_is_empty_uuid(root_item.uuid)) { 4465 ret = btrfs_uuid_tree_add(trans, root_item.uuid, 4466 BTRFS_UUID_KEY_SUBVOL, 4467 key.objectid); 4468 if (ret < 0) { 4469 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4470 ret); 4471 break; 4472 } 4473 } 4474 4475 if (!btrfs_is_empty_uuid(root_item.received_uuid)) { 4476 ret = btrfs_uuid_tree_add(trans, 4477 root_item.received_uuid, 4478 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4479 key.objectid); 4480 if (ret < 0) { 4481 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4482 ret); 4483 break; 4484 } 4485 } 4486 4487 skip: 4488 btrfs_release_path(path); 4489 if (trans) { 4490 ret = btrfs_end_transaction(trans); 4491 trans = NULL; 4492 if (ret) 4493 break; 4494 } 4495 4496 if (key.offset < (u64)-1) { 4497 key.offset++; 4498 } else if (key.type < BTRFS_ROOT_ITEM_KEY) { 4499 key.offset = 0; 4500 key.type = BTRFS_ROOT_ITEM_KEY; 4501 } else if (key.objectid < (u64)-1) { 4502 key.offset = 0; 4503 key.type = BTRFS_ROOT_ITEM_KEY; 4504 key.objectid++; 4505 } else { 4506 break; 4507 } 4508 cond_resched(); 4509 } 4510 4511 out: 4512 btrfs_free_path(path); 4513 if (trans && !IS_ERR(trans)) 4514 btrfs_end_transaction(trans); 4515 if (ret) 4516 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret); 4517 else if (!closing) 4518 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); 4519 up(&fs_info->uuid_tree_rescan_sem); 4520 return 0; 4521 } 4522 4523 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info) 4524 { 4525 struct btrfs_trans_handle *trans; 4526 struct btrfs_root *tree_root = fs_info->tree_root; 4527 struct btrfs_root *uuid_root; 4528 struct task_struct *task; 4529 int ret; 4530 4531 /* 4532 * 1 - root node 4533 * 1 - root item 4534 */ 4535 trans = btrfs_start_transaction(tree_root, 2); 4536 if (IS_ERR(trans)) 4537 return PTR_ERR(trans); 4538 4539 uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID); 4540 if (IS_ERR(uuid_root)) { 4541 ret = PTR_ERR(uuid_root); 4542 btrfs_abort_transaction(trans, ret); 4543 btrfs_end_transaction(trans); 4544 return ret; 4545 } 4546 4547 fs_info->uuid_root = uuid_root; 4548 4549 ret = btrfs_commit_transaction(trans); 4550 if (ret) 4551 return ret; 4552 4553 down(&fs_info->uuid_tree_rescan_sem); 4554 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid"); 4555 if (IS_ERR(task)) { 4556 /* fs_info->update_uuid_tree_gen remains 0 in all error case */ 4557 btrfs_warn(fs_info, "failed to start uuid_scan task"); 4558 up(&fs_info->uuid_tree_rescan_sem); 4559 return PTR_ERR(task); 4560 } 4561 4562 return 0; 4563 } 4564 4565 /* 4566 * shrinking a device means finding all of the device extents past 4567 * the new size, and then following the back refs to the chunks. 4568 * The chunk relocation code actually frees the device extent 4569 */ 4570 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) 4571 { 4572 struct btrfs_fs_info *fs_info = device->fs_info; 4573 struct btrfs_root *root = fs_info->dev_root; 4574 struct btrfs_trans_handle *trans; 4575 struct btrfs_dev_extent *dev_extent = NULL; 4576 struct btrfs_path *path; 4577 u64 length; 4578 u64 chunk_offset; 4579 int ret; 4580 int slot; 4581 int failed = 0; 4582 bool retried = false; 4583 struct extent_buffer *l; 4584 struct btrfs_key key; 4585 struct btrfs_super_block *super_copy = fs_info->super_copy; 4586 u64 old_total = btrfs_super_total_bytes(super_copy); 4587 u64 old_size = btrfs_device_get_total_bytes(device); 4588 u64 diff; 4589 u64 start; 4590 4591 new_size = round_down(new_size, fs_info->sectorsize); 4592 start = new_size; 4593 diff = round_down(old_size - new_size, fs_info->sectorsize); 4594 4595 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 4596 return -EINVAL; 4597 4598 path = btrfs_alloc_path(); 4599 if (!path) 4600 return -ENOMEM; 4601 4602 path->reada = READA_BACK; 4603 4604 trans = btrfs_start_transaction(root, 0); 4605 if (IS_ERR(trans)) { 4606 btrfs_free_path(path); 4607 return PTR_ERR(trans); 4608 } 4609 4610 mutex_lock(&fs_info->chunk_mutex); 4611 4612 btrfs_device_set_total_bytes(device, new_size); 4613 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 4614 device->fs_devices->total_rw_bytes -= diff; 4615 atomic64_sub(diff, &fs_info->free_chunk_space); 4616 } 4617 4618 /* 4619 * Once the device's size has been set to the new size, ensure all 4620 * in-memory chunks are synced to disk so that the loop below sees them 4621 * and relocates them accordingly. 4622 */ 4623 if (contains_pending_extent(device, &start, diff)) { 4624 mutex_unlock(&fs_info->chunk_mutex); 4625 ret = btrfs_commit_transaction(trans); 4626 if (ret) 4627 goto done; 4628 } else { 4629 mutex_unlock(&fs_info->chunk_mutex); 4630 btrfs_end_transaction(trans); 4631 } 4632 4633 again: 4634 key.objectid = device->devid; 4635 key.offset = (u64)-1; 4636 key.type = BTRFS_DEV_EXTENT_KEY; 4637 4638 do { 4639 mutex_lock(&fs_info->delete_unused_bgs_mutex); 4640 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4641 if (ret < 0) { 4642 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 4643 goto done; 4644 } 4645 4646 ret = btrfs_previous_item(root, path, 0, key.type); 4647 if (ret) 4648 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 4649 if (ret < 0) 4650 goto done; 4651 if (ret) { 4652 ret = 0; 4653 btrfs_release_path(path); 4654 break; 4655 } 4656 4657 l = path->nodes[0]; 4658 slot = path->slots[0]; 4659 btrfs_item_key_to_cpu(l, &key, path->slots[0]); 4660 4661 if (key.objectid != device->devid) { 4662 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 4663 btrfs_release_path(path); 4664 break; 4665 } 4666 4667 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 4668 length = btrfs_dev_extent_length(l, dev_extent); 4669 4670 if (key.offset + length <= new_size) { 4671 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 4672 btrfs_release_path(path); 4673 break; 4674 } 4675 4676 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); 4677 btrfs_release_path(path); 4678 4679 /* 4680 * We may be relocating the only data chunk we have, 4681 * which could potentially end up with losing data's 4682 * raid profile, so lets allocate an empty one in 4683 * advance. 4684 */ 4685 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset); 4686 if (ret < 0) { 4687 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 4688 goto done; 4689 } 4690 4691 ret = btrfs_relocate_chunk(fs_info, chunk_offset); 4692 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 4693 if (ret == -ENOSPC) { 4694 failed++; 4695 } else if (ret) { 4696 if (ret == -ETXTBSY) { 4697 btrfs_warn(fs_info, 4698 "could not shrink block group %llu due to active swapfile", 4699 chunk_offset); 4700 } 4701 goto done; 4702 } 4703 } while (key.offset-- > 0); 4704 4705 if (failed && !retried) { 4706 failed = 0; 4707 retried = true; 4708 goto again; 4709 } else if (failed && retried) { 4710 ret = -ENOSPC; 4711 goto done; 4712 } 4713 4714 /* Shrinking succeeded, else we would be at "done". */ 4715 trans = btrfs_start_transaction(root, 0); 4716 if (IS_ERR(trans)) { 4717 ret = PTR_ERR(trans); 4718 goto done; 4719 } 4720 4721 mutex_lock(&fs_info->chunk_mutex); 4722 /* Clear all state bits beyond the shrunk device size */ 4723 clear_extent_bits(&device->alloc_state, new_size, (u64)-1, 4724 CHUNK_STATE_MASK); 4725 4726 btrfs_device_set_disk_total_bytes(device, new_size); 4727 if (list_empty(&device->post_commit_list)) 4728 list_add_tail(&device->post_commit_list, 4729 &trans->transaction->dev_update_list); 4730 4731 WARN_ON(diff > old_total); 4732 btrfs_set_super_total_bytes(super_copy, 4733 round_down(old_total - diff, fs_info->sectorsize)); 4734 mutex_unlock(&fs_info->chunk_mutex); 4735 4736 /* Now btrfs_update_device() will change the on-disk size. */ 4737 ret = btrfs_update_device(trans, device); 4738 if (ret < 0) { 4739 btrfs_abort_transaction(trans, ret); 4740 btrfs_end_transaction(trans); 4741 } else { 4742 ret = btrfs_commit_transaction(trans); 4743 } 4744 done: 4745 btrfs_free_path(path); 4746 if (ret) { 4747 mutex_lock(&fs_info->chunk_mutex); 4748 btrfs_device_set_total_bytes(device, old_size); 4749 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 4750 device->fs_devices->total_rw_bytes += diff; 4751 atomic64_add(diff, &fs_info->free_chunk_space); 4752 mutex_unlock(&fs_info->chunk_mutex); 4753 } 4754 return ret; 4755 } 4756 4757 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info, 4758 struct btrfs_key *key, 4759 struct btrfs_chunk *chunk, int item_size) 4760 { 4761 struct btrfs_super_block *super_copy = fs_info->super_copy; 4762 struct btrfs_disk_key disk_key; 4763 u32 array_size; 4764 u8 *ptr; 4765 4766 mutex_lock(&fs_info->chunk_mutex); 4767 array_size = btrfs_super_sys_array_size(super_copy); 4768 if (array_size + item_size + sizeof(disk_key) 4769 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) { 4770 mutex_unlock(&fs_info->chunk_mutex); 4771 return -EFBIG; 4772 } 4773 4774 ptr = super_copy->sys_chunk_array + array_size; 4775 btrfs_cpu_key_to_disk(&disk_key, key); 4776 memcpy(ptr, &disk_key, sizeof(disk_key)); 4777 ptr += sizeof(disk_key); 4778 memcpy(ptr, chunk, item_size); 4779 item_size += sizeof(disk_key); 4780 btrfs_set_super_sys_array_size(super_copy, array_size + item_size); 4781 mutex_unlock(&fs_info->chunk_mutex); 4782 4783 return 0; 4784 } 4785 4786 /* 4787 * sort the devices in descending order by max_avail, total_avail 4788 */ 4789 static int btrfs_cmp_device_info(const void *a, const void *b) 4790 { 4791 const struct btrfs_device_info *di_a = a; 4792 const struct btrfs_device_info *di_b = b; 4793 4794 if (di_a->max_avail > di_b->max_avail) 4795 return -1; 4796 if (di_a->max_avail < di_b->max_avail) 4797 return 1; 4798 if (di_a->total_avail > di_b->total_avail) 4799 return -1; 4800 if (di_a->total_avail < di_b->total_avail) 4801 return 1; 4802 return 0; 4803 } 4804 4805 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type) 4806 { 4807 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK)) 4808 return; 4809 4810 btrfs_set_fs_incompat(info, RAID56); 4811 } 4812 4813 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type) 4814 { 4815 if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4))) 4816 return; 4817 4818 btrfs_set_fs_incompat(info, RAID1C34); 4819 } 4820 4821 /* 4822 * Structure used internally for __btrfs_alloc_chunk() function. 4823 * Wraps needed parameters. 4824 */ 4825 struct alloc_chunk_ctl { 4826 u64 start; 4827 u64 type; 4828 /* Total number of stripes to allocate */ 4829 int num_stripes; 4830 /* sub_stripes info for map */ 4831 int sub_stripes; 4832 /* Stripes per device */ 4833 int dev_stripes; 4834 /* Maximum number of devices to use */ 4835 int devs_max; 4836 /* Minimum number of devices to use */ 4837 int devs_min; 4838 /* ndevs has to be a multiple of this */ 4839 int devs_increment; 4840 /* Number of copies */ 4841 int ncopies; 4842 /* Number of stripes worth of bytes to store parity information */ 4843 int nparity; 4844 u64 max_stripe_size; 4845 u64 max_chunk_size; 4846 u64 dev_extent_min; 4847 u64 stripe_size; 4848 u64 chunk_size; 4849 int ndevs; 4850 }; 4851 4852 static void init_alloc_chunk_ctl_policy_regular( 4853 struct btrfs_fs_devices *fs_devices, 4854 struct alloc_chunk_ctl *ctl) 4855 { 4856 u64 type = ctl->type; 4857 4858 if (type & BTRFS_BLOCK_GROUP_DATA) { 4859 ctl->max_stripe_size = SZ_1G; 4860 ctl->max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE; 4861 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 4862 /* For larger filesystems, use larger metadata chunks */ 4863 if (fs_devices->total_rw_bytes > 50ULL * SZ_1G) 4864 ctl->max_stripe_size = SZ_1G; 4865 else 4866 ctl->max_stripe_size = SZ_256M; 4867 ctl->max_chunk_size = ctl->max_stripe_size; 4868 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 4869 ctl->max_stripe_size = SZ_32M; 4870 ctl->max_chunk_size = 2 * ctl->max_stripe_size; 4871 ctl->devs_max = min_t(int, ctl->devs_max, 4872 BTRFS_MAX_DEVS_SYS_CHUNK); 4873 } else { 4874 BUG(); 4875 } 4876 4877 /* We don't want a chunk larger than 10% of writable space */ 4878 ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1), 4879 ctl->max_chunk_size); 4880 ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes; 4881 } 4882 4883 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices, 4884 struct alloc_chunk_ctl *ctl) 4885 { 4886 int index = btrfs_bg_flags_to_raid_index(ctl->type); 4887 4888 ctl->sub_stripes = btrfs_raid_array[index].sub_stripes; 4889 ctl->dev_stripes = btrfs_raid_array[index].dev_stripes; 4890 ctl->devs_max = btrfs_raid_array[index].devs_max; 4891 if (!ctl->devs_max) 4892 ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info); 4893 ctl->devs_min = btrfs_raid_array[index].devs_min; 4894 ctl->devs_increment = btrfs_raid_array[index].devs_increment; 4895 ctl->ncopies = btrfs_raid_array[index].ncopies; 4896 ctl->nparity = btrfs_raid_array[index].nparity; 4897 ctl->ndevs = 0; 4898 4899 switch (fs_devices->chunk_alloc_policy) { 4900 case BTRFS_CHUNK_ALLOC_REGULAR: 4901 init_alloc_chunk_ctl_policy_regular(fs_devices, ctl); 4902 break; 4903 default: 4904 BUG(); 4905 } 4906 } 4907 4908 static int gather_device_info(struct btrfs_fs_devices *fs_devices, 4909 struct alloc_chunk_ctl *ctl, 4910 struct btrfs_device_info *devices_info) 4911 { 4912 struct btrfs_fs_info *info = fs_devices->fs_info; 4913 struct btrfs_device *device; 4914 u64 total_avail; 4915 u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes; 4916 int ret; 4917 int ndevs = 0; 4918 u64 max_avail; 4919 u64 dev_offset; 4920 4921 /* 4922 * in the first pass through the devices list, we gather information 4923 * about the available holes on each device. 4924 */ 4925 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { 4926 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 4927 WARN(1, KERN_ERR 4928 "BTRFS: read-only device in alloc_list\n"); 4929 continue; 4930 } 4931 4932 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 4933 &device->dev_state) || 4934 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 4935 continue; 4936 4937 if (device->total_bytes > device->bytes_used) 4938 total_avail = device->total_bytes - device->bytes_used; 4939 else 4940 total_avail = 0; 4941 4942 /* If there is no space on this device, skip it. */ 4943 if (total_avail < ctl->dev_extent_min) 4944 continue; 4945 4946 ret = find_free_dev_extent(device, dev_extent_want, &dev_offset, 4947 &max_avail); 4948 if (ret && ret != -ENOSPC) 4949 return ret; 4950 4951 if (ret == 0) 4952 max_avail = dev_extent_want; 4953 4954 if (max_avail < ctl->dev_extent_min) { 4955 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 4956 btrfs_debug(info, 4957 "%s: devid %llu has no free space, have=%llu want=%llu", 4958 __func__, device->devid, max_avail, 4959 ctl->dev_extent_min); 4960 continue; 4961 } 4962 4963 if (ndevs == fs_devices->rw_devices) { 4964 WARN(1, "%s: found more than %llu devices\n", 4965 __func__, fs_devices->rw_devices); 4966 break; 4967 } 4968 devices_info[ndevs].dev_offset = dev_offset; 4969 devices_info[ndevs].max_avail = max_avail; 4970 devices_info[ndevs].total_avail = total_avail; 4971 devices_info[ndevs].dev = device; 4972 ++ndevs; 4973 } 4974 ctl->ndevs = ndevs; 4975 4976 /* 4977 * now sort the devices by hole size / available space 4978 */ 4979 sort(devices_info, ndevs, sizeof(struct btrfs_device_info), 4980 btrfs_cmp_device_info, NULL); 4981 4982 return 0; 4983 } 4984 4985 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl, 4986 struct btrfs_device_info *devices_info) 4987 { 4988 /* Number of stripes that count for block group size */ 4989 int data_stripes; 4990 4991 /* 4992 * The primary goal is to maximize the number of stripes, so use as 4993 * many devices as possible, even if the stripes are not maximum sized. 4994 * 4995 * The DUP profile stores more than one stripe per device, the 4996 * max_avail is the total size so we have to adjust. 4997 */ 4998 ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail, 4999 ctl->dev_stripes); 5000 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5001 5002 /* This will have to be fixed for RAID1 and RAID10 over more drives */ 5003 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5004 5005 /* 5006 * Use the number of data stripes to figure out how big this chunk is 5007 * really going to be in terms of logical address space, and compare 5008 * that answer with the max chunk size. If it's higher, we try to 5009 * reduce stripe_size. 5010 */ 5011 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) { 5012 /* 5013 * Reduce stripe_size, round it up to a 16MB boundary again and 5014 * then use it, unless it ends up being even bigger than the 5015 * previous value we had already. 5016 */ 5017 ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size, 5018 data_stripes), SZ_16M), 5019 ctl->stripe_size); 5020 } 5021 5022 /* Align to BTRFS_STRIPE_LEN */ 5023 ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN); 5024 ctl->chunk_size = ctl->stripe_size * data_stripes; 5025 5026 return 0; 5027 } 5028 5029 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices, 5030 struct alloc_chunk_ctl *ctl, 5031 struct btrfs_device_info *devices_info) 5032 { 5033 struct btrfs_fs_info *info = fs_devices->fs_info; 5034 5035 /* 5036 * Round down to number of usable stripes, devs_increment can be any 5037 * number so we can't use round_down() that requires power of 2, while 5038 * rounddown is safe. 5039 */ 5040 ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment); 5041 5042 if (ctl->ndevs < ctl->devs_min) { 5043 if (btrfs_test_opt(info, ENOSPC_DEBUG)) { 5044 btrfs_debug(info, 5045 "%s: not enough devices with free space: have=%d minimum required=%d", 5046 __func__, ctl->ndevs, ctl->devs_min); 5047 } 5048 return -ENOSPC; 5049 } 5050 5051 ctl->ndevs = min(ctl->ndevs, ctl->devs_max); 5052 5053 switch (fs_devices->chunk_alloc_policy) { 5054 case BTRFS_CHUNK_ALLOC_REGULAR: 5055 return decide_stripe_size_regular(ctl, devices_info); 5056 default: 5057 BUG(); 5058 } 5059 } 5060 5061 static int create_chunk(struct btrfs_trans_handle *trans, 5062 struct alloc_chunk_ctl *ctl, 5063 struct btrfs_device_info *devices_info) 5064 { 5065 struct btrfs_fs_info *info = trans->fs_info; 5066 struct map_lookup *map = NULL; 5067 struct extent_map_tree *em_tree; 5068 struct extent_map *em; 5069 u64 start = ctl->start; 5070 u64 type = ctl->type; 5071 int ret; 5072 int i; 5073 int j; 5074 5075 map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS); 5076 if (!map) 5077 return -ENOMEM; 5078 map->num_stripes = ctl->num_stripes; 5079 5080 for (i = 0; i < ctl->ndevs; ++i) { 5081 for (j = 0; j < ctl->dev_stripes; ++j) { 5082 int s = i * ctl->dev_stripes + j; 5083 map->stripes[s].dev = devices_info[i].dev; 5084 map->stripes[s].physical = devices_info[i].dev_offset + 5085 j * ctl->stripe_size; 5086 } 5087 } 5088 map->stripe_len = BTRFS_STRIPE_LEN; 5089 map->io_align = BTRFS_STRIPE_LEN; 5090 map->io_width = BTRFS_STRIPE_LEN; 5091 map->type = type; 5092 map->sub_stripes = ctl->sub_stripes; 5093 5094 trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size); 5095 5096 em = alloc_extent_map(); 5097 if (!em) { 5098 kfree(map); 5099 return -ENOMEM; 5100 } 5101 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 5102 em->map_lookup = map; 5103 em->start = start; 5104 em->len = ctl->chunk_size; 5105 em->block_start = 0; 5106 em->block_len = em->len; 5107 em->orig_block_len = ctl->stripe_size; 5108 5109 em_tree = &info->mapping_tree; 5110 write_lock(&em_tree->lock); 5111 ret = add_extent_mapping(em_tree, em, 0); 5112 if (ret) { 5113 write_unlock(&em_tree->lock); 5114 free_extent_map(em); 5115 return ret; 5116 } 5117 write_unlock(&em_tree->lock); 5118 5119 ret = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size); 5120 if (ret) 5121 goto error_del_extent; 5122 5123 for (i = 0; i < map->num_stripes; i++) { 5124 struct btrfs_device *dev = map->stripes[i].dev; 5125 5126 btrfs_device_set_bytes_used(dev, 5127 dev->bytes_used + ctl->stripe_size); 5128 if (list_empty(&dev->post_commit_list)) 5129 list_add_tail(&dev->post_commit_list, 5130 &trans->transaction->dev_update_list); 5131 } 5132 5133 atomic64_sub(ctl->stripe_size * map->num_stripes, 5134 &info->free_chunk_space); 5135 5136 free_extent_map(em); 5137 check_raid56_incompat_flag(info, type); 5138 check_raid1c34_incompat_flag(info, type); 5139 5140 return 0; 5141 5142 error_del_extent: 5143 write_lock(&em_tree->lock); 5144 remove_extent_mapping(em_tree, em); 5145 write_unlock(&em_tree->lock); 5146 5147 /* One for our allocation */ 5148 free_extent_map(em); 5149 /* One for the tree reference */ 5150 free_extent_map(em); 5151 5152 return ret; 5153 } 5154 5155 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type) 5156 { 5157 struct btrfs_fs_info *info = trans->fs_info; 5158 struct btrfs_fs_devices *fs_devices = info->fs_devices; 5159 struct btrfs_device_info *devices_info = NULL; 5160 struct alloc_chunk_ctl ctl; 5161 int ret; 5162 5163 lockdep_assert_held(&info->chunk_mutex); 5164 5165 if (!alloc_profile_is_valid(type, 0)) { 5166 ASSERT(0); 5167 return -EINVAL; 5168 } 5169 5170 if (list_empty(&fs_devices->alloc_list)) { 5171 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5172 btrfs_debug(info, "%s: no writable device", __func__); 5173 return -ENOSPC; 5174 } 5175 5176 if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { 5177 btrfs_err(info, "invalid chunk type 0x%llx requested", type); 5178 ASSERT(0); 5179 return -EINVAL; 5180 } 5181 5182 ctl.start = find_next_chunk(info); 5183 ctl.type = type; 5184 init_alloc_chunk_ctl(fs_devices, &ctl); 5185 5186 devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info), 5187 GFP_NOFS); 5188 if (!devices_info) 5189 return -ENOMEM; 5190 5191 ret = gather_device_info(fs_devices, &ctl, devices_info); 5192 if (ret < 0) 5193 goto out; 5194 5195 ret = decide_stripe_size(fs_devices, &ctl, devices_info); 5196 if (ret < 0) 5197 goto out; 5198 5199 ret = create_chunk(trans, &ctl, devices_info); 5200 5201 out: 5202 kfree(devices_info); 5203 return ret; 5204 } 5205 5206 /* 5207 * Chunk allocation falls into two parts. The first part does work 5208 * that makes the new allocated chunk usable, but does not do any operation 5209 * that modifies the chunk tree. The second part does the work that 5210 * requires modifying the chunk tree. This division is important for the 5211 * bootstrap process of adding storage to a seed btrfs. 5212 */ 5213 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans, 5214 u64 chunk_offset, u64 chunk_size) 5215 { 5216 struct btrfs_fs_info *fs_info = trans->fs_info; 5217 struct btrfs_root *extent_root = fs_info->extent_root; 5218 struct btrfs_root *chunk_root = fs_info->chunk_root; 5219 struct btrfs_key key; 5220 struct btrfs_device *device; 5221 struct btrfs_chunk *chunk; 5222 struct btrfs_stripe *stripe; 5223 struct extent_map *em; 5224 struct map_lookup *map; 5225 size_t item_size; 5226 u64 dev_offset; 5227 u64 stripe_size; 5228 int i = 0; 5229 int ret = 0; 5230 5231 em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size); 5232 if (IS_ERR(em)) 5233 return PTR_ERR(em); 5234 5235 map = em->map_lookup; 5236 item_size = btrfs_chunk_item_size(map->num_stripes); 5237 stripe_size = em->orig_block_len; 5238 5239 chunk = kzalloc(item_size, GFP_NOFS); 5240 if (!chunk) { 5241 ret = -ENOMEM; 5242 goto out; 5243 } 5244 5245 /* 5246 * Take the device list mutex to prevent races with the final phase of 5247 * a device replace operation that replaces the device object associated 5248 * with the map's stripes, because the device object's id can change 5249 * at any time during that final phase of the device replace operation 5250 * (dev-replace.c:btrfs_dev_replace_finishing()). 5251 */ 5252 mutex_lock(&fs_info->fs_devices->device_list_mutex); 5253 for (i = 0; i < map->num_stripes; i++) { 5254 device = map->stripes[i].dev; 5255 dev_offset = map->stripes[i].physical; 5256 5257 ret = btrfs_update_device(trans, device); 5258 if (ret) 5259 break; 5260 ret = btrfs_alloc_dev_extent(trans, device, chunk_offset, 5261 dev_offset, stripe_size); 5262 if (ret) 5263 break; 5264 } 5265 if (ret) { 5266 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 5267 goto out; 5268 } 5269 5270 stripe = &chunk->stripe; 5271 for (i = 0; i < map->num_stripes; i++) { 5272 device = map->stripes[i].dev; 5273 dev_offset = map->stripes[i].physical; 5274 5275 btrfs_set_stack_stripe_devid(stripe, device->devid); 5276 btrfs_set_stack_stripe_offset(stripe, dev_offset); 5277 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE); 5278 stripe++; 5279 } 5280 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 5281 5282 btrfs_set_stack_chunk_length(chunk, chunk_size); 5283 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid); 5284 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len); 5285 btrfs_set_stack_chunk_type(chunk, map->type); 5286 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes); 5287 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len); 5288 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len); 5289 btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize); 5290 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes); 5291 5292 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 5293 key.type = BTRFS_CHUNK_ITEM_KEY; 5294 key.offset = chunk_offset; 5295 5296 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); 5297 if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 5298 /* 5299 * TODO: Cleanup of inserted chunk root in case of 5300 * failure. 5301 */ 5302 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size); 5303 } 5304 5305 out: 5306 kfree(chunk); 5307 free_extent_map(em); 5308 return ret; 5309 } 5310 5311 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans) 5312 { 5313 struct btrfs_fs_info *fs_info = trans->fs_info; 5314 u64 alloc_profile; 5315 int ret; 5316 5317 alloc_profile = btrfs_metadata_alloc_profile(fs_info); 5318 ret = btrfs_alloc_chunk(trans, alloc_profile); 5319 if (ret) 5320 return ret; 5321 5322 alloc_profile = btrfs_system_alloc_profile(fs_info); 5323 ret = btrfs_alloc_chunk(trans, alloc_profile); 5324 return ret; 5325 } 5326 5327 static inline int btrfs_chunk_max_errors(struct map_lookup *map) 5328 { 5329 const int index = btrfs_bg_flags_to_raid_index(map->type); 5330 5331 return btrfs_raid_array[index].tolerated_failures; 5332 } 5333 5334 int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset) 5335 { 5336 struct extent_map *em; 5337 struct map_lookup *map; 5338 int readonly = 0; 5339 int miss_ndevs = 0; 5340 int i; 5341 5342 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 5343 if (IS_ERR(em)) 5344 return 1; 5345 5346 map = em->map_lookup; 5347 for (i = 0; i < map->num_stripes; i++) { 5348 if (test_bit(BTRFS_DEV_STATE_MISSING, 5349 &map->stripes[i].dev->dev_state)) { 5350 miss_ndevs++; 5351 continue; 5352 } 5353 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, 5354 &map->stripes[i].dev->dev_state)) { 5355 readonly = 1; 5356 goto end; 5357 } 5358 } 5359 5360 /* 5361 * If the number of missing devices is larger than max errors, 5362 * we can not write the data into that chunk successfully, so 5363 * set it readonly. 5364 */ 5365 if (miss_ndevs > btrfs_chunk_max_errors(map)) 5366 readonly = 1; 5367 end: 5368 free_extent_map(em); 5369 return readonly; 5370 } 5371 5372 void btrfs_mapping_tree_free(struct extent_map_tree *tree) 5373 { 5374 struct extent_map *em; 5375 5376 while (1) { 5377 write_lock(&tree->lock); 5378 em = lookup_extent_mapping(tree, 0, (u64)-1); 5379 if (em) 5380 remove_extent_mapping(tree, em); 5381 write_unlock(&tree->lock); 5382 if (!em) 5383 break; 5384 /* once for us */ 5385 free_extent_map(em); 5386 /* once for the tree */ 5387 free_extent_map(em); 5388 } 5389 } 5390 5391 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5392 { 5393 struct extent_map *em; 5394 struct map_lookup *map; 5395 int ret; 5396 5397 em = btrfs_get_chunk_map(fs_info, logical, len); 5398 if (IS_ERR(em)) 5399 /* 5400 * We could return errors for these cases, but that could get 5401 * ugly and we'd probably do the same thing which is just not do 5402 * anything else and exit, so return 1 so the callers don't try 5403 * to use other copies. 5404 */ 5405 return 1; 5406 5407 map = em->map_lookup; 5408 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK)) 5409 ret = map->num_stripes; 5410 else if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5411 ret = map->sub_stripes; 5412 else if (map->type & BTRFS_BLOCK_GROUP_RAID5) 5413 ret = 2; 5414 else if (map->type & BTRFS_BLOCK_GROUP_RAID6) 5415 /* 5416 * There could be two corrupted data stripes, we need 5417 * to loop retry in order to rebuild the correct data. 5418 * 5419 * Fail a stripe at a time on every retry except the 5420 * stripe under reconstruction. 5421 */ 5422 ret = map->num_stripes; 5423 else 5424 ret = 1; 5425 free_extent_map(em); 5426 5427 down_read(&fs_info->dev_replace.rwsem); 5428 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) && 5429 fs_info->dev_replace.tgtdev) 5430 ret++; 5431 up_read(&fs_info->dev_replace.rwsem); 5432 5433 return ret; 5434 } 5435 5436 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info, 5437 u64 logical) 5438 { 5439 struct extent_map *em; 5440 struct map_lookup *map; 5441 unsigned long len = fs_info->sectorsize; 5442 5443 em = btrfs_get_chunk_map(fs_info, logical, len); 5444 5445 if (!WARN_ON(IS_ERR(em))) { 5446 map = em->map_lookup; 5447 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5448 len = map->stripe_len * nr_data_stripes(map); 5449 free_extent_map(em); 5450 } 5451 return len; 5452 } 5453 5454 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5455 { 5456 struct extent_map *em; 5457 struct map_lookup *map; 5458 int ret = 0; 5459 5460 em = btrfs_get_chunk_map(fs_info, logical, len); 5461 5462 if(!WARN_ON(IS_ERR(em))) { 5463 map = em->map_lookup; 5464 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5465 ret = 1; 5466 free_extent_map(em); 5467 } 5468 return ret; 5469 } 5470 5471 static int find_live_mirror(struct btrfs_fs_info *fs_info, 5472 struct map_lookup *map, int first, 5473 int dev_replace_is_ongoing) 5474 { 5475 int i; 5476 int num_stripes; 5477 int preferred_mirror; 5478 int tolerance; 5479 struct btrfs_device *srcdev; 5480 5481 ASSERT((map->type & 5482 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10))); 5483 5484 if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5485 num_stripes = map->sub_stripes; 5486 else 5487 num_stripes = map->num_stripes; 5488 5489 preferred_mirror = first + current->pid % num_stripes; 5490 5491 if (dev_replace_is_ongoing && 5492 fs_info->dev_replace.cont_reading_from_srcdev_mode == 5493 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID) 5494 srcdev = fs_info->dev_replace.srcdev; 5495 else 5496 srcdev = NULL; 5497 5498 /* 5499 * try to avoid the drive that is the source drive for a 5500 * dev-replace procedure, only choose it if no other non-missing 5501 * mirror is available 5502 */ 5503 for (tolerance = 0; tolerance < 2; tolerance++) { 5504 if (map->stripes[preferred_mirror].dev->bdev && 5505 (tolerance || map->stripes[preferred_mirror].dev != srcdev)) 5506 return preferred_mirror; 5507 for (i = first; i < first + num_stripes; i++) { 5508 if (map->stripes[i].dev->bdev && 5509 (tolerance || map->stripes[i].dev != srcdev)) 5510 return i; 5511 } 5512 } 5513 5514 /* we couldn't find one that doesn't fail. Just return something 5515 * and the io error handling code will clean up eventually 5516 */ 5517 return preferred_mirror; 5518 } 5519 5520 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */ 5521 static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes) 5522 { 5523 int i; 5524 int again = 1; 5525 5526 while (again) { 5527 again = 0; 5528 for (i = 0; i < num_stripes - 1; i++) { 5529 /* Swap if parity is on a smaller index */ 5530 if (bbio->raid_map[i] > bbio->raid_map[i + 1]) { 5531 swap(bbio->stripes[i], bbio->stripes[i + 1]); 5532 swap(bbio->raid_map[i], bbio->raid_map[i + 1]); 5533 again = 1; 5534 } 5535 } 5536 } 5537 } 5538 5539 static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes) 5540 { 5541 struct btrfs_bio *bbio = kzalloc( 5542 /* the size of the btrfs_bio */ 5543 sizeof(struct btrfs_bio) + 5544 /* plus the variable array for the stripes */ 5545 sizeof(struct btrfs_bio_stripe) * (total_stripes) + 5546 /* plus the variable array for the tgt dev */ 5547 sizeof(int) * (real_stripes) + 5548 /* 5549 * plus the raid_map, which includes both the tgt dev 5550 * and the stripes 5551 */ 5552 sizeof(u64) * (total_stripes), 5553 GFP_NOFS|__GFP_NOFAIL); 5554 5555 atomic_set(&bbio->error, 0); 5556 refcount_set(&bbio->refs, 1); 5557 5558 bbio->tgtdev_map = (int *)(bbio->stripes + total_stripes); 5559 bbio->raid_map = (u64 *)(bbio->tgtdev_map + real_stripes); 5560 5561 return bbio; 5562 } 5563 5564 void btrfs_get_bbio(struct btrfs_bio *bbio) 5565 { 5566 WARN_ON(!refcount_read(&bbio->refs)); 5567 refcount_inc(&bbio->refs); 5568 } 5569 5570 void btrfs_put_bbio(struct btrfs_bio *bbio) 5571 { 5572 if (!bbio) 5573 return; 5574 if (refcount_dec_and_test(&bbio->refs)) 5575 kfree(bbio); 5576 } 5577 5578 /* can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */ 5579 /* 5580 * Please note that, discard won't be sent to target device of device 5581 * replace. 5582 */ 5583 static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info, 5584 u64 logical, u64 *length_ret, 5585 struct btrfs_bio **bbio_ret) 5586 { 5587 struct extent_map *em; 5588 struct map_lookup *map; 5589 struct btrfs_bio *bbio; 5590 u64 length = *length_ret; 5591 u64 offset; 5592 u64 stripe_nr; 5593 u64 stripe_nr_end; 5594 u64 stripe_end_offset; 5595 u64 stripe_cnt; 5596 u64 stripe_len; 5597 u64 stripe_offset; 5598 u64 num_stripes; 5599 u32 stripe_index; 5600 u32 factor = 0; 5601 u32 sub_stripes = 0; 5602 u64 stripes_per_dev = 0; 5603 u32 remaining_stripes = 0; 5604 u32 last_stripe = 0; 5605 int ret = 0; 5606 int i; 5607 5608 /* discard always return a bbio */ 5609 ASSERT(bbio_ret); 5610 5611 em = btrfs_get_chunk_map(fs_info, logical, length); 5612 if (IS_ERR(em)) 5613 return PTR_ERR(em); 5614 5615 map = em->map_lookup; 5616 /* we don't discard raid56 yet */ 5617 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 5618 ret = -EOPNOTSUPP; 5619 goto out; 5620 } 5621 5622 offset = logical - em->start; 5623 length = min_t(u64, em->start + em->len - logical, length); 5624 *length_ret = length; 5625 5626 stripe_len = map->stripe_len; 5627 /* 5628 * stripe_nr counts the total number of stripes we have to stride 5629 * to get to this block 5630 */ 5631 stripe_nr = div64_u64(offset, stripe_len); 5632 5633 /* stripe_offset is the offset of this block in its stripe */ 5634 stripe_offset = offset - stripe_nr * stripe_len; 5635 5636 stripe_nr_end = round_up(offset + length, map->stripe_len); 5637 stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len); 5638 stripe_cnt = stripe_nr_end - stripe_nr; 5639 stripe_end_offset = stripe_nr_end * map->stripe_len - 5640 (offset + length); 5641 /* 5642 * after this, stripe_nr is the number of stripes on this 5643 * device we have to walk to find the data, and stripe_index is 5644 * the number of our device in the stripe array 5645 */ 5646 num_stripes = 1; 5647 stripe_index = 0; 5648 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 5649 BTRFS_BLOCK_GROUP_RAID10)) { 5650 if (map->type & BTRFS_BLOCK_GROUP_RAID0) 5651 sub_stripes = 1; 5652 else 5653 sub_stripes = map->sub_stripes; 5654 5655 factor = map->num_stripes / sub_stripes; 5656 num_stripes = min_t(u64, map->num_stripes, 5657 sub_stripes * stripe_cnt); 5658 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 5659 stripe_index *= sub_stripes; 5660 stripes_per_dev = div_u64_rem(stripe_cnt, factor, 5661 &remaining_stripes); 5662 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe); 5663 last_stripe *= sub_stripes; 5664 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK | 5665 BTRFS_BLOCK_GROUP_DUP)) { 5666 num_stripes = map->num_stripes; 5667 } else { 5668 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 5669 &stripe_index); 5670 } 5671 5672 bbio = alloc_btrfs_bio(num_stripes, 0); 5673 if (!bbio) { 5674 ret = -ENOMEM; 5675 goto out; 5676 } 5677 5678 for (i = 0; i < num_stripes; i++) { 5679 bbio->stripes[i].physical = 5680 map->stripes[stripe_index].physical + 5681 stripe_offset + stripe_nr * map->stripe_len; 5682 bbio->stripes[i].dev = map->stripes[stripe_index].dev; 5683 5684 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 5685 BTRFS_BLOCK_GROUP_RAID10)) { 5686 bbio->stripes[i].length = stripes_per_dev * 5687 map->stripe_len; 5688 5689 if (i / sub_stripes < remaining_stripes) 5690 bbio->stripes[i].length += 5691 map->stripe_len; 5692 5693 /* 5694 * Special for the first stripe and 5695 * the last stripe: 5696 * 5697 * |-------|...|-------| 5698 * |----------| 5699 * off end_off 5700 */ 5701 if (i < sub_stripes) 5702 bbio->stripes[i].length -= 5703 stripe_offset; 5704 5705 if (stripe_index >= last_stripe && 5706 stripe_index <= (last_stripe + 5707 sub_stripes - 1)) 5708 bbio->stripes[i].length -= 5709 stripe_end_offset; 5710 5711 if (i == sub_stripes - 1) 5712 stripe_offset = 0; 5713 } else { 5714 bbio->stripes[i].length = length; 5715 } 5716 5717 stripe_index++; 5718 if (stripe_index == map->num_stripes) { 5719 stripe_index = 0; 5720 stripe_nr++; 5721 } 5722 } 5723 5724 *bbio_ret = bbio; 5725 bbio->map_type = map->type; 5726 bbio->num_stripes = num_stripes; 5727 out: 5728 free_extent_map(em); 5729 return ret; 5730 } 5731 5732 /* 5733 * In dev-replace case, for repair case (that's the only case where the mirror 5734 * is selected explicitly when calling btrfs_map_block), blocks left of the 5735 * left cursor can also be read from the target drive. 5736 * 5737 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the 5738 * array of stripes. 5739 * For READ, it also needs to be supported using the same mirror number. 5740 * 5741 * If the requested block is not left of the left cursor, EIO is returned. This 5742 * can happen because btrfs_num_copies() returns one more in the dev-replace 5743 * case. 5744 */ 5745 static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info, 5746 u64 logical, u64 length, 5747 u64 srcdev_devid, int *mirror_num, 5748 u64 *physical) 5749 { 5750 struct btrfs_bio *bbio = NULL; 5751 int num_stripes; 5752 int index_srcdev = 0; 5753 int found = 0; 5754 u64 physical_of_found = 0; 5755 int i; 5756 int ret = 0; 5757 5758 ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, 5759 logical, &length, &bbio, 0, 0); 5760 if (ret) { 5761 ASSERT(bbio == NULL); 5762 return ret; 5763 } 5764 5765 num_stripes = bbio->num_stripes; 5766 if (*mirror_num > num_stripes) { 5767 /* 5768 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror, 5769 * that means that the requested area is not left of the left 5770 * cursor 5771 */ 5772 btrfs_put_bbio(bbio); 5773 return -EIO; 5774 } 5775 5776 /* 5777 * process the rest of the function using the mirror_num of the source 5778 * drive. Therefore look it up first. At the end, patch the device 5779 * pointer to the one of the target drive. 5780 */ 5781 for (i = 0; i < num_stripes; i++) { 5782 if (bbio->stripes[i].dev->devid != srcdev_devid) 5783 continue; 5784 5785 /* 5786 * In case of DUP, in order to keep it simple, only add the 5787 * mirror with the lowest physical address 5788 */ 5789 if (found && 5790 physical_of_found <= bbio->stripes[i].physical) 5791 continue; 5792 5793 index_srcdev = i; 5794 found = 1; 5795 physical_of_found = bbio->stripes[i].physical; 5796 } 5797 5798 btrfs_put_bbio(bbio); 5799 5800 ASSERT(found); 5801 if (!found) 5802 return -EIO; 5803 5804 *mirror_num = index_srcdev + 1; 5805 *physical = physical_of_found; 5806 return ret; 5807 } 5808 5809 static void handle_ops_on_dev_replace(enum btrfs_map_op op, 5810 struct btrfs_bio **bbio_ret, 5811 struct btrfs_dev_replace *dev_replace, 5812 int *num_stripes_ret, int *max_errors_ret) 5813 { 5814 struct btrfs_bio *bbio = *bbio_ret; 5815 u64 srcdev_devid = dev_replace->srcdev->devid; 5816 int tgtdev_indexes = 0; 5817 int num_stripes = *num_stripes_ret; 5818 int max_errors = *max_errors_ret; 5819 int i; 5820 5821 if (op == BTRFS_MAP_WRITE) { 5822 int index_where_to_add; 5823 5824 /* 5825 * duplicate the write operations while the dev replace 5826 * procedure is running. Since the copying of the old disk to 5827 * the new disk takes place at run time while the filesystem is 5828 * mounted writable, the regular write operations to the old 5829 * disk have to be duplicated to go to the new disk as well. 5830 * 5831 * Note that device->missing is handled by the caller, and that 5832 * the write to the old disk is already set up in the stripes 5833 * array. 5834 */ 5835 index_where_to_add = num_stripes; 5836 for (i = 0; i < num_stripes; i++) { 5837 if (bbio->stripes[i].dev->devid == srcdev_devid) { 5838 /* write to new disk, too */ 5839 struct btrfs_bio_stripe *new = 5840 bbio->stripes + index_where_to_add; 5841 struct btrfs_bio_stripe *old = 5842 bbio->stripes + i; 5843 5844 new->physical = old->physical; 5845 new->length = old->length; 5846 new->dev = dev_replace->tgtdev; 5847 bbio->tgtdev_map[i] = index_where_to_add; 5848 index_where_to_add++; 5849 max_errors++; 5850 tgtdev_indexes++; 5851 } 5852 } 5853 num_stripes = index_where_to_add; 5854 } else if (op == BTRFS_MAP_GET_READ_MIRRORS) { 5855 int index_srcdev = 0; 5856 int found = 0; 5857 u64 physical_of_found = 0; 5858 5859 /* 5860 * During the dev-replace procedure, the target drive can also 5861 * be used to read data in case it is needed to repair a corrupt 5862 * block elsewhere. This is possible if the requested area is 5863 * left of the left cursor. In this area, the target drive is a 5864 * full copy of the source drive. 5865 */ 5866 for (i = 0; i < num_stripes; i++) { 5867 if (bbio->stripes[i].dev->devid == srcdev_devid) { 5868 /* 5869 * In case of DUP, in order to keep it simple, 5870 * only add the mirror with the lowest physical 5871 * address 5872 */ 5873 if (found && 5874 physical_of_found <= 5875 bbio->stripes[i].physical) 5876 continue; 5877 index_srcdev = i; 5878 found = 1; 5879 physical_of_found = bbio->stripes[i].physical; 5880 } 5881 } 5882 if (found) { 5883 struct btrfs_bio_stripe *tgtdev_stripe = 5884 bbio->stripes + num_stripes; 5885 5886 tgtdev_stripe->physical = physical_of_found; 5887 tgtdev_stripe->length = 5888 bbio->stripes[index_srcdev].length; 5889 tgtdev_stripe->dev = dev_replace->tgtdev; 5890 bbio->tgtdev_map[index_srcdev] = num_stripes; 5891 5892 tgtdev_indexes++; 5893 num_stripes++; 5894 } 5895 } 5896 5897 *num_stripes_ret = num_stripes; 5898 *max_errors_ret = max_errors; 5899 bbio->num_tgtdevs = tgtdev_indexes; 5900 *bbio_ret = bbio; 5901 } 5902 5903 static bool need_full_stripe(enum btrfs_map_op op) 5904 { 5905 return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS); 5906 } 5907 5908 /* 5909 * btrfs_get_io_geometry - calculates the geomery of a particular (address, len) 5910 * tuple. This information is used to calculate how big a 5911 * particular bio can get before it straddles a stripe. 5912 * 5913 * @fs_info - the filesystem 5914 * @logical - address that we want to figure out the geometry of 5915 * @len - the length of IO we are going to perform, starting at @logical 5916 * @op - type of operation - write or read 5917 * @io_geom - pointer used to return values 5918 * 5919 * Returns < 0 in case a chunk for the given logical address cannot be found, 5920 * usually shouldn't happen unless @logical is corrupted, 0 otherwise. 5921 */ 5922 int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 5923 u64 logical, u64 len, struct btrfs_io_geometry *io_geom) 5924 { 5925 struct extent_map *em; 5926 struct map_lookup *map; 5927 u64 offset; 5928 u64 stripe_offset; 5929 u64 stripe_nr; 5930 u64 stripe_len; 5931 u64 raid56_full_stripe_start = (u64)-1; 5932 int data_stripes; 5933 int ret = 0; 5934 5935 ASSERT(op != BTRFS_MAP_DISCARD); 5936 5937 em = btrfs_get_chunk_map(fs_info, logical, len); 5938 if (IS_ERR(em)) 5939 return PTR_ERR(em); 5940 5941 map = em->map_lookup; 5942 /* Offset of this logical address in the chunk */ 5943 offset = logical - em->start; 5944 /* Len of a stripe in a chunk */ 5945 stripe_len = map->stripe_len; 5946 /* Stripe wher this block falls in */ 5947 stripe_nr = div64_u64(offset, stripe_len); 5948 /* Offset of stripe in the chunk */ 5949 stripe_offset = stripe_nr * stripe_len; 5950 if (offset < stripe_offset) { 5951 btrfs_crit(fs_info, 5952 "stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu", 5953 stripe_offset, offset, em->start, logical, stripe_len); 5954 ret = -EINVAL; 5955 goto out; 5956 } 5957 5958 /* stripe_offset is the offset of this block in its stripe */ 5959 stripe_offset = offset - stripe_offset; 5960 data_stripes = nr_data_stripes(map); 5961 5962 if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { 5963 u64 max_len = stripe_len - stripe_offset; 5964 5965 /* 5966 * In case of raid56, we need to know the stripe aligned start 5967 */ 5968 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 5969 unsigned long full_stripe_len = stripe_len * data_stripes; 5970 raid56_full_stripe_start = offset; 5971 5972 /* 5973 * Allow a write of a full stripe, but make sure we 5974 * don't allow straddling of stripes 5975 */ 5976 raid56_full_stripe_start = div64_u64(raid56_full_stripe_start, 5977 full_stripe_len); 5978 raid56_full_stripe_start *= full_stripe_len; 5979 5980 /* 5981 * For writes to RAID[56], allow a full stripeset across 5982 * all disks. For other RAID types and for RAID[56] 5983 * reads, just allow a single stripe (on a single disk). 5984 */ 5985 if (op == BTRFS_MAP_WRITE) { 5986 max_len = stripe_len * data_stripes - 5987 (offset - raid56_full_stripe_start); 5988 } 5989 } 5990 len = min_t(u64, em->len - offset, max_len); 5991 } else { 5992 len = em->len - offset; 5993 } 5994 5995 io_geom->len = len; 5996 io_geom->offset = offset; 5997 io_geom->stripe_len = stripe_len; 5998 io_geom->stripe_nr = stripe_nr; 5999 io_geom->stripe_offset = stripe_offset; 6000 io_geom->raid56_stripe_offset = raid56_full_stripe_start; 6001 6002 out: 6003 /* once for us */ 6004 free_extent_map(em); 6005 return ret; 6006 } 6007 6008 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, 6009 enum btrfs_map_op op, 6010 u64 logical, u64 *length, 6011 struct btrfs_bio **bbio_ret, 6012 int mirror_num, int need_raid_map) 6013 { 6014 struct extent_map *em; 6015 struct map_lookup *map; 6016 u64 stripe_offset; 6017 u64 stripe_nr; 6018 u64 stripe_len; 6019 u32 stripe_index; 6020 int data_stripes; 6021 int i; 6022 int ret = 0; 6023 int num_stripes; 6024 int max_errors = 0; 6025 int tgtdev_indexes = 0; 6026 struct btrfs_bio *bbio = NULL; 6027 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; 6028 int dev_replace_is_ongoing = 0; 6029 int num_alloc_stripes; 6030 int patch_the_first_stripe_for_dev_replace = 0; 6031 u64 physical_to_patch_in_first_stripe = 0; 6032 u64 raid56_full_stripe_start = (u64)-1; 6033 struct btrfs_io_geometry geom; 6034 6035 ASSERT(bbio_ret); 6036 ASSERT(op != BTRFS_MAP_DISCARD); 6037 6038 ret = btrfs_get_io_geometry(fs_info, op, logical, *length, &geom); 6039 if (ret < 0) 6040 return ret; 6041 6042 em = btrfs_get_chunk_map(fs_info, logical, *length); 6043 ASSERT(!IS_ERR(em)); 6044 map = em->map_lookup; 6045 6046 *length = geom.len; 6047 stripe_len = geom.stripe_len; 6048 stripe_nr = geom.stripe_nr; 6049 stripe_offset = geom.stripe_offset; 6050 raid56_full_stripe_start = geom.raid56_stripe_offset; 6051 data_stripes = nr_data_stripes(map); 6052 6053 down_read(&dev_replace->rwsem); 6054 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace); 6055 /* 6056 * Hold the semaphore for read during the whole operation, write is 6057 * requested at commit time but must wait. 6058 */ 6059 if (!dev_replace_is_ongoing) 6060 up_read(&dev_replace->rwsem); 6061 6062 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 && 6063 !need_full_stripe(op) && dev_replace->tgtdev != NULL) { 6064 ret = get_extra_mirror_from_replace(fs_info, logical, *length, 6065 dev_replace->srcdev->devid, 6066 &mirror_num, 6067 &physical_to_patch_in_first_stripe); 6068 if (ret) 6069 goto out; 6070 else 6071 patch_the_first_stripe_for_dev_replace = 1; 6072 } else if (mirror_num > map->num_stripes) { 6073 mirror_num = 0; 6074 } 6075 6076 num_stripes = 1; 6077 stripe_index = 0; 6078 if (map->type & BTRFS_BLOCK_GROUP_RAID0) { 6079 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6080 &stripe_index); 6081 if (!need_full_stripe(op)) 6082 mirror_num = 1; 6083 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) { 6084 if (need_full_stripe(op)) 6085 num_stripes = map->num_stripes; 6086 else if (mirror_num) 6087 stripe_index = mirror_num - 1; 6088 else { 6089 stripe_index = find_live_mirror(fs_info, map, 0, 6090 dev_replace_is_ongoing); 6091 mirror_num = stripe_index + 1; 6092 } 6093 6094 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { 6095 if (need_full_stripe(op)) { 6096 num_stripes = map->num_stripes; 6097 } else if (mirror_num) { 6098 stripe_index = mirror_num - 1; 6099 } else { 6100 mirror_num = 1; 6101 } 6102 6103 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { 6104 u32 factor = map->num_stripes / map->sub_stripes; 6105 6106 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 6107 stripe_index *= map->sub_stripes; 6108 6109 if (need_full_stripe(op)) 6110 num_stripes = map->sub_stripes; 6111 else if (mirror_num) 6112 stripe_index += mirror_num - 1; 6113 else { 6114 int old_stripe_index = stripe_index; 6115 stripe_index = find_live_mirror(fs_info, map, 6116 stripe_index, 6117 dev_replace_is_ongoing); 6118 mirror_num = stripe_index - old_stripe_index + 1; 6119 } 6120 6121 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6122 if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) { 6123 /* push stripe_nr back to the start of the full stripe */ 6124 stripe_nr = div64_u64(raid56_full_stripe_start, 6125 stripe_len * data_stripes); 6126 6127 /* RAID[56] write or recovery. Return all stripes */ 6128 num_stripes = map->num_stripes; 6129 max_errors = nr_parity_stripes(map); 6130 6131 *length = map->stripe_len; 6132 stripe_index = 0; 6133 stripe_offset = 0; 6134 } else { 6135 /* 6136 * Mirror #0 or #1 means the original data block. 6137 * Mirror #2 is RAID5 parity block. 6138 * Mirror #3 is RAID6 Q block. 6139 */ 6140 stripe_nr = div_u64_rem(stripe_nr, 6141 data_stripes, &stripe_index); 6142 if (mirror_num > 1) 6143 stripe_index = data_stripes + mirror_num - 2; 6144 6145 /* We distribute the parity blocks across stripes */ 6146 div_u64_rem(stripe_nr + stripe_index, map->num_stripes, 6147 &stripe_index); 6148 if (!need_full_stripe(op) && mirror_num <= 1) 6149 mirror_num = 1; 6150 } 6151 } else { 6152 /* 6153 * after this, stripe_nr is the number of stripes on this 6154 * device we have to walk to find the data, and stripe_index is 6155 * the number of our device in the stripe array 6156 */ 6157 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6158 &stripe_index); 6159 mirror_num = stripe_index + 1; 6160 } 6161 if (stripe_index >= map->num_stripes) { 6162 btrfs_crit(fs_info, 6163 "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u", 6164 stripe_index, map->num_stripes); 6165 ret = -EINVAL; 6166 goto out; 6167 } 6168 6169 num_alloc_stripes = num_stripes; 6170 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) { 6171 if (op == BTRFS_MAP_WRITE) 6172 num_alloc_stripes <<= 1; 6173 if (op == BTRFS_MAP_GET_READ_MIRRORS) 6174 num_alloc_stripes++; 6175 tgtdev_indexes = num_stripes; 6176 } 6177 6178 bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes); 6179 if (!bbio) { 6180 ret = -ENOMEM; 6181 goto out; 6182 } 6183 6184 for (i = 0; i < num_stripes; i++) { 6185 bbio->stripes[i].physical = map->stripes[stripe_index].physical + 6186 stripe_offset + stripe_nr * map->stripe_len; 6187 bbio->stripes[i].dev = map->stripes[stripe_index].dev; 6188 stripe_index++; 6189 } 6190 6191 /* build raid_map */ 6192 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map && 6193 (need_full_stripe(op) || mirror_num > 1)) { 6194 u64 tmp; 6195 unsigned rot; 6196 6197 /* Work out the disk rotation on this stripe-set */ 6198 div_u64_rem(stripe_nr, num_stripes, &rot); 6199 6200 /* Fill in the logical address of each stripe */ 6201 tmp = stripe_nr * data_stripes; 6202 for (i = 0; i < data_stripes; i++) 6203 bbio->raid_map[(i+rot) % num_stripes] = 6204 em->start + (tmp + i) * map->stripe_len; 6205 6206 bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE; 6207 if (map->type & BTRFS_BLOCK_GROUP_RAID6) 6208 bbio->raid_map[(i+rot+1) % num_stripes] = 6209 RAID6_Q_STRIPE; 6210 6211 sort_parity_stripes(bbio, num_stripes); 6212 } 6213 6214 if (need_full_stripe(op)) 6215 max_errors = btrfs_chunk_max_errors(map); 6216 6217 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL && 6218 need_full_stripe(op)) { 6219 handle_ops_on_dev_replace(op, &bbio, dev_replace, &num_stripes, 6220 &max_errors); 6221 } 6222 6223 *bbio_ret = bbio; 6224 bbio->map_type = map->type; 6225 bbio->num_stripes = num_stripes; 6226 bbio->max_errors = max_errors; 6227 bbio->mirror_num = mirror_num; 6228 6229 /* 6230 * this is the case that REQ_READ && dev_replace_is_ongoing && 6231 * mirror_num == num_stripes + 1 && dev_replace target drive is 6232 * available as a mirror 6233 */ 6234 if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) { 6235 WARN_ON(num_stripes > 1); 6236 bbio->stripes[0].dev = dev_replace->tgtdev; 6237 bbio->stripes[0].physical = physical_to_patch_in_first_stripe; 6238 bbio->mirror_num = map->num_stripes + 1; 6239 } 6240 out: 6241 if (dev_replace_is_ongoing) { 6242 lockdep_assert_held(&dev_replace->rwsem); 6243 /* Unlock and let waiting writers proceed */ 6244 up_read(&dev_replace->rwsem); 6245 } 6246 free_extent_map(em); 6247 return ret; 6248 } 6249 6250 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 6251 u64 logical, u64 *length, 6252 struct btrfs_bio **bbio_ret, int mirror_num) 6253 { 6254 if (op == BTRFS_MAP_DISCARD) 6255 return __btrfs_map_block_for_discard(fs_info, logical, 6256 length, bbio_ret); 6257 6258 return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 6259 mirror_num, 0); 6260 } 6261 6262 /* For Scrub/replace */ 6263 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 6264 u64 logical, u64 *length, 6265 struct btrfs_bio **bbio_ret) 6266 { 6267 return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0, 1); 6268 } 6269 6270 static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio) 6271 { 6272 bio->bi_private = bbio->private; 6273 bio->bi_end_io = bbio->end_io; 6274 bio_endio(bio); 6275 6276 btrfs_put_bbio(bbio); 6277 } 6278 6279 static void btrfs_end_bio(struct bio *bio) 6280 { 6281 struct btrfs_bio *bbio = bio->bi_private; 6282 int is_orig_bio = 0; 6283 6284 if (bio->bi_status) { 6285 atomic_inc(&bbio->error); 6286 if (bio->bi_status == BLK_STS_IOERR || 6287 bio->bi_status == BLK_STS_TARGET) { 6288 struct btrfs_device *dev = btrfs_io_bio(bio)->device; 6289 6290 ASSERT(dev->bdev); 6291 if (bio_op(bio) == REQ_OP_WRITE) 6292 btrfs_dev_stat_inc_and_print(dev, 6293 BTRFS_DEV_STAT_WRITE_ERRS); 6294 else if (!(bio->bi_opf & REQ_RAHEAD)) 6295 btrfs_dev_stat_inc_and_print(dev, 6296 BTRFS_DEV_STAT_READ_ERRS); 6297 if (bio->bi_opf & REQ_PREFLUSH) 6298 btrfs_dev_stat_inc_and_print(dev, 6299 BTRFS_DEV_STAT_FLUSH_ERRS); 6300 } 6301 } 6302 6303 if (bio == bbio->orig_bio) 6304 is_orig_bio = 1; 6305 6306 btrfs_bio_counter_dec(bbio->fs_info); 6307 6308 if (atomic_dec_and_test(&bbio->stripes_pending)) { 6309 if (!is_orig_bio) { 6310 bio_put(bio); 6311 bio = bbio->orig_bio; 6312 } 6313 6314 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; 6315 /* only send an error to the higher layers if it is 6316 * beyond the tolerance of the btrfs bio 6317 */ 6318 if (atomic_read(&bbio->error) > bbio->max_errors) { 6319 bio->bi_status = BLK_STS_IOERR; 6320 } else { 6321 /* 6322 * this bio is actually up to date, we didn't 6323 * go over the max number of errors 6324 */ 6325 bio->bi_status = BLK_STS_OK; 6326 } 6327 6328 btrfs_end_bbio(bbio, bio); 6329 } else if (!is_orig_bio) { 6330 bio_put(bio); 6331 } 6332 } 6333 6334 static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio, 6335 u64 physical, struct btrfs_device *dev) 6336 { 6337 struct btrfs_fs_info *fs_info = bbio->fs_info; 6338 6339 bio->bi_private = bbio; 6340 btrfs_io_bio(bio)->device = dev; 6341 bio->bi_end_io = btrfs_end_bio; 6342 bio->bi_iter.bi_sector = physical >> 9; 6343 btrfs_debug_in_rcu(fs_info, 6344 "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u", 6345 bio_op(bio), bio->bi_opf, (u64)bio->bi_iter.bi_sector, 6346 (unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name), 6347 dev->devid, bio->bi_iter.bi_size); 6348 bio_set_dev(bio, dev->bdev); 6349 6350 btrfs_bio_counter_inc_noblocked(fs_info); 6351 6352 btrfsic_submit_bio(bio); 6353 } 6354 6355 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical) 6356 { 6357 atomic_inc(&bbio->error); 6358 if (atomic_dec_and_test(&bbio->stripes_pending)) { 6359 /* Should be the original bio. */ 6360 WARN_ON(bio != bbio->orig_bio); 6361 6362 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; 6363 bio->bi_iter.bi_sector = logical >> 9; 6364 if (atomic_read(&bbio->error) > bbio->max_errors) 6365 bio->bi_status = BLK_STS_IOERR; 6366 else 6367 bio->bi_status = BLK_STS_OK; 6368 btrfs_end_bbio(bbio, bio); 6369 } 6370 } 6371 6372 blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, 6373 int mirror_num) 6374 { 6375 struct btrfs_device *dev; 6376 struct bio *first_bio = bio; 6377 u64 logical = (u64)bio->bi_iter.bi_sector << 9; 6378 u64 length = 0; 6379 u64 map_length; 6380 int ret; 6381 int dev_nr; 6382 int total_devs; 6383 struct btrfs_bio *bbio = NULL; 6384 6385 length = bio->bi_iter.bi_size; 6386 map_length = length; 6387 6388 btrfs_bio_counter_inc_blocked(fs_info); 6389 ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical, 6390 &map_length, &bbio, mirror_num, 1); 6391 if (ret) { 6392 btrfs_bio_counter_dec(fs_info); 6393 return errno_to_blk_status(ret); 6394 } 6395 6396 total_devs = bbio->num_stripes; 6397 bbio->orig_bio = first_bio; 6398 bbio->private = first_bio->bi_private; 6399 bbio->end_io = first_bio->bi_end_io; 6400 bbio->fs_info = fs_info; 6401 atomic_set(&bbio->stripes_pending, bbio->num_stripes); 6402 6403 if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) && 6404 ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) { 6405 /* In this case, map_length has been set to the length of 6406 a single stripe; not the whole write */ 6407 if (bio_op(bio) == REQ_OP_WRITE) { 6408 ret = raid56_parity_write(fs_info, bio, bbio, 6409 map_length); 6410 } else { 6411 ret = raid56_parity_recover(fs_info, bio, bbio, 6412 map_length, mirror_num, 1); 6413 } 6414 6415 btrfs_bio_counter_dec(fs_info); 6416 return errno_to_blk_status(ret); 6417 } 6418 6419 if (map_length < length) { 6420 btrfs_crit(fs_info, 6421 "mapping failed logical %llu bio len %llu len %llu", 6422 logical, length, map_length); 6423 BUG(); 6424 } 6425 6426 for (dev_nr = 0; dev_nr < total_devs; dev_nr++) { 6427 dev = bbio->stripes[dev_nr].dev; 6428 if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING, 6429 &dev->dev_state) || 6430 (bio_op(first_bio) == REQ_OP_WRITE && 6431 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) { 6432 bbio_error(bbio, first_bio, logical); 6433 continue; 6434 } 6435 6436 if (dev_nr < total_devs - 1) 6437 bio = btrfs_bio_clone(first_bio); 6438 else 6439 bio = first_bio; 6440 6441 submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical, dev); 6442 } 6443 btrfs_bio_counter_dec(fs_info); 6444 return BLK_STS_OK; 6445 } 6446 6447 /* 6448 * Find a device specified by @devid or @uuid in the list of @fs_devices, or 6449 * return NULL. 6450 * 6451 * If devid and uuid are both specified, the match must be exact, otherwise 6452 * only devid is used. 6453 * 6454 * If @seed is true, traverse through the seed devices. 6455 */ 6456 struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices, 6457 u64 devid, u8 *uuid, u8 *fsid, 6458 bool seed) 6459 { 6460 struct btrfs_device *device; 6461 6462 while (fs_devices) { 6463 if (!fsid || 6464 !memcmp(fs_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE)) { 6465 list_for_each_entry(device, &fs_devices->devices, 6466 dev_list) { 6467 if (device->devid == devid && 6468 (!uuid || memcmp(device->uuid, uuid, 6469 BTRFS_UUID_SIZE) == 0)) 6470 return device; 6471 } 6472 } 6473 if (seed) 6474 fs_devices = fs_devices->seed; 6475 else 6476 return NULL; 6477 } 6478 return NULL; 6479 } 6480 6481 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices, 6482 u64 devid, u8 *dev_uuid) 6483 { 6484 struct btrfs_device *device; 6485 unsigned int nofs_flag; 6486 6487 /* 6488 * We call this under the chunk_mutex, so we want to use NOFS for this 6489 * allocation, however we don't want to change btrfs_alloc_device() to 6490 * always do NOFS because we use it in a lot of other GFP_KERNEL safe 6491 * places. 6492 */ 6493 nofs_flag = memalloc_nofs_save(); 6494 device = btrfs_alloc_device(NULL, &devid, dev_uuid); 6495 memalloc_nofs_restore(nofs_flag); 6496 if (IS_ERR(device)) 6497 return device; 6498 6499 list_add(&device->dev_list, &fs_devices->devices); 6500 device->fs_devices = fs_devices; 6501 fs_devices->num_devices++; 6502 6503 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 6504 fs_devices->missing_devices++; 6505 6506 return device; 6507 } 6508 6509 /** 6510 * btrfs_alloc_device - allocate struct btrfs_device 6511 * @fs_info: used only for generating a new devid, can be NULL if 6512 * devid is provided (i.e. @devid != NULL). 6513 * @devid: a pointer to devid for this device. If NULL a new devid 6514 * is generated. 6515 * @uuid: a pointer to UUID for this device. If NULL a new UUID 6516 * is generated. 6517 * 6518 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR() 6519 * on error. Returned struct is not linked onto any lists and must be 6520 * destroyed with btrfs_free_device. 6521 */ 6522 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, 6523 const u64 *devid, 6524 const u8 *uuid) 6525 { 6526 struct btrfs_device *dev; 6527 u64 tmp; 6528 6529 if (WARN_ON(!devid && !fs_info)) 6530 return ERR_PTR(-EINVAL); 6531 6532 dev = __alloc_device(); 6533 if (IS_ERR(dev)) 6534 return dev; 6535 6536 if (devid) 6537 tmp = *devid; 6538 else { 6539 int ret; 6540 6541 ret = find_next_devid(fs_info, &tmp); 6542 if (ret) { 6543 btrfs_free_device(dev); 6544 return ERR_PTR(ret); 6545 } 6546 } 6547 dev->devid = tmp; 6548 6549 if (uuid) 6550 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE); 6551 else 6552 generate_random_uuid(dev->uuid); 6553 6554 return dev; 6555 } 6556 6557 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info, 6558 u64 devid, u8 *uuid, bool error) 6559 { 6560 if (error) 6561 btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing", 6562 devid, uuid); 6563 else 6564 btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing", 6565 devid, uuid); 6566 } 6567 6568 static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes) 6569 { 6570 int index = btrfs_bg_flags_to_raid_index(type); 6571 int ncopies = btrfs_raid_array[index].ncopies; 6572 const int nparity = btrfs_raid_array[index].nparity; 6573 int data_stripes; 6574 6575 if (nparity) 6576 data_stripes = num_stripes - nparity; 6577 else 6578 data_stripes = num_stripes / ncopies; 6579 6580 return div_u64(chunk_len, data_stripes); 6581 } 6582 6583 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf, 6584 struct btrfs_chunk *chunk) 6585 { 6586 struct btrfs_fs_info *fs_info = leaf->fs_info; 6587 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 6588 struct map_lookup *map; 6589 struct extent_map *em; 6590 u64 logical; 6591 u64 length; 6592 u64 devid; 6593 u8 uuid[BTRFS_UUID_SIZE]; 6594 int num_stripes; 6595 int ret; 6596 int i; 6597 6598 logical = key->offset; 6599 length = btrfs_chunk_length(leaf, chunk); 6600 num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 6601 6602 /* 6603 * Only need to verify chunk item if we're reading from sys chunk array, 6604 * as chunk item in tree block is already verified by tree-checker. 6605 */ 6606 if (leaf->start == BTRFS_SUPER_INFO_OFFSET) { 6607 ret = btrfs_check_chunk_valid(leaf, chunk, logical); 6608 if (ret) 6609 return ret; 6610 } 6611 6612 read_lock(&map_tree->lock); 6613 em = lookup_extent_mapping(map_tree, logical, 1); 6614 read_unlock(&map_tree->lock); 6615 6616 /* already mapped? */ 6617 if (em && em->start <= logical && em->start + em->len > logical) { 6618 free_extent_map(em); 6619 return 0; 6620 } else if (em) { 6621 free_extent_map(em); 6622 } 6623 6624 em = alloc_extent_map(); 6625 if (!em) 6626 return -ENOMEM; 6627 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); 6628 if (!map) { 6629 free_extent_map(em); 6630 return -ENOMEM; 6631 } 6632 6633 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 6634 em->map_lookup = map; 6635 em->start = logical; 6636 em->len = length; 6637 em->orig_start = 0; 6638 em->block_start = 0; 6639 em->block_len = em->len; 6640 6641 map->num_stripes = num_stripes; 6642 map->io_width = btrfs_chunk_io_width(leaf, chunk); 6643 map->io_align = btrfs_chunk_io_align(leaf, chunk); 6644 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk); 6645 map->type = btrfs_chunk_type(leaf, chunk); 6646 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); 6647 map->verified_stripes = 0; 6648 em->orig_block_len = calc_stripe_length(map->type, em->len, 6649 map->num_stripes); 6650 for (i = 0; i < num_stripes; i++) { 6651 map->stripes[i].physical = 6652 btrfs_stripe_offset_nr(leaf, chunk, i); 6653 devid = btrfs_stripe_devid_nr(leaf, chunk, i); 6654 read_extent_buffer(leaf, uuid, (unsigned long) 6655 btrfs_stripe_dev_uuid_nr(chunk, i), 6656 BTRFS_UUID_SIZE); 6657 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, 6658 devid, uuid, NULL, true); 6659 if (!map->stripes[i].dev && 6660 !btrfs_test_opt(fs_info, DEGRADED)) { 6661 free_extent_map(em); 6662 btrfs_report_missing_device(fs_info, devid, uuid, true); 6663 return -ENOENT; 6664 } 6665 if (!map->stripes[i].dev) { 6666 map->stripes[i].dev = 6667 add_missing_dev(fs_info->fs_devices, devid, 6668 uuid); 6669 if (IS_ERR(map->stripes[i].dev)) { 6670 free_extent_map(em); 6671 btrfs_err(fs_info, 6672 "failed to init missing dev %llu: %ld", 6673 devid, PTR_ERR(map->stripes[i].dev)); 6674 return PTR_ERR(map->stripes[i].dev); 6675 } 6676 btrfs_report_missing_device(fs_info, devid, uuid, false); 6677 } 6678 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 6679 &(map->stripes[i].dev->dev_state)); 6680 6681 } 6682 6683 write_lock(&map_tree->lock); 6684 ret = add_extent_mapping(map_tree, em, 0); 6685 write_unlock(&map_tree->lock); 6686 if (ret < 0) { 6687 btrfs_err(fs_info, 6688 "failed to add chunk map, start=%llu len=%llu: %d", 6689 em->start, em->len, ret); 6690 } 6691 free_extent_map(em); 6692 6693 return ret; 6694 } 6695 6696 static void fill_device_from_item(struct extent_buffer *leaf, 6697 struct btrfs_dev_item *dev_item, 6698 struct btrfs_device *device) 6699 { 6700 unsigned long ptr; 6701 6702 device->devid = btrfs_device_id(leaf, dev_item); 6703 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item); 6704 device->total_bytes = device->disk_total_bytes; 6705 device->commit_total_bytes = device->disk_total_bytes; 6706 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item); 6707 device->commit_bytes_used = device->bytes_used; 6708 device->type = btrfs_device_type(leaf, dev_item); 6709 device->io_align = btrfs_device_io_align(leaf, dev_item); 6710 device->io_width = btrfs_device_io_width(leaf, dev_item); 6711 device->sector_size = btrfs_device_sector_size(leaf, dev_item); 6712 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID); 6713 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 6714 6715 ptr = btrfs_device_uuid(dev_item); 6716 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 6717 } 6718 6719 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info, 6720 u8 *fsid) 6721 { 6722 struct btrfs_fs_devices *fs_devices; 6723 int ret; 6724 6725 lockdep_assert_held(&uuid_mutex); 6726 ASSERT(fsid); 6727 6728 fs_devices = fs_info->fs_devices->seed; 6729 while (fs_devices) { 6730 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE)) 6731 return fs_devices; 6732 6733 fs_devices = fs_devices->seed; 6734 } 6735 6736 fs_devices = find_fsid(fsid, NULL); 6737 if (!fs_devices) { 6738 if (!btrfs_test_opt(fs_info, DEGRADED)) 6739 return ERR_PTR(-ENOENT); 6740 6741 fs_devices = alloc_fs_devices(fsid, NULL); 6742 if (IS_ERR(fs_devices)) 6743 return fs_devices; 6744 6745 fs_devices->seeding = true; 6746 fs_devices->opened = 1; 6747 return fs_devices; 6748 } 6749 6750 fs_devices = clone_fs_devices(fs_devices); 6751 if (IS_ERR(fs_devices)) 6752 return fs_devices; 6753 6754 ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder); 6755 if (ret) { 6756 free_fs_devices(fs_devices); 6757 fs_devices = ERR_PTR(ret); 6758 goto out; 6759 } 6760 6761 if (!fs_devices->seeding) { 6762 close_fs_devices(fs_devices); 6763 free_fs_devices(fs_devices); 6764 fs_devices = ERR_PTR(-EINVAL); 6765 goto out; 6766 } 6767 6768 fs_devices->seed = fs_info->fs_devices->seed; 6769 fs_info->fs_devices->seed = fs_devices; 6770 out: 6771 return fs_devices; 6772 } 6773 6774 static int read_one_dev(struct extent_buffer *leaf, 6775 struct btrfs_dev_item *dev_item) 6776 { 6777 struct btrfs_fs_info *fs_info = leaf->fs_info; 6778 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 6779 struct btrfs_device *device; 6780 u64 devid; 6781 int ret; 6782 u8 fs_uuid[BTRFS_FSID_SIZE]; 6783 u8 dev_uuid[BTRFS_UUID_SIZE]; 6784 6785 devid = btrfs_device_id(leaf, dev_item); 6786 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 6787 BTRFS_UUID_SIZE); 6788 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 6789 BTRFS_FSID_SIZE); 6790 6791 if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) { 6792 fs_devices = open_seed_devices(fs_info, fs_uuid); 6793 if (IS_ERR(fs_devices)) 6794 return PTR_ERR(fs_devices); 6795 } 6796 6797 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid, 6798 fs_uuid, true); 6799 if (!device) { 6800 if (!btrfs_test_opt(fs_info, DEGRADED)) { 6801 btrfs_report_missing_device(fs_info, devid, 6802 dev_uuid, true); 6803 return -ENOENT; 6804 } 6805 6806 device = add_missing_dev(fs_devices, devid, dev_uuid); 6807 if (IS_ERR(device)) { 6808 btrfs_err(fs_info, 6809 "failed to add missing dev %llu: %ld", 6810 devid, PTR_ERR(device)); 6811 return PTR_ERR(device); 6812 } 6813 btrfs_report_missing_device(fs_info, devid, dev_uuid, false); 6814 } else { 6815 if (!device->bdev) { 6816 if (!btrfs_test_opt(fs_info, DEGRADED)) { 6817 btrfs_report_missing_device(fs_info, 6818 devid, dev_uuid, true); 6819 return -ENOENT; 6820 } 6821 btrfs_report_missing_device(fs_info, devid, 6822 dev_uuid, false); 6823 } 6824 6825 if (!device->bdev && 6826 !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 6827 /* 6828 * this happens when a device that was properly setup 6829 * in the device info lists suddenly goes bad. 6830 * device->bdev is NULL, and so we have to set 6831 * device->missing to one here 6832 */ 6833 device->fs_devices->missing_devices++; 6834 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 6835 } 6836 6837 /* Move the device to its own fs_devices */ 6838 if (device->fs_devices != fs_devices) { 6839 ASSERT(test_bit(BTRFS_DEV_STATE_MISSING, 6840 &device->dev_state)); 6841 6842 list_move(&device->dev_list, &fs_devices->devices); 6843 device->fs_devices->num_devices--; 6844 fs_devices->num_devices++; 6845 6846 device->fs_devices->missing_devices--; 6847 fs_devices->missing_devices++; 6848 6849 device->fs_devices = fs_devices; 6850 } 6851 } 6852 6853 if (device->fs_devices != fs_info->fs_devices) { 6854 BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)); 6855 if (device->generation != 6856 btrfs_device_generation(leaf, dev_item)) 6857 return -EINVAL; 6858 } 6859 6860 fill_device_from_item(leaf, dev_item, device); 6861 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 6862 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 6863 !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 6864 device->fs_devices->total_rw_bytes += device->total_bytes; 6865 atomic64_add(device->total_bytes - device->bytes_used, 6866 &fs_info->free_chunk_space); 6867 } 6868 ret = 0; 6869 return ret; 6870 } 6871 6872 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info) 6873 { 6874 struct btrfs_root *root = fs_info->tree_root; 6875 struct btrfs_super_block *super_copy = fs_info->super_copy; 6876 struct extent_buffer *sb; 6877 struct btrfs_disk_key *disk_key; 6878 struct btrfs_chunk *chunk; 6879 u8 *array_ptr; 6880 unsigned long sb_array_offset; 6881 int ret = 0; 6882 u32 num_stripes; 6883 u32 array_size; 6884 u32 len = 0; 6885 u32 cur_offset; 6886 u64 type; 6887 struct btrfs_key key; 6888 6889 ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize); 6890 /* 6891 * This will create extent buffer of nodesize, superblock size is 6892 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will 6893 * overallocate but we can keep it as-is, only the first page is used. 6894 */ 6895 sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET); 6896 if (IS_ERR(sb)) 6897 return PTR_ERR(sb); 6898 set_extent_buffer_uptodate(sb); 6899 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0); 6900 /* 6901 * The sb extent buffer is artificial and just used to read the system array. 6902 * set_extent_buffer_uptodate() call does not properly mark all it's 6903 * pages up-to-date when the page is larger: extent does not cover the 6904 * whole page and consequently check_page_uptodate does not find all 6905 * the page's extents up-to-date (the hole beyond sb), 6906 * write_extent_buffer then triggers a WARN_ON. 6907 * 6908 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle, 6909 * but sb spans only this function. Add an explicit SetPageUptodate call 6910 * to silence the warning eg. on PowerPC 64. 6911 */ 6912 if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE) 6913 SetPageUptodate(sb->pages[0]); 6914 6915 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); 6916 array_size = btrfs_super_sys_array_size(super_copy); 6917 6918 array_ptr = super_copy->sys_chunk_array; 6919 sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array); 6920 cur_offset = 0; 6921 6922 while (cur_offset < array_size) { 6923 disk_key = (struct btrfs_disk_key *)array_ptr; 6924 len = sizeof(*disk_key); 6925 if (cur_offset + len > array_size) 6926 goto out_short_read; 6927 6928 btrfs_disk_key_to_cpu(&key, disk_key); 6929 6930 array_ptr += len; 6931 sb_array_offset += len; 6932 cur_offset += len; 6933 6934 if (key.type != BTRFS_CHUNK_ITEM_KEY) { 6935 btrfs_err(fs_info, 6936 "unexpected item type %u in sys_array at offset %u", 6937 (u32)key.type, cur_offset); 6938 ret = -EIO; 6939 break; 6940 } 6941 6942 chunk = (struct btrfs_chunk *)sb_array_offset; 6943 /* 6944 * At least one btrfs_chunk with one stripe must be present, 6945 * exact stripe count check comes afterwards 6946 */ 6947 len = btrfs_chunk_item_size(1); 6948 if (cur_offset + len > array_size) 6949 goto out_short_read; 6950 6951 num_stripes = btrfs_chunk_num_stripes(sb, chunk); 6952 if (!num_stripes) { 6953 btrfs_err(fs_info, 6954 "invalid number of stripes %u in sys_array at offset %u", 6955 num_stripes, cur_offset); 6956 ret = -EIO; 6957 break; 6958 } 6959 6960 type = btrfs_chunk_type(sb, chunk); 6961 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) { 6962 btrfs_err(fs_info, 6963 "invalid chunk type %llu in sys_array at offset %u", 6964 type, cur_offset); 6965 ret = -EIO; 6966 break; 6967 } 6968 6969 len = btrfs_chunk_item_size(num_stripes); 6970 if (cur_offset + len > array_size) 6971 goto out_short_read; 6972 6973 ret = read_one_chunk(&key, sb, chunk); 6974 if (ret) 6975 break; 6976 6977 array_ptr += len; 6978 sb_array_offset += len; 6979 cur_offset += len; 6980 } 6981 clear_extent_buffer_uptodate(sb); 6982 free_extent_buffer_stale(sb); 6983 return ret; 6984 6985 out_short_read: 6986 btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u", 6987 len, cur_offset); 6988 clear_extent_buffer_uptodate(sb); 6989 free_extent_buffer_stale(sb); 6990 return -EIO; 6991 } 6992 6993 /* 6994 * Check if all chunks in the fs are OK for read-write degraded mount 6995 * 6996 * If the @failing_dev is specified, it's accounted as missing. 6997 * 6998 * Return true if all chunks meet the minimal RW mount requirements. 6999 * Return false if any chunk doesn't meet the minimal RW mount requirements. 7000 */ 7001 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info, 7002 struct btrfs_device *failing_dev) 7003 { 7004 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 7005 struct extent_map *em; 7006 u64 next_start = 0; 7007 bool ret = true; 7008 7009 read_lock(&map_tree->lock); 7010 em = lookup_extent_mapping(map_tree, 0, (u64)-1); 7011 read_unlock(&map_tree->lock); 7012 /* No chunk at all? Return false anyway */ 7013 if (!em) { 7014 ret = false; 7015 goto out; 7016 } 7017 while (em) { 7018 struct map_lookup *map; 7019 int missing = 0; 7020 int max_tolerated; 7021 int i; 7022 7023 map = em->map_lookup; 7024 max_tolerated = 7025 btrfs_get_num_tolerated_disk_barrier_failures( 7026 map->type); 7027 for (i = 0; i < map->num_stripes; i++) { 7028 struct btrfs_device *dev = map->stripes[i].dev; 7029 7030 if (!dev || !dev->bdev || 7031 test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) || 7032 dev->last_flush_error) 7033 missing++; 7034 else if (failing_dev && failing_dev == dev) 7035 missing++; 7036 } 7037 if (missing > max_tolerated) { 7038 if (!failing_dev) 7039 btrfs_warn(fs_info, 7040 "chunk %llu missing %d devices, max tolerance is %d for writable mount", 7041 em->start, missing, max_tolerated); 7042 free_extent_map(em); 7043 ret = false; 7044 goto out; 7045 } 7046 next_start = extent_map_end(em); 7047 free_extent_map(em); 7048 7049 read_lock(&map_tree->lock); 7050 em = lookup_extent_mapping(map_tree, next_start, 7051 (u64)(-1) - next_start); 7052 read_unlock(&map_tree->lock); 7053 } 7054 out: 7055 return ret; 7056 } 7057 7058 static void readahead_tree_node_children(struct extent_buffer *node) 7059 { 7060 int i; 7061 const int nr_items = btrfs_header_nritems(node); 7062 7063 for (i = 0; i < nr_items; i++) { 7064 u64 start; 7065 7066 start = btrfs_node_blockptr(node, i); 7067 readahead_tree_block(node->fs_info, start); 7068 } 7069 } 7070 7071 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) 7072 { 7073 struct btrfs_root *root = fs_info->chunk_root; 7074 struct btrfs_path *path; 7075 struct extent_buffer *leaf; 7076 struct btrfs_key key; 7077 struct btrfs_key found_key; 7078 int ret; 7079 int slot; 7080 u64 total_dev = 0; 7081 u64 last_ra_node = 0; 7082 7083 path = btrfs_alloc_path(); 7084 if (!path) 7085 return -ENOMEM; 7086 7087 /* 7088 * uuid_mutex is needed only if we are mounting a sprout FS 7089 * otherwise we don't need it. 7090 */ 7091 mutex_lock(&uuid_mutex); 7092 7093 /* 7094 * It is possible for mount and umount to race in such a way that 7095 * we execute this code path, but open_fs_devices failed to clear 7096 * total_rw_bytes. We certainly want it cleared before reading the 7097 * device items, so clear it here. 7098 */ 7099 fs_info->fs_devices->total_rw_bytes = 0; 7100 7101 /* 7102 * Read all device items, and then all the chunk items. All 7103 * device items are found before any chunk item (their object id 7104 * is smaller than the lowest possible object id for a chunk 7105 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID). 7106 */ 7107 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 7108 key.offset = 0; 7109 key.type = 0; 7110 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 7111 if (ret < 0) 7112 goto error; 7113 while (1) { 7114 struct extent_buffer *node; 7115 7116 leaf = path->nodes[0]; 7117 slot = path->slots[0]; 7118 if (slot >= btrfs_header_nritems(leaf)) { 7119 ret = btrfs_next_leaf(root, path); 7120 if (ret == 0) 7121 continue; 7122 if (ret < 0) 7123 goto error; 7124 break; 7125 } 7126 /* 7127 * The nodes on level 1 are not locked but we don't need to do 7128 * that during mount time as nothing else can access the tree 7129 */ 7130 node = path->nodes[1]; 7131 if (node) { 7132 if (last_ra_node != node->start) { 7133 readahead_tree_node_children(node); 7134 last_ra_node = node->start; 7135 } 7136 } 7137 btrfs_item_key_to_cpu(leaf, &found_key, slot); 7138 if (found_key.type == BTRFS_DEV_ITEM_KEY) { 7139 struct btrfs_dev_item *dev_item; 7140 dev_item = btrfs_item_ptr(leaf, slot, 7141 struct btrfs_dev_item); 7142 ret = read_one_dev(leaf, dev_item); 7143 if (ret) 7144 goto error; 7145 total_dev++; 7146 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { 7147 struct btrfs_chunk *chunk; 7148 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 7149 mutex_lock(&fs_info->chunk_mutex); 7150 ret = read_one_chunk(&found_key, leaf, chunk); 7151 mutex_unlock(&fs_info->chunk_mutex); 7152 if (ret) 7153 goto error; 7154 } 7155 path->slots[0]++; 7156 } 7157 7158 /* 7159 * After loading chunk tree, we've got all device information, 7160 * do another round of validation checks. 7161 */ 7162 if (total_dev != fs_info->fs_devices->total_devices) { 7163 btrfs_err(fs_info, 7164 "super_num_devices %llu mismatch with num_devices %llu found here", 7165 btrfs_super_num_devices(fs_info->super_copy), 7166 total_dev); 7167 ret = -EINVAL; 7168 goto error; 7169 } 7170 if (btrfs_super_total_bytes(fs_info->super_copy) < 7171 fs_info->fs_devices->total_rw_bytes) { 7172 btrfs_err(fs_info, 7173 "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu", 7174 btrfs_super_total_bytes(fs_info->super_copy), 7175 fs_info->fs_devices->total_rw_bytes); 7176 ret = -EINVAL; 7177 goto error; 7178 } 7179 ret = 0; 7180 error: 7181 mutex_unlock(&uuid_mutex); 7182 7183 btrfs_free_path(path); 7184 return ret; 7185 } 7186 7187 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info) 7188 { 7189 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7190 struct btrfs_device *device; 7191 7192 while (fs_devices) { 7193 mutex_lock(&fs_devices->device_list_mutex); 7194 list_for_each_entry(device, &fs_devices->devices, dev_list) 7195 device->fs_info = fs_info; 7196 mutex_unlock(&fs_devices->device_list_mutex); 7197 7198 fs_devices = fs_devices->seed; 7199 } 7200 } 7201 7202 static u64 btrfs_dev_stats_value(const struct extent_buffer *eb, 7203 const struct btrfs_dev_stats_item *ptr, 7204 int index) 7205 { 7206 u64 val; 7207 7208 read_extent_buffer(eb, &val, 7209 offsetof(struct btrfs_dev_stats_item, values) + 7210 ((unsigned long)ptr) + (index * sizeof(u64)), 7211 sizeof(val)); 7212 return val; 7213 } 7214 7215 static void btrfs_set_dev_stats_value(struct extent_buffer *eb, 7216 struct btrfs_dev_stats_item *ptr, 7217 int index, u64 val) 7218 { 7219 write_extent_buffer(eb, &val, 7220 offsetof(struct btrfs_dev_stats_item, values) + 7221 ((unsigned long)ptr) + (index * sizeof(u64)), 7222 sizeof(val)); 7223 } 7224 7225 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info) 7226 { 7227 struct btrfs_key key; 7228 struct btrfs_root *dev_root = fs_info->dev_root; 7229 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7230 struct extent_buffer *eb; 7231 int slot; 7232 int ret = 0; 7233 struct btrfs_device *device; 7234 struct btrfs_path *path = NULL; 7235 int i; 7236 7237 path = btrfs_alloc_path(); 7238 if (!path) 7239 return -ENOMEM; 7240 7241 mutex_lock(&fs_devices->device_list_mutex); 7242 list_for_each_entry(device, &fs_devices->devices, dev_list) { 7243 int item_size; 7244 struct btrfs_dev_stats_item *ptr; 7245 7246 key.objectid = BTRFS_DEV_STATS_OBJECTID; 7247 key.type = BTRFS_PERSISTENT_ITEM_KEY; 7248 key.offset = device->devid; 7249 ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0); 7250 if (ret) { 7251 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7252 btrfs_dev_stat_set(device, i, 0); 7253 device->dev_stats_valid = 1; 7254 btrfs_release_path(path); 7255 continue; 7256 } 7257 slot = path->slots[0]; 7258 eb = path->nodes[0]; 7259 item_size = btrfs_item_size_nr(eb, slot); 7260 7261 ptr = btrfs_item_ptr(eb, slot, 7262 struct btrfs_dev_stats_item); 7263 7264 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) { 7265 if (item_size >= (1 + i) * sizeof(__le64)) 7266 btrfs_dev_stat_set(device, i, 7267 btrfs_dev_stats_value(eb, ptr, i)); 7268 else 7269 btrfs_dev_stat_set(device, i, 0); 7270 } 7271 7272 device->dev_stats_valid = 1; 7273 btrfs_dev_stat_print_on_load(device); 7274 btrfs_release_path(path); 7275 } 7276 mutex_unlock(&fs_devices->device_list_mutex); 7277 7278 btrfs_free_path(path); 7279 return ret < 0 ? ret : 0; 7280 } 7281 7282 static int update_dev_stat_item(struct btrfs_trans_handle *trans, 7283 struct btrfs_device *device) 7284 { 7285 struct btrfs_fs_info *fs_info = trans->fs_info; 7286 struct btrfs_root *dev_root = fs_info->dev_root; 7287 struct btrfs_path *path; 7288 struct btrfs_key key; 7289 struct extent_buffer *eb; 7290 struct btrfs_dev_stats_item *ptr; 7291 int ret; 7292 int i; 7293 7294 key.objectid = BTRFS_DEV_STATS_OBJECTID; 7295 key.type = BTRFS_PERSISTENT_ITEM_KEY; 7296 key.offset = device->devid; 7297 7298 path = btrfs_alloc_path(); 7299 if (!path) 7300 return -ENOMEM; 7301 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1); 7302 if (ret < 0) { 7303 btrfs_warn_in_rcu(fs_info, 7304 "error %d while searching for dev_stats item for device %s", 7305 ret, rcu_str_deref(device->name)); 7306 goto out; 7307 } 7308 7309 if (ret == 0 && 7310 btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) { 7311 /* need to delete old one and insert a new one */ 7312 ret = btrfs_del_item(trans, dev_root, path); 7313 if (ret != 0) { 7314 btrfs_warn_in_rcu(fs_info, 7315 "delete too small dev_stats item for device %s failed %d", 7316 rcu_str_deref(device->name), ret); 7317 goto out; 7318 } 7319 ret = 1; 7320 } 7321 7322 if (ret == 1) { 7323 /* need to insert a new item */ 7324 btrfs_release_path(path); 7325 ret = btrfs_insert_empty_item(trans, dev_root, path, 7326 &key, sizeof(*ptr)); 7327 if (ret < 0) { 7328 btrfs_warn_in_rcu(fs_info, 7329 "insert dev_stats item for device %s failed %d", 7330 rcu_str_deref(device->name), ret); 7331 goto out; 7332 } 7333 } 7334 7335 eb = path->nodes[0]; 7336 ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item); 7337 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7338 btrfs_set_dev_stats_value(eb, ptr, i, 7339 btrfs_dev_stat_read(device, i)); 7340 btrfs_mark_buffer_dirty(eb); 7341 7342 out: 7343 btrfs_free_path(path); 7344 return ret; 7345 } 7346 7347 /* 7348 * called from commit_transaction. Writes all changed device stats to disk. 7349 */ 7350 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans) 7351 { 7352 struct btrfs_fs_info *fs_info = trans->fs_info; 7353 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7354 struct btrfs_device *device; 7355 int stats_cnt; 7356 int ret = 0; 7357 7358 mutex_lock(&fs_devices->device_list_mutex); 7359 list_for_each_entry(device, &fs_devices->devices, dev_list) { 7360 stats_cnt = atomic_read(&device->dev_stats_ccnt); 7361 if (!device->dev_stats_valid || stats_cnt == 0) 7362 continue; 7363 7364 7365 /* 7366 * There is a LOAD-LOAD control dependency between the value of 7367 * dev_stats_ccnt and updating the on-disk values which requires 7368 * reading the in-memory counters. Such control dependencies 7369 * require explicit read memory barriers. 7370 * 7371 * This memory barriers pairs with smp_mb__before_atomic in 7372 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full 7373 * barrier implied by atomic_xchg in 7374 * btrfs_dev_stats_read_and_reset 7375 */ 7376 smp_rmb(); 7377 7378 ret = update_dev_stat_item(trans, device); 7379 if (!ret) 7380 atomic_sub(stats_cnt, &device->dev_stats_ccnt); 7381 } 7382 mutex_unlock(&fs_devices->device_list_mutex); 7383 7384 return ret; 7385 } 7386 7387 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index) 7388 { 7389 btrfs_dev_stat_inc(dev, index); 7390 btrfs_dev_stat_print_on_error(dev); 7391 } 7392 7393 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev) 7394 { 7395 if (!dev->dev_stats_valid) 7396 return; 7397 btrfs_err_rl_in_rcu(dev->fs_info, 7398 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u", 7399 rcu_str_deref(dev->name), 7400 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), 7401 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), 7402 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), 7403 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS), 7404 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS)); 7405 } 7406 7407 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev) 7408 { 7409 int i; 7410 7411 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7412 if (btrfs_dev_stat_read(dev, i) != 0) 7413 break; 7414 if (i == BTRFS_DEV_STAT_VALUES_MAX) 7415 return; /* all values == 0, suppress message */ 7416 7417 btrfs_info_in_rcu(dev->fs_info, 7418 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u", 7419 rcu_str_deref(dev->name), 7420 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), 7421 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), 7422 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), 7423 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS), 7424 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS)); 7425 } 7426 7427 int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info, 7428 struct btrfs_ioctl_get_dev_stats *stats) 7429 { 7430 struct btrfs_device *dev; 7431 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7432 int i; 7433 7434 mutex_lock(&fs_devices->device_list_mutex); 7435 dev = btrfs_find_device(fs_info->fs_devices, stats->devid, NULL, NULL, 7436 true); 7437 mutex_unlock(&fs_devices->device_list_mutex); 7438 7439 if (!dev) { 7440 btrfs_warn(fs_info, "get dev_stats failed, device not found"); 7441 return -ENODEV; 7442 } else if (!dev->dev_stats_valid) { 7443 btrfs_warn(fs_info, "get dev_stats failed, not yet valid"); 7444 return -ENODEV; 7445 } else if (stats->flags & BTRFS_DEV_STATS_RESET) { 7446 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) { 7447 if (stats->nr_items > i) 7448 stats->values[i] = 7449 btrfs_dev_stat_read_and_reset(dev, i); 7450 else 7451 btrfs_dev_stat_set(dev, i, 0); 7452 } 7453 btrfs_info(fs_info, "device stats zeroed by %s (%d)", 7454 current->comm, task_pid_nr(current)); 7455 } else { 7456 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7457 if (stats->nr_items > i) 7458 stats->values[i] = btrfs_dev_stat_read(dev, i); 7459 } 7460 if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX) 7461 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX; 7462 return 0; 7463 } 7464 7465 /* 7466 * Update the size and bytes used for each device where it changed. This is 7467 * delayed since we would otherwise get errors while writing out the 7468 * superblocks. 7469 * 7470 * Must be invoked during transaction commit. 7471 */ 7472 void btrfs_commit_device_sizes(struct btrfs_transaction *trans) 7473 { 7474 struct btrfs_device *curr, *next; 7475 7476 ASSERT(trans->state == TRANS_STATE_COMMIT_DOING); 7477 7478 if (list_empty(&trans->dev_update_list)) 7479 return; 7480 7481 /* 7482 * We don't need the device_list_mutex here. This list is owned by the 7483 * transaction and the transaction must complete before the device is 7484 * released. 7485 */ 7486 mutex_lock(&trans->fs_info->chunk_mutex); 7487 list_for_each_entry_safe(curr, next, &trans->dev_update_list, 7488 post_commit_list) { 7489 list_del_init(&curr->post_commit_list); 7490 curr->commit_total_bytes = curr->disk_total_bytes; 7491 curr->commit_bytes_used = curr->bytes_used; 7492 } 7493 mutex_unlock(&trans->fs_info->chunk_mutex); 7494 } 7495 7496 void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info) 7497 { 7498 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7499 while (fs_devices) { 7500 fs_devices->fs_info = fs_info; 7501 fs_devices = fs_devices->seed; 7502 } 7503 } 7504 7505 void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info) 7506 { 7507 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7508 while (fs_devices) { 7509 fs_devices->fs_info = NULL; 7510 fs_devices = fs_devices->seed; 7511 } 7512 } 7513 7514 /* 7515 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10. 7516 */ 7517 int btrfs_bg_type_to_factor(u64 flags) 7518 { 7519 const int index = btrfs_bg_flags_to_raid_index(flags); 7520 7521 return btrfs_raid_array[index].ncopies; 7522 } 7523 7524 7525 7526 static int verify_one_dev_extent(struct btrfs_fs_info *fs_info, 7527 u64 chunk_offset, u64 devid, 7528 u64 physical_offset, u64 physical_len) 7529 { 7530 struct extent_map_tree *em_tree = &fs_info->mapping_tree; 7531 struct extent_map *em; 7532 struct map_lookup *map; 7533 struct btrfs_device *dev; 7534 u64 stripe_len; 7535 bool found = false; 7536 int ret = 0; 7537 int i; 7538 7539 read_lock(&em_tree->lock); 7540 em = lookup_extent_mapping(em_tree, chunk_offset, 1); 7541 read_unlock(&em_tree->lock); 7542 7543 if (!em) { 7544 btrfs_err(fs_info, 7545 "dev extent physical offset %llu on devid %llu doesn't have corresponding chunk", 7546 physical_offset, devid); 7547 ret = -EUCLEAN; 7548 goto out; 7549 } 7550 7551 map = em->map_lookup; 7552 stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes); 7553 if (physical_len != stripe_len) { 7554 btrfs_err(fs_info, 7555 "dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu", 7556 physical_offset, devid, em->start, physical_len, 7557 stripe_len); 7558 ret = -EUCLEAN; 7559 goto out; 7560 } 7561 7562 for (i = 0; i < map->num_stripes; i++) { 7563 if (map->stripes[i].dev->devid == devid && 7564 map->stripes[i].physical == physical_offset) { 7565 found = true; 7566 if (map->verified_stripes >= map->num_stripes) { 7567 btrfs_err(fs_info, 7568 "too many dev extents for chunk %llu found", 7569 em->start); 7570 ret = -EUCLEAN; 7571 goto out; 7572 } 7573 map->verified_stripes++; 7574 break; 7575 } 7576 } 7577 if (!found) { 7578 btrfs_err(fs_info, 7579 "dev extent physical offset %llu devid %llu has no corresponding chunk", 7580 physical_offset, devid); 7581 ret = -EUCLEAN; 7582 } 7583 7584 /* Make sure no dev extent is beyond device bondary */ 7585 dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true); 7586 if (!dev) { 7587 btrfs_err(fs_info, "failed to find devid %llu", devid); 7588 ret = -EUCLEAN; 7589 goto out; 7590 } 7591 7592 /* It's possible this device is a dummy for seed device */ 7593 if (dev->disk_total_bytes == 0) { 7594 dev = btrfs_find_device(fs_info->fs_devices->seed, devid, NULL, 7595 NULL, false); 7596 if (!dev) { 7597 btrfs_err(fs_info, "failed to find seed devid %llu", 7598 devid); 7599 ret = -EUCLEAN; 7600 goto out; 7601 } 7602 } 7603 7604 if (physical_offset + physical_len > dev->disk_total_bytes) { 7605 btrfs_err(fs_info, 7606 "dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu", 7607 devid, physical_offset, physical_len, 7608 dev->disk_total_bytes); 7609 ret = -EUCLEAN; 7610 goto out; 7611 } 7612 out: 7613 free_extent_map(em); 7614 return ret; 7615 } 7616 7617 static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info) 7618 { 7619 struct extent_map_tree *em_tree = &fs_info->mapping_tree; 7620 struct extent_map *em; 7621 struct rb_node *node; 7622 int ret = 0; 7623 7624 read_lock(&em_tree->lock); 7625 for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) { 7626 em = rb_entry(node, struct extent_map, rb_node); 7627 if (em->map_lookup->num_stripes != 7628 em->map_lookup->verified_stripes) { 7629 btrfs_err(fs_info, 7630 "chunk %llu has missing dev extent, have %d expect %d", 7631 em->start, em->map_lookup->verified_stripes, 7632 em->map_lookup->num_stripes); 7633 ret = -EUCLEAN; 7634 goto out; 7635 } 7636 } 7637 out: 7638 read_unlock(&em_tree->lock); 7639 return ret; 7640 } 7641 7642 /* 7643 * Ensure that all dev extents are mapped to correct chunk, otherwise 7644 * later chunk allocation/free would cause unexpected behavior. 7645 * 7646 * NOTE: This will iterate through the whole device tree, which should be of 7647 * the same size level as the chunk tree. This slightly increases mount time. 7648 */ 7649 int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info) 7650 { 7651 struct btrfs_path *path; 7652 struct btrfs_root *root = fs_info->dev_root; 7653 struct btrfs_key key; 7654 u64 prev_devid = 0; 7655 u64 prev_dev_ext_end = 0; 7656 int ret = 0; 7657 7658 key.objectid = 1; 7659 key.type = BTRFS_DEV_EXTENT_KEY; 7660 key.offset = 0; 7661 7662 path = btrfs_alloc_path(); 7663 if (!path) 7664 return -ENOMEM; 7665 7666 path->reada = READA_FORWARD; 7667 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 7668 if (ret < 0) 7669 goto out; 7670 7671 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { 7672 ret = btrfs_next_item(root, path); 7673 if (ret < 0) 7674 goto out; 7675 /* No dev extents at all? Not good */ 7676 if (ret > 0) { 7677 ret = -EUCLEAN; 7678 goto out; 7679 } 7680 } 7681 while (1) { 7682 struct extent_buffer *leaf = path->nodes[0]; 7683 struct btrfs_dev_extent *dext; 7684 int slot = path->slots[0]; 7685 u64 chunk_offset; 7686 u64 physical_offset; 7687 u64 physical_len; 7688 u64 devid; 7689 7690 btrfs_item_key_to_cpu(leaf, &key, slot); 7691 if (key.type != BTRFS_DEV_EXTENT_KEY) 7692 break; 7693 devid = key.objectid; 7694 physical_offset = key.offset; 7695 7696 dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent); 7697 chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext); 7698 physical_len = btrfs_dev_extent_length(leaf, dext); 7699 7700 /* Check if this dev extent overlaps with the previous one */ 7701 if (devid == prev_devid && physical_offset < prev_dev_ext_end) { 7702 btrfs_err(fs_info, 7703 "dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu", 7704 devid, physical_offset, prev_dev_ext_end); 7705 ret = -EUCLEAN; 7706 goto out; 7707 } 7708 7709 ret = verify_one_dev_extent(fs_info, chunk_offset, devid, 7710 physical_offset, physical_len); 7711 if (ret < 0) 7712 goto out; 7713 prev_devid = devid; 7714 prev_dev_ext_end = physical_offset + physical_len; 7715 7716 ret = btrfs_next_item(root, path); 7717 if (ret < 0) 7718 goto out; 7719 if (ret > 0) { 7720 ret = 0; 7721 break; 7722 } 7723 } 7724 7725 /* Ensure all chunks have corresponding dev extents */ 7726 ret = verify_chunk_dev_extent_mapping(fs_info); 7727 out: 7728 btrfs_free_path(path); 7729 return ret; 7730 } 7731 7732 /* 7733 * Check whether the given block group or device is pinned by any inode being 7734 * used as a swapfile. 7735 */ 7736 bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr) 7737 { 7738 struct btrfs_swapfile_pin *sp; 7739 struct rb_node *node; 7740 7741 spin_lock(&fs_info->swapfile_pins_lock); 7742 node = fs_info->swapfile_pins.rb_node; 7743 while (node) { 7744 sp = rb_entry(node, struct btrfs_swapfile_pin, node); 7745 if (ptr < sp->ptr) 7746 node = node->rb_left; 7747 else if (ptr > sp->ptr) 7748 node = node->rb_right; 7749 else 7750 break; 7751 } 7752 spin_unlock(&fs_info->swapfile_pins_lock); 7753 return node != NULL; 7754 } 7755