// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "math.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes	= 2,
		.dev_stripes	= 1,
		.devs_max	= 0,	/* 0 == as many as possible */
		.devs_min	= 4,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "raid10",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 2,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "raid1",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes	= 1,
		.dev_stripes	= 2,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "dup",
		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 0,
		.raid_name	= "raid0",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 0,
		.raid_name	= "single",
		.bg_flag	= 0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 1,
		.raid_name	= "raid5",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 2,
		.raid_name	= "raid6",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};

const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}

/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

#define DESCRIBE_FLAG(flag, desc)					\
	do {								\
		if (flags & (flag)) {					\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)			\
				goto out_overflow;			\
			size_bp -= ret;					\
			bp += ret;					\
			flags &= ~(flag);				\
		}							\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide sufficiently
	 * large buffer
	 */
out_overflow:;
}

static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_bio **bbio_ret,
			     int mirror_num, int need_raid_map);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list!
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, ie. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   volume_mutex
 *     device_list_mutex
 *       chunk_mutex
 *     balance_mutex
 *
 *
 * Exclusive operations, BTRFS_FS_EXCL_OP
 * ======================================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * BTRFS_FS_EXCL_OP flag is set and cleared using atomic operations.
 * During the course of Paused state, the BTRFS_FS_EXCL_OP remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * BTRFS_FS_EXCL_OP flag is cleared when the device operation is canceled or
 * completed.
 */

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head *btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:		if not NULL, copy the UUID to fs_devices::fsid
 * @metadata_fsid:	if not NULL, copy the UUID to fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
313 */ 314 static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid, 315 const u8 *metadata_fsid) 316 { 317 struct btrfs_fs_devices *fs_devs; 318 319 fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL); 320 if (!fs_devs) 321 return ERR_PTR(-ENOMEM); 322 323 mutex_init(&fs_devs->device_list_mutex); 324 325 INIT_LIST_HEAD(&fs_devs->devices); 326 INIT_LIST_HEAD(&fs_devs->alloc_list); 327 INIT_LIST_HEAD(&fs_devs->fs_list); 328 if (fsid) 329 memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE); 330 331 if (metadata_fsid) 332 memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE); 333 else if (fsid) 334 memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE); 335 336 return fs_devs; 337 } 338 339 void btrfs_free_device(struct btrfs_device *device) 340 { 341 WARN_ON(!list_empty(&device->post_commit_list)); 342 rcu_string_free(device->name); 343 extent_io_tree_release(&device->alloc_state); 344 bio_put(device->flush_bio); 345 kfree(device); 346 } 347 348 static void free_fs_devices(struct btrfs_fs_devices *fs_devices) 349 { 350 struct btrfs_device *device; 351 WARN_ON(fs_devices->opened); 352 while (!list_empty(&fs_devices->devices)) { 353 device = list_entry(fs_devices->devices.next, 354 struct btrfs_device, dev_list); 355 list_del(&device->dev_list); 356 btrfs_free_device(device); 357 } 358 kfree(fs_devices); 359 } 360 361 static void btrfs_kobject_uevent(struct block_device *bdev, 362 enum kobject_action action) 363 { 364 int ret; 365 366 ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action); 367 if (ret) 368 pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n", 369 action, 370 kobject_name(&disk_to_dev(bdev->bd_disk)->kobj), 371 &disk_to_dev(bdev->bd_disk)->kobj); 372 } 373 374 void __exit btrfs_cleanup_fs_uuids(void) 375 { 376 struct btrfs_fs_devices *fs_devices; 377 378 while (!list_empty(&fs_uuids)) { 379 fs_devices = list_entry(fs_uuids.next, 380 struct btrfs_fs_devices, fs_list); 381 list_del(&fs_devices->fs_list); 382 free_fs_devices(fs_devices); 383 } 384 } 385 386 /* 387 * Returns a pointer to a new btrfs_device on success; ERR_PTR() on error. 388 * Returned struct is not linked onto any lists and must be destroyed using 389 * btrfs_free_device. 
390 */ 391 static struct btrfs_device *__alloc_device(void) 392 { 393 struct btrfs_device *dev; 394 395 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 396 if (!dev) 397 return ERR_PTR(-ENOMEM); 398 399 /* 400 * Preallocate a bio that's always going to be used for flushing device 401 * barriers and matches the device lifespan 402 */ 403 dev->flush_bio = bio_alloc_bioset(GFP_KERNEL, 0, NULL); 404 if (!dev->flush_bio) { 405 kfree(dev); 406 return ERR_PTR(-ENOMEM); 407 } 408 409 INIT_LIST_HEAD(&dev->dev_list); 410 INIT_LIST_HEAD(&dev->dev_alloc_list); 411 INIT_LIST_HEAD(&dev->post_commit_list); 412 413 spin_lock_init(&dev->io_lock); 414 415 atomic_set(&dev->reada_in_flight, 0); 416 atomic_set(&dev->dev_stats_ccnt, 0); 417 btrfs_device_data_ordered_init(dev); 418 INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM); 419 INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM); 420 extent_io_tree_init(NULL, &dev->alloc_state, 0, NULL); 421 422 return dev; 423 } 424 425 static noinline struct btrfs_fs_devices *find_fsid( 426 const u8 *fsid, const u8 *metadata_fsid) 427 { 428 struct btrfs_fs_devices *fs_devices; 429 430 ASSERT(fsid); 431 432 if (metadata_fsid) { 433 /* 434 * Handle scanned device having completed its fsid change but 435 * belonging to a fs_devices that was created by first scanning 436 * a device which didn't have its fsid/metadata_uuid changed 437 * at all and the CHANGING_FSID_V2 flag set. 438 */ 439 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 440 if (fs_devices->fsid_change && 441 memcmp(metadata_fsid, fs_devices->fsid, 442 BTRFS_FSID_SIZE) == 0 && 443 memcmp(fs_devices->fsid, fs_devices->metadata_uuid, 444 BTRFS_FSID_SIZE) == 0) { 445 return fs_devices; 446 } 447 } 448 /* 449 * Handle scanned device having completed its fsid change but 450 * belonging to a fs_devices that was created by a device that 451 * has an outdated pair of fsid/metadata_uuid and 452 * CHANGING_FSID_V2 flag set. 
453 */ 454 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 455 if (fs_devices->fsid_change && 456 memcmp(fs_devices->metadata_uuid, 457 fs_devices->fsid, BTRFS_FSID_SIZE) != 0 && 458 memcmp(metadata_fsid, fs_devices->metadata_uuid, 459 BTRFS_FSID_SIZE) == 0) { 460 return fs_devices; 461 } 462 } 463 } 464 465 /* Handle non-split brain cases */ 466 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 467 if (metadata_fsid) { 468 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0 469 && memcmp(metadata_fsid, fs_devices->metadata_uuid, 470 BTRFS_FSID_SIZE) == 0) 471 return fs_devices; 472 } else { 473 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0) 474 return fs_devices; 475 } 476 } 477 return NULL; 478 } 479 480 static int 481 btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder, 482 int flush, struct block_device **bdev, 483 struct buffer_head **bh) 484 { 485 int ret; 486 487 *bdev = blkdev_get_by_path(device_path, flags, holder); 488 489 if (IS_ERR(*bdev)) { 490 ret = PTR_ERR(*bdev); 491 goto error; 492 } 493 494 if (flush) 495 filemap_write_and_wait((*bdev)->bd_inode->i_mapping); 496 ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE); 497 if (ret) { 498 blkdev_put(*bdev, flags); 499 goto error; 500 } 501 invalidate_bdev(*bdev); 502 *bh = btrfs_read_dev_super(*bdev); 503 if (IS_ERR(*bh)) { 504 ret = PTR_ERR(*bh); 505 blkdev_put(*bdev, flags); 506 goto error; 507 } 508 509 return 0; 510 511 error: 512 *bdev = NULL; 513 *bh = NULL; 514 return ret; 515 } 516 517 static void requeue_list(struct btrfs_pending_bios *pending_bios, 518 struct bio *head, struct bio *tail) 519 { 520 521 struct bio *old_head; 522 523 old_head = pending_bios->head; 524 pending_bios->head = head; 525 if (pending_bios->tail) 526 tail->bi_next = old_head; 527 else 528 pending_bios->tail = tail; 529 } 530 531 /* 532 * we try to collect pending bios for a device so we don't get a large 533 * number of procs sending bios down to the same device. This greatly 534 * improves the schedulers ability to collect and merge the bios. 535 * 536 * But, it also turns into a long list of bios to process and that is sure 537 * to eventually make the worker thread block. The solution here is to 538 * make some progress and then put this work struct back at the end of 539 * the list if the block device is congested. This way, multiple devices 540 * can make progress from a single worker thread. 541 */ 542 static noinline void run_scheduled_bios(struct btrfs_device *device) 543 { 544 struct btrfs_fs_info *fs_info = device->fs_info; 545 struct bio *pending; 546 struct backing_dev_info *bdi; 547 struct btrfs_pending_bios *pending_bios; 548 struct bio *tail; 549 struct bio *cur; 550 int again = 0; 551 unsigned long num_run; 552 unsigned long batch_run = 0; 553 unsigned long last_waited = 0; 554 int force_reg = 0; 555 int sync_pending = 0; 556 struct blk_plug plug; 557 558 /* 559 * this function runs all the bios we've collected for 560 * a particular device. We don't want to wander off to 561 * another device without first sending all of these down. 562 * So, setup a plug here and finish it off before we return 563 */ 564 blk_start_plug(&plug); 565 566 bdi = device->bdev->bd_bdi; 567 568 loop: 569 spin_lock(&device->io_lock); 570 571 loop_lock: 572 num_run = 0; 573 574 /* take all the bios off the list at once and process them 575 * later on (without the lock held). 
But, remember the 576 * tail and other pointers so the bios can be properly reinserted 577 * into the list if we hit congestion 578 */ 579 if (!force_reg && device->pending_sync_bios.head) { 580 pending_bios = &device->pending_sync_bios; 581 force_reg = 1; 582 } else { 583 pending_bios = &device->pending_bios; 584 force_reg = 0; 585 } 586 587 pending = pending_bios->head; 588 tail = pending_bios->tail; 589 WARN_ON(pending && !tail); 590 591 /* 592 * if pending was null this time around, no bios need processing 593 * at all and we can stop. Otherwise it'll loop back up again 594 * and do an additional check so no bios are missed. 595 * 596 * device->running_pending is used to synchronize with the 597 * schedule_bio code. 598 */ 599 if (device->pending_sync_bios.head == NULL && 600 device->pending_bios.head == NULL) { 601 again = 0; 602 device->running_pending = 0; 603 } else { 604 again = 1; 605 device->running_pending = 1; 606 } 607 608 pending_bios->head = NULL; 609 pending_bios->tail = NULL; 610 611 spin_unlock(&device->io_lock); 612 613 while (pending) { 614 615 rmb(); 616 /* we want to work on both lists, but do more bios on the 617 * sync list than the regular list 618 */ 619 if ((num_run > 32 && 620 pending_bios != &device->pending_sync_bios && 621 device->pending_sync_bios.head) || 622 (num_run > 64 && pending_bios == &device->pending_sync_bios && 623 device->pending_bios.head)) { 624 spin_lock(&device->io_lock); 625 requeue_list(pending_bios, pending, tail); 626 goto loop_lock; 627 } 628 629 cur = pending; 630 pending = pending->bi_next; 631 cur->bi_next = NULL; 632 633 BUG_ON(atomic_read(&cur->__bi_cnt) == 0); 634 635 /* 636 * if we're doing the sync list, record that our 637 * plug has some sync requests on it 638 * 639 * If we're doing the regular list and there are 640 * sync requests sitting around, unplug before 641 * we add more 642 */ 643 if (pending_bios == &device->pending_sync_bios) { 644 sync_pending = 1; 645 } else if (sync_pending) { 646 blk_finish_plug(&plug); 647 blk_start_plug(&plug); 648 sync_pending = 0; 649 } 650 651 btrfsic_submit_bio(cur); 652 num_run++; 653 batch_run++; 654 655 cond_resched(); 656 657 /* 658 * we made progress, there is more work to do and the bdi 659 * is now congested. Back off and let other work structs 660 * run instead 661 */ 662 if (pending && bdi_write_congested(bdi) && batch_run > 8 && 663 fs_info->fs_devices->open_devices > 1) { 664 struct io_context *ioc; 665 666 ioc = current->io_context; 667 668 /* 669 * the main goal here is that we don't want to 670 * block if we're going to be able to submit 671 * more requests without blocking. 672 * 673 * This code does two great things, it pokes into 674 * the elevator code from a filesystem _and_ 675 * it makes assumptions about how batching works. 676 */ 677 if (ioc && ioc->nr_batch_requests > 0 && 678 time_before(jiffies, ioc->last_waited + HZ/50UL) && 679 (last_waited == 0 || 680 ioc->last_waited == last_waited)) { 681 /* 682 * we want to go through our batch of 683 * requests and stop. 
So, we copy out 684 * the ioc->last_waited time and test 685 * against it before looping 686 */ 687 last_waited = ioc->last_waited; 688 cond_resched(); 689 continue; 690 } 691 spin_lock(&device->io_lock); 692 requeue_list(pending_bios, pending, tail); 693 device->running_pending = 1; 694 695 spin_unlock(&device->io_lock); 696 btrfs_queue_work(fs_info->submit_workers, 697 &device->work); 698 goto done; 699 } 700 } 701 702 cond_resched(); 703 if (again) 704 goto loop; 705 706 spin_lock(&device->io_lock); 707 if (device->pending_bios.head || device->pending_sync_bios.head) 708 goto loop_lock; 709 spin_unlock(&device->io_lock); 710 711 done: 712 blk_finish_plug(&plug); 713 } 714 715 static void pending_bios_fn(struct btrfs_work *work) 716 { 717 struct btrfs_device *device; 718 719 device = container_of(work, struct btrfs_device, work); 720 run_scheduled_bios(device); 721 } 722 723 static bool device_path_matched(const char *path, struct btrfs_device *device) 724 { 725 int found; 726 727 rcu_read_lock(); 728 found = strcmp(rcu_str_deref(device->name), path); 729 rcu_read_unlock(); 730 731 return found == 0; 732 } 733 734 /* 735 * Search and remove all stale (devices which are not mounted) devices. 736 * When both inputs are NULL, it will search and release all stale devices. 737 * path: Optional. When provided will it release all unmounted devices 738 * matching this path only. 739 * skip_dev: Optional. Will skip this device when searching for the stale 740 * devices. 741 * Return: 0 for success or if @path is NULL. 742 * -EBUSY if @path is a mounted device. 743 * -ENOENT if @path does not match any device in the list. 744 */ 745 static int btrfs_free_stale_devices(const char *path, 746 struct btrfs_device *skip_device) 747 { 748 struct btrfs_fs_devices *fs_devices, *tmp_fs_devices; 749 struct btrfs_device *device, *tmp_device; 750 int ret = 0; 751 752 if (path) 753 ret = -ENOENT; 754 755 list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) { 756 757 mutex_lock(&fs_devices->device_list_mutex); 758 list_for_each_entry_safe(device, tmp_device, 759 &fs_devices->devices, dev_list) { 760 if (skip_device && skip_device == device) 761 continue; 762 if (path && !device->name) 763 continue; 764 if (path && !device_path_matched(path, device)) 765 continue; 766 if (fs_devices->opened) { 767 /* for an already deleted device return 0 */ 768 if (path && ret != 0) 769 ret = -EBUSY; 770 break; 771 } 772 773 /* delete the stale device */ 774 fs_devices->num_devices--; 775 list_del(&device->dev_list); 776 btrfs_free_device(device); 777 778 ret = 0; 779 if (fs_devices->num_devices == 0) 780 break; 781 } 782 mutex_unlock(&fs_devices->device_list_mutex); 783 784 if (fs_devices->num_devices == 0) { 785 btrfs_sysfs_remove_fsid(fs_devices); 786 list_del(&fs_devices->fs_list); 787 free_fs_devices(fs_devices); 788 } 789 } 790 791 return ret; 792 } 793 794 static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices, 795 struct btrfs_device *device, fmode_t flags, 796 void *holder) 797 { 798 struct request_queue *q; 799 struct block_device *bdev; 800 struct buffer_head *bh; 801 struct btrfs_super_block *disk_super; 802 u64 devid; 803 int ret; 804 805 if (device->bdev) 806 return -EINVAL; 807 if (!device->name) 808 return -EINVAL; 809 810 ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1, 811 &bdev, &bh); 812 if (ret) 813 return ret; 814 815 disk_super = (struct btrfs_super_block *)bh->b_data; 816 devid = btrfs_stack_device_id(&disk_super->dev_item); 817 if (devid != 
device->devid)
		goto error_brelse;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_brelse;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_brelse;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = 1;
	} else {
		if (bdev_read_only(bdev))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	q = bdev_get_queue(bdev);
	if (!blk_queue_nonrot(q))
		fs_devices->rotating = 1;

	device->bdev = bdev;
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	device->mode = flags;

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	brelse(bh);

	return 0;

error_brelse:
	brelse(bh);
	blkdev_put(bdev, flags);

	return -EINVAL;
}

/*
 * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
 * being created with a disk that has already completed its fsid change.
 */
static struct btrfs_fs_devices *find_fsid_inprogress(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
			return fs_devices;
		}
	}

	return NULL;
}


static struct btrfs_fs_devices *find_fsid_changed(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handles the case where the scanned device is part of an fs that had
	 * multiple successful changes of FSID but the device currently didn't
	 * observe it, meaning our fsid will be different from theirs.
898 */ 899 list_for_each_entry(fs_devices, &fs_uuids, fs_list) { 900 if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid, 901 BTRFS_FSID_SIZE) != 0 && 902 memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid, 903 BTRFS_FSID_SIZE) == 0 && 904 memcmp(fs_devices->fsid, disk_super->fsid, 905 BTRFS_FSID_SIZE) != 0) { 906 return fs_devices; 907 } 908 } 909 910 return NULL; 911 } 912 /* 913 * Add new device to list of registered devices 914 * 915 * Returns: 916 * device pointer which was just added or updated when successful 917 * error pointer when failed 918 */ 919 static noinline struct btrfs_device *device_list_add(const char *path, 920 struct btrfs_super_block *disk_super, 921 bool *new_device_added) 922 { 923 struct btrfs_device *device; 924 struct btrfs_fs_devices *fs_devices = NULL; 925 struct rcu_string *name; 926 u64 found_transid = btrfs_super_generation(disk_super); 927 u64 devid = btrfs_stack_device_id(&disk_super->dev_item); 928 bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) & 929 BTRFS_FEATURE_INCOMPAT_METADATA_UUID); 930 bool fsid_change_in_progress = (btrfs_super_flags(disk_super) & 931 BTRFS_SUPER_FLAG_CHANGING_FSID_V2); 932 933 if (fsid_change_in_progress) { 934 if (!has_metadata_uuid) { 935 /* 936 * When we have an image which has CHANGING_FSID_V2 set 937 * it might belong to either a filesystem which has 938 * disks with completed fsid change or it might belong 939 * to fs with no UUID changes in effect, handle both. 940 */ 941 fs_devices = find_fsid_inprogress(disk_super); 942 if (!fs_devices) 943 fs_devices = find_fsid(disk_super->fsid, NULL); 944 } else { 945 fs_devices = find_fsid_changed(disk_super); 946 } 947 } else if (has_metadata_uuid) { 948 fs_devices = find_fsid(disk_super->fsid, 949 disk_super->metadata_uuid); 950 } else { 951 fs_devices = find_fsid(disk_super->fsid, NULL); 952 } 953 954 955 if (!fs_devices) { 956 if (has_metadata_uuid) 957 fs_devices = alloc_fs_devices(disk_super->fsid, 958 disk_super->metadata_uuid); 959 else 960 fs_devices = alloc_fs_devices(disk_super->fsid, NULL); 961 962 if (IS_ERR(fs_devices)) 963 return ERR_CAST(fs_devices); 964 965 fs_devices->fsid_change = fsid_change_in_progress; 966 967 mutex_lock(&fs_devices->device_list_mutex); 968 list_add(&fs_devices->fs_list, &fs_uuids); 969 970 device = NULL; 971 } else { 972 mutex_lock(&fs_devices->device_list_mutex); 973 device = btrfs_find_device(fs_devices, devid, 974 disk_super->dev_item.uuid, NULL, false); 975 976 /* 977 * If this disk has been pulled into an fs devices created by 978 * a device which had the CHANGING_FSID_V2 flag then replace the 979 * metadata_uuid/fsid values of the fs_devices. 
		 */
		if (has_metadata_uuid && fs_devices->fsid_change &&
		    found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
					BTRFS_FSID_SIZE);
			memcpy(fs_devices->metadata_uuid,
					disk_super->metadata_uuid, BTRFS_FSID_SIZE);

			fs_devices->fsid_change = false;
		}
	}

	if (!device) {
		if (fs_devices->opened) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			btrfs_free_device(device);
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_assign_pointer(device->name, name);

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info("BTRFS: device label %s devid %llu transid %llu %s\n",
				disk_super->label, devid, found_transid, path);
		else
			pr_info("BTRFS: device fsid %pU devid %llu transid %llu %s\n",
				disk_super->fsid, devid, found_transid, path);

	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When the FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at the time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *	a. The same device disappeared and reappeared with a
		 *	   different name, or
		 *	b. The missing disk which was replaced has
		 *	   reappeared now.
		 *
		 * We must allow 1 and 2a above, but 2b would be spurious
		 * and unintentional.
		 *
		 * Further, in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transactions when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after the FS has been mounted. We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with the same uuid and devid. We keep the one
			 * with the larger generation number or the last-in if
			 * generations are equal.
1061 */ 1062 mutex_unlock(&fs_devices->device_list_mutex); 1063 return ERR_PTR(-EEXIST); 1064 } 1065 1066 /* 1067 * We are going to replace the device path for a given devid, 1068 * make sure it's the same device if the device is mounted 1069 */ 1070 if (device->bdev) { 1071 struct block_device *path_bdev; 1072 1073 path_bdev = lookup_bdev(path); 1074 if (IS_ERR(path_bdev)) { 1075 mutex_unlock(&fs_devices->device_list_mutex); 1076 return ERR_CAST(path_bdev); 1077 } 1078 1079 if (device->bdev != path_bdev) { 1080 bdput(path_bdev); 1081 mutex_unlock(&fs_devices->device_list_mutex); 1082 btrfs_warn_in_rcu(device->fs_info, 1083 "duplicate device fsid:devid for %pU:%llu old:%s new:%s", 1084 disk_super->fsid, devid, 1085 rcu_str_deref(device->name), path); 1086 return ERR_PTR(-EEXIST); 1087 } 1088 bdput(path_bdev); 1089 btrfs_info_in_rcu(device->fs_info, 1090 "device fsid %pU devid %llu moved old:%s new:%s", 1091 disk_super->fsid, devid, 1092 rcu_str_deref(device->name), path); 1093 } 1094 1095 name = rcu_string_strdup(path, GFP_NOFS); 1096 if (!name) { 1097 mutex_unlock(&fs_devices->device_list_mutex); 1098 return ERR_PTR(-ENOMEM); 1099 } 1100 rcu_string_free(device->name); 1101 rcu_assign_pointer(device->name, name); 1102 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 1103 fs_devices->missing_devices--; 1104 clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 1105 } 1106 } 1107 1108 /* 1109 * Unmount does not free the btrfs_device struct but would zero 1110 * generation along with most of the other members. So just update 1111 * it back. We need it to pick the disk with largest generation 1112 * (as above). 1113 */ 1114 if (!fs_devices->opened) { 1115 device->generation = found_transid; 1116 fs_devices->latest_generation = max_t(u64, found_transid, 1117 fs_devices->latest_generation); 1118 } 1119 1120 fs_devices->total_devices = btrfs_super_num_devices(disk_super); 1121 1122 mutex_unlock(&fs_devices->device_list_mutex); 1123 return device; 1124 } 1125 1126 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig) 1127 { 1128 struct btrfs_fs_devices *fs_devices; 1129 struct btrfs_device *device; 1130 struct btrfs_device *orig_dev; 1131 1132 fs_devices = alloc_fs_devices(orig->fsid, NULL); 1133 if (IS_ERR(fs_devices)) 1134 return fs_devices; 1135 1136 mutex_lock(&orig->device_list_mutex); 1137 fs_devices->total_devices = orig->total_devices; 1138 1139 list_for_each_entry(orig_dev, &orig->devices, dev_list) { 1140 struct rcu_string *name; 1141 1142 device = btrfs_alloc_device(NULL, &orig_dev->devid, 1143 orig_dev->uuid); 1144 if (IS_ERR(device)) 1145 goto error; 1146 1147 /* 1148 * This is ok to do without rcu read locked because we hold the 1149 * uuid mutex so nothing we touch in here is going to disappear. 1150 */ 1151 if (orig_dev->name) { 1152 name = rcu_string_strdup(orig_dev->name->str, 1153 GFP_KERNEL); 1154 if (!name) { 1155 btrfs_free_device(device); 1156 goto error; 1157 } 1158 rcu_assign_pointer(device->name, name); 1159 } 1160 1161 list_add(&device->dev_list, &fs_devices->devices); 1162 device->fs_devices = fs_devices; 1163 fs_devices->num_devices++; 1164 } 1165 mutex_unlock(&orig->device_list_mutex); 1166 return fs_devices; 1167 error: 1168 mutex_unlock(&orig->device_list_mutex); 1169 free_fs_devices(fs_devices); 1170 return ERR_PTR(-ENOMEM); 1171 } 1172 1173 /* 1174 * After we have read the system tree and know devids belonging to 1175 * this filesystem, remove the device which does not belong there. 
1176 */ 1177 void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step) 1178 { 1179 struct btrfs_device *device, *next; 1180 struct btrfs_device *latest_dev = NULL; 1181 1182 mutex_lock(&uuid_mutex); 1183 again: 1184 /* This is the initialized path, it is safe to release the devices. */ 1185 list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) { 1186 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 1187 &device->dev_state)) { 1188 if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, 1189 &device->dev_state) && 1190 (!latest_dev || 1191 device->generation > latest_dev->generation)) { 1192 latest_dev = device; 1193 } 1194 continue; 1195 } 1196 1197 if (device->devid == BTRFS_DEV_REPLACE_DEVID) { 1198 /* 1199 * In the first step, keep the device which has 1200 * the correct fsid and the devid that is used 1201 * for the dev_replace procedure. 1202 * In the second step, the dev_replace state is 1203 * read from the device tree and it is known 1204 * whether the procedure is really active or 1205 * not, which means whether this device is 1206 * used or whether it should be removed. 1207 */ 1208 if (step == 0 || test_bit(BTRFS_DEV_STATE_REPLACE_TGT, 1209 &device->dev_state)) { 1210 continue; 1211 } 1212 } 1213 if (device->bdev) { 1214 blkdev_put(device->bdev, device->mode); 1215 device->bdev = NULL; 1216 fs_devices->open_devices--; 1217 } 1218 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 1219 list_del_init(&device->dev_alloc_list); 1220 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 1221 if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, 1222 &device->dev_state)) 1223 fs_devices->rw_devices--; 1224 } 1225 list_del_init(&device->dev_list); 1226 fs_devices->num_devices--; 1227 btrfs_free_device(device); 1228 } 1229 1230 if (fs_devices->seed) { 1231 fs_devices = fs_devices->seed; 1232 goto again; 1233 } 1234 1235 fs_devices->latest_bdev = latest_dev->bdev; 1236 1237 mutex_unlock(&uuid_mutex); 1238 } 1239 1240 static void btrfs_close_bdev(struct btrfs_device *device) 1241 { 1242 if (!device->bdev) 1243 return; 1244 1245 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 1246 sync_blockdev(device->bdev); 1247 invalidate_bdev(device->bdev); 1248 } 1249 1250 blkdev_put(device->bdev, device->mode); 1251 } 1252 1253 static void btrfs_close_one_device(struct btrfs_device *device) 1254 { 1255 struct btrfs_fs_devices *fs_devices = device->fs_devices; 1256 struct btrfs_device *new_device; 1257 struct rcu_string *name; 1258 1259 if (device->bdev) 1260 fs_devices->open_devices--; 1261 1262 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 1263 device->devid != BTRFS_DEV_REPLACE_DEVID) { 1264 list_del_init(&device->dev_alloc_list); 1265 fs_devices->rw_devices--; 1266 } 1267 1268 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) 1269 fs_devices->missing_devices--; 1270 1271 btrfs_close_bdev(device); 1272 1273 new_device = btrfs_alloc_device(NULL, &device->devid, 1274 device->uuid); 1275 BUG_ON(IS_ERR(new_device)); /* -ENOMEM */ 1276 1277 /* Safe because we are under uuid_mutex */ 1278 if (device->name) { 1279 name = rcu_string_strdup(device->name->str, GFP_NOFS); 1280 BUG_ON(!name); /* -ENOMEM */ 1281 rcu_assign_pointer(new_device->name, name); 1282 } 1283 1284 list_replace_rcu(&device->dev_list, &new_device->dev_list); 1285 new_device->fs_devices = device->fs_devices; 1286 1287 synchronize_rcu(); 1288 btrfs_free_device(device); 1289 } 1290 1291 static int close_fs_devices(struct btrfs_fs_devices *fs_devices) 1292 { 1293 struct 
btrfs_device *device, *tmp; 1294 1295 if (--fs_devices->opened > 0) 1296 return 0; 1297 1298 mutex_lock(&fs_devices->device_list_mutex); 1299 list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) { 1300 btrfs_close_one_device(device); 1301 } 1302 mutex_unlock(&fs_devices->device_list_mutex); 1303 1304 WARN_ON(fs_devices->open_devices); 1305 WARN_ON(fs_devices->rw_devices); 1306 fs_devices->opened = 0; 1307 fs_devices->seeding = 0; 1308 1309 return 0; 1310 } 1311 1312 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices) 1313 { 1314 struct btrfs_fs_devices *seed_devices = NULL; 1315 int ret; 1316 1317 mutex_lock(&uuid_mutex); 1318 ret = close_fs_devices(fs_devices); 1319 if (!fs_devices->opened) { 1320 seed_devices = fs_devices->seed; 1321 fs_devices->seed = NULL; 1322 } 1323 mutex_unlock(&uuid_mutex); 1324 1325 while (seed_devices) { 1326 fs_devices = seed_devices; 1327 seed_devices = fs_devices->seed; 1328 close_fs_devices(fs_devices); 1329 free_fs_devices(fs_devices); 1330 } 1331 return ret; 1332 } 1333 1334 static int open_fs_devices(struct btrfs_fs_devices *fs_devices, 1335 fmode_t flags, void *holder) 1336 { 1337 struct btrfs_device *device; 1338 struct btrfs_device *latest_dev = NULL; 1339 int ret = 0; 1340 1341 flags |= FMODE_EXCL; 1342 1343 list_for_each_entry(device, &fs_devices->devices, dev_list) { 1344 /* Just open everything we can; ignore failures here */ 1345 if (btrfs_open_one_device(fs_devices, device, flags, holder)) 1346 continue; 1347 1348 if (!latest_dev || 1349 device->generation > latest_dev->generation) 1350 latest_dev = device; 1351 } 1352 if (fs_devices->open_devices == 0) { 1353 ret = -EINVAL; 1354 goto out; 1355 } 1356 fs_devices->opened = 1; 1357 fs_devices->latest_bdev = latest_dev->bdev; 1358 fs_devices->total_rw_bytes = 0; 1359 out: 1360 return ret; 1361 } 1362 1363 static int devid_cmp(void *priv, struct list_head *a, struct list_head *b) 1364 { 1365 struct btrfs_device *dev1, *dev2; 1366 1367 dev1 = list_entry(a, struct btrfs_device, dev_list); 1368 dev2 = list_entry(b, struct btrfs_device, dev_list); 1369 1370 if (dev1->devid < dev2->devid) 1371 return -1; 1372 else if (dev1->devid > dev2->devid) 1373 return 1; 1374 return 0; 1375 } 1376 1377 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, 1378 fmode_t flags, void *holder) 1379 { 1380 int ret; 1381 1382 lockdep_assert_held(&uuid_mutex); 1383 1384 mutex_lock(&fs_devices->device_list_mutex); 1385 if (fs_devices->opened) { 1386 fs_devices->opened++; 1387 ret = 0; 1388 } else { 1389 list_sort(NULL, &fs_devices->devices, devid_cmp); 1390 ret = open_fs_devices(fs_devices, flags, holder); 1391 } 1392 mutex_unlock(&fs_devices->device_list_mutex); 1393 1394 return ret; 1395 } 1396 1397 static void btrfs_release_disk_super(struct page *page) 1398 { 1399 kunmap(page); 1400 put_page(page); 1401 } 1402 1403 static int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr, 1404 struct page **page, 1405 struct btrfs_super_block **disk_super) 1406 { 1407 void *p; 1408 pgoff_t index; 1409 1410 /* make sure our super fits in the device */ 1411 if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode)) 1412 return 1; 1413 1414 /* make sure our super fits in the page */ 1415 if (sizeof(**disk_super) > PAGE_SIZE) 1416 return 1; 1417 1418 /* make sure our super doesn't straddle pages on disk */ 1419 index = bytenr >> PAGE_SHIFT; 1420 if ((bytenr + sizeof(**disk_super) - 1) >> PAGE_SHIFT != index) 1421 return 1; 1422 1423 /* pull in the page with our super */ 1424 *page = 
read_cache_page_gfp(bdev->bd_inode->i_mapping, 1425 index, GFP_KERNEL); 1426 1427 if (IS_ERR_OR_NULL(*page)) 1428 return 1; 1429 1430 p = kmap(*page); 1431 1432 /* align our pointer to the offset of the super block */ 1433 *disk_super = p + offset_in_page(bytenr); 1434 1435 if (btrfs_super_bytenr(*disk_super) != bytenr || 1436 btrfs_super_magic(*disk_super) != BTRFS_MAGIC) { 1437 btrfs_release_disk_super(*page); 1438 return 1; 1439 } 1440 1441 if ((*disk_super)->label[0] && 1442 (*disk_super)->label[BTRFS_LABEL_SIZE - 1]) 1443 (*disk_super)->label[BTRFS_LABEL_SIZE - 1] = '\0'; 1444 1445 return 0; 1446 } 1447 1448 int btrfs_forget_devices(const char *path) 1449 { 1450 int ret; 1451 1452 mutex_lock(&uuid_mutex); 1453 ret = btrfs_free_stale_devices(strlen(path) ? path : NULL, NULL); 1454 mutex_unlock(&uuid_mutex); 1455 1456 return ret; 1457 } 1458 1459 /* 1460 * Look for a btrfs signature on a device. This may be called out of the mount path 1461 * and we are not allowed to call set_blocksize during the scan. The superblock 1462 * is read via pagecache 1463 */ 1464 struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags, 1465 void *holder) 1466 { 1467 struct btrfs_super_block *disk_super; 1468 bool new_device_added = false; 1469 struct btrfs_device *device = NULL; 1470 struct block_device *bdev; 1471 struct page *page; 1472 u64 bytenr; 1473 1474 lockdep_assert_held(&uuid_mutex); 1475 1476 /* 1477 * we would like to check all the supers, but that would make 1478 * a btrfs mount succeed after a mkfs from a different FS. 1479 * So, we need to add a special mount option to scan for 1480 * later supers, using BTRFS_SUPER_MIRROR_MAX instead 1481 */ 1482 bytenr = btrfs_sb_offset(0); 1483 flags |= FMODE_EXCL; 1484 1485 bdev = blkdev_get_by_path(path, flags, holder); 1486 if (IS_ERR(bdev)) 1487 return ERR_CAST(bdev); 1488 1489 if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super)) { 1490 device = ERR_PTR(-EINVAL); 1491 goto error_bdev_put; 1492 } 1493 1494 device = device_list_add(path, disk_super, &new_device_added); 1495 if (!IS_ERR(device)) { 1496 if (new_device_added) 1497 btrfs_free_stale_devices(path, device); 1498 } 1499 1500 btrfs_release_disk_super(page); 1501 1502 error_bdev_put: 1503 blkdev_put(bdev, flags); 1504 1505 return device; 1506 } 1507 1508 /* 1509 * Try to find a chunk that intersects [start, start + len] range and when one 1510 * such is found, record the end of it in *start 1511 */ 1512 static bool contains_pending_extent(struct btrfs_device *device, u64 *start, 1513 u64 len) 1514 { 1515 u64 physical_start, physical_end; 1516 1517 lockdep_assert_held(&device->fs_info->chunk_mutex); 1518 1519 if (!find_first_extent_bit(&device->alloc_state, *start, 1520 &physical_start, &physical_end, 1521 CHUNK_ALLOCATED, NULL)) { 1522 1523 if (in_range(physical_start, *start, len) || 1524 in_range(*start, physical_start, 1525 physical_end - physical_start)) { 1526 *start = physical_end + 1; 1527 return true; 1528 } 1529 } 1530 return false; 1531 } 1532 1533 1534 /* 1535 * find_free_dev_extent_start - find free space in the specified device 1536 * @device: the device which we search the free space in 1537 * @num_bytes: the size of the free space that we need 1538 * @search_start: the position from which to begin the search 1539 * @start: store the start of the free space. 1540 * @len: the size of the free space. 
that we find, or the size 1541 * of the max free space if we don't find suitable free space 1542 * 1543 * this uses a pretty simple search, the expectation is that it is 1544 * called very infrequently and that a given device has a small number 1545 * of extents 1546 * 1547 * @start is used to store the start of the free space if we find. But if we 1548 * don't find suitable free space, it will be used to store the start position 1549 * of the max free space. 1550 * 1551 * @len is used to store the size of the free space that we find. 1552 * But if we don't find suitable free space, it is used to store the size of 1553 * the max free space. 1554 */ 1555 int find_free_dev_extent_start(struct btrfs_device *device, u64 num_bytes, 1556 u64 search_start, u64 *start, u64 *len) 1557 { 1558 struct btrfs_fs_info *fs_info = device->fs_info; 1559 struct btrfs_root *root = fs_info->dev_root; 1560 struct btrfs_key key; 1561 struct btrfs_dev_extent *dev_extent; 1562 struct btrfs_path *path; 1563 u64 hole_size; 1564 u64 max_hole_start; 1565 u64 max_hole_size; 1566 u64 extent_end; 1567 u64 search_end = device->total_bytes; 1568 int ret; 1569 int slot; 1570 struct extent_buffer *l; 1571 1572 /* 1573 * We don't want to overwrite the superblock on the drive nor any area 1574 * used by the boot loader (grub for example), so we make sure to start 1575 * at an offset of at least 1MB. 1576 */ 1577 search_start = max_t(u64, search_start, SZ_1M); 1578 1579 path = btrfs_alloc_path(); 1580 if (!path) 1581 return -ENOMEM; 1582 1583 max_hole_start = search_start; 1584 max_hole_size = 0; 1585 1586 again: 1587 if (search_start >= search_end || 1588 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 1589 ret = -ENOSPC; 1590 goto out; 1591 } 1592 1593 path->reada = READA_FORWARD; 1594 path->search_commit_root = 1; 1595 path->skip_locking = 1; 1596 1597 key.objectid = device->devid; 1598 key.offset = search_start; 1599 key.type = BTRFS_DEV_EXTENT_KEY; 1600 1601 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 1602 if (ret < 0) 1603 goto out; 1604 if (ret > 0) { 1605 ret = btrfs_previous_item(root, path, key.objectid, key.type); 1606 if (ret < 0) 1607 goto out; 1608 } 1609 1610 while (1) { 1611 l = path->nodes[0]; 1612 slot = path->slots[0]; 1613 if (slot >= btrfs_header_nritems(l)) { 1614 ret = btrfs_next_leaf(root, path); 1615 if (ret == 0) 1616 continue; 1617 if (ret < 0) 1618 goto out; 1619 1620 break; 1621 } 1622 btrfs_item_key_to_cpu(l, &key, slot); 1623 1624 if (key.objectid < device->devid) 1625 goto next; 1626 1627 if (key.objectid > device->devid) 1628 break; 1629 1630 if (key.type != BTRFS_DEV_EXTENT_KEY) 1631 goto next; 1632 1633 if (key.offset > search_start) { 1634 hole_size = key.offset - search_start; 1635 1636 /* 1637 * Have to check before we set max_hole_start, otherwise 1638 * we could end up sending back this offset anyway. 1639 */ 1640 if (contains_pending_extent(device, &search_start, 1641 hole_size)) { 1642 if (key.offset >= search_start) 1643 hole_size = key.offset - search_start; 1644 else 1645 hole_size = 0; 1646 } 1647 1648 if (hole_size > max_hole_size) { 1649 max_hole_start = search_start; 1650 max_hole_size = hole_size; 1651 } 1652 1653 /* 1654 * If this free space is greater than which we need, 1655 * it must be the max free space that we have found 1656 * until now, so max_hole_start must point to the start 1657 * of this free space and the length of this free space 1658 * is stored in max_hole_size. 
Thus, we return 1659 * max_hole_start and max_hole_size and go back to the 1660 * caller. 1661 */ 1662 if (hole_size >= num_bytes) { 1663 ret = 0; 1664 goto out; 1665 } 1666 } 1667 1668 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 1669 extent_end = key.offset + btrfs_dev_extent_length(l, 1670 dev_extent); 1671 if (extent_end > search_start) 1672 search_start = extent_end; 1673 next: 1674 path->slots[0]++; 1675 cond_resched(); 1676 } 1677 1678 /* 1679 * At this point, search_start should be the end of 1680 * allocated dev extents, and when shrinking the device, 1681 * search_end may be smaller than search_start. 1682 */ 1683 if (search_end > search_start) { 1684 hole_size = search_end - search_start; 1685 1686 if (contains_pending_extent(device, &search_start, hole_size)) { 1687 btrfs_release_path(path); 1688 goto again; 1689 } 1690 1691 if (hole_size > max_hole_size) { 1692 max_hole_start = search_start; 1693 max_hole_size = hole_size; 1694 } 1695 } 1696 1697 /* See above. */ 1698 if (max_hole_size < num_bytes) 1699 ret = -ENOSPC; 1700 else 1701 ret = 0; 1702 1703 out: 1704 btrfs_free_path(path); 1705 *start = max_hole_start; 1706 if (len) 1707 *len = max_hole_size; 1708 return ret; 1709 } 1710 1711 int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes, 1712 u64 *start, u64 *len) 1713 { 1714 /* FIXME use last free of some kind */ 1715 return find_free_dev_extent_start(device, num_bytes, 0, start, len); 1716 } 1717 1718 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans, 1719 struct btrfs_device *device, 1720 u64 start, u64 *dev_extent_len) 1721 { 1722 struct btrfs_fs_info *fs_info = device->fs_info; 1723 struct btrfs_root *root = fs_info->dev_root; 1724 int ret; 1725 struct btrfs_path *path; 1726 struct btrfs_key key; 1727 struct btrfs_key found_key; 1728 struct extent_buffer *leaf = NULL; 1729 struct btrfs_dev_extent *extent = NULL; 1730 1731 path = btrfs_alloc_path(); 1732 if (!path) 1733 return -ENOMEM; 1734 1735 key.objectid = device->devid; 1736 key.offset = start; 1737 key.type = BTRFS_DEV_EXTENT_KEY; 1738 again: 1739 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1740 if (ret > 0) { 1741 ret = btrfs_previous_item(root, path, key.objectid, 1742 BTRFS_DEV_EXTENT_KEY); 1743 if (ret) 1744 goto out; 1745 leaf = path->nodes[0]; 1746 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 1747 extent = btrfs_item_ptr(leaf, path->slots[0], 1748 struct btrfs_dev_extent); 1749 BUG_ON(found_key.offset > start || found_key.offset + 1750 btrfs_dev_extent_length(leaf, extent) < start); 1751 key = found_key; 1752 btrfs_release_path(path); 1753 goto again; 1754 } else if (ret == 0) { 1755 leaf = path->nodes[0]; 1756 extent = btrfs_item_ptr(leaf, path->slots[0], 1757 struct btrfs_dev_extent); 1758 } else { 1759 btrfs_handle_fs_error(fs_info, ret, "Slot search failed"); 1760 goto out; 1761 } 1762 1763 *dev_extent_len = btrfs_dev_extent_length(leaf, extent); 1764 1765 ret = btrfs_del_item(trans, root, path); 1766 if (ret) { 1767 btrfs_handle_fs_error(fs_info, ret, 1768 "Failed to remove dev extent item"); 1769 } else { 1770 set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags); 1771 } 1772 out: 1773 btrfs_free_path(path); 1774 return ret; 1775 } 1776 1777 static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans, 1778 struct btrfs_device *device, 1779 u64 chunk_offset, u64 start, u64 num_bytes) 1780 { 1781 int ret; 1782 struct btrfs_path *path; 1783 struct btrfs_fs_info *fs_info = device->fs_info; 1784 struct btrfs_root *root 
= fs_info->dev_root; 1785 struct btrfs_dev_extent *extent; 1786 struct extent_buffer *leaf; 1787 struct btrfs_key key; 1788 1789 WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)); 1790 WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)); 1791 path = btrfs_alloc_path(); 1792 if (!path) 1793 return -ENOMEM; 1794 1795 key.objectid = device->devid; 1796 key.offset = start; 1797 key.type = BTRFS_DEV_EXTENT_KEY; 1798 ret = btrfs_insert_empty_item(trans, root, path, &key, 1799 sizeof(*extent)); 1800 if (ret) 1801 goto out; 1802 1803 leaf = path->nodes[0]; 1804 extent = btrfs_item_ptr(leaf, path->slots[0], 1805 struct btrfs_dev_extent); 1806 btrfs_set_dev_extent_chunk_tree(leaf, extent, 1807 BTRFS_CHUNK_TREE_OBJECTID); 1808 btrfs_set_dev_extent_chunk_objectid(leaf, extent, 1809 BTRFS_FIRST_CHUNK_TREE_OBJECTID); 1810 btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset); 1811 1812 btrfs_set_dev_extent_length(leaf, extent, num_bytes); 1813 btrfs_mark_buffer_dirty(leaf); 1814 out: 1815 btrfs_free_path(path); 1816 return ret; 1817 } 1818 1819 static u64 find_next_chunk(struct btrfs_fs_info *fs_info) 1820 { 1821 struct extent_map_tree *em_tree; 1822 struct extent_map *em; 1823 struct rb_node *n; 1824 u64 ret = 0; 1825 1826 em_tree = &fs_info->mapping_tree; 1827 read_lock(&em_tree->lock); 1828 n = rb_last(&em_tree->map.rb_root); 1829 if (n) { 1830 em = rb_entry(n, struct extent_map, rb_node); 1831 ret = em->start + em->len; 1832 } 1833 read_unlock(&em_tree->lock); 1834 1835 return ret; 1836 } 1837 1838 static noinline int find_next_devid(struct btrfs_fs_info *fs_info, 1839 u64 *devid_ret) 1840 { 1841 int ret; 1842 struct btrfs_key key; 1843 struct btrfs_key found_key; 1844 struct btrfs_path *path; 1845 1846 path = btrfs_alloc_path(); 1847 if (!path) 1848 return -ENOMEM; 1849 1850 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1851 key.type = BTRFS_DEV_ITEM_KEY; 1852 key.offset = (u64)-1; 1853 1854 ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0); 1855 if (ret < 0) 1856 goto error; 1857 1858 BUG_ON(ret == 0); /* Corruption */ 1859 1860 ret = btrfs_previous_item(fs_info->chunk_root, path, 1861 BTRFS_DEV_ITEMS_OBJECTID, 1862 BTRFS_DEV_ITEM_KEY); 1863 if (ret) { 1864 *devid_ret = 1; 1865 } else { 1866 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 1867 path->slots[0]); 1868 *devid_ret = found_key.offset + 1; 1869 } 1870 ret = 0; 1871 error: 1872 btrfs_free_path(path); 1873 return ret; 1874 } 1875 1876 /* 1877 * the device information is stored in the chunk root 1878 * the btrfs_device struct should be fully filled in 1879 */ 1880 static int btrfs_add_dev_item(struct btrfs_trans_handle *trans, 1881 struct btrfs_device *device) 1882 { 1883 int ret; 1884 struct btrfs_path *path; 1885 struct btrfs_dev_item *dev_item; 1886 struct extent_buffer *leaf; 1887 struct btrfs_key key; 1888 unsigned long ptr; 1889 1890 path = btrfs_alloc_path(); 1891 if (!path) 1892 return -ENOMEM; 1893 1894 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1895 key.type = BTRFS_DEV_ITEM_KEY; 1896 key.offset = device->devid; 1897 1898 ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path, 1899 &key, sizeof(*dev_item)); 1900 if (ret) 1901 goto out; 1902 1903 leaf = path->nodes[0]; 1904 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 1905 1906 btrfs_set_device_id(leaf, dev_item, device->devid); 1907 btrfs_set_device_generation(leaf, dev_item, 0); 1908 btrfs_set_device_type(leaf, dev_item, device->type); 1909 btrfs_set_device_io_align(leaf, dev_item, 
device->io_align); 1910 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 1911 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 1912 btrfs_set_device_total_bytes(leaf, dev_item, 1913 btrfs_device_get_disk_total_bytes(device)); 1914 btrfs_set_device_bytes_used(leaf, dev_item, 1915 btrfs_device_get_bytes_used(device)); 1916 btrfs_set_device_group(leaf, dev_item, 0); 1917 btrfs_set_device_seek_speed(leaf, dev_item, 0); 1918 btrfs_set_device_bandwidth(leaf, dev_item, 0); 1919 btrfs_set_device_start_offset(leaf, dev_item, 0); 1920 1921 ptr = btrfs_device_uuid(dev_item); 1922 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 1923 ptr = btrfs_device_fsid(dev_item); 1924 write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid, 1925 ptr, BTRFS_FSID_SIZE); 1926 btrfs_mark_buffer_dirty(leaf); 1927 1928 ret = 0; 1929 out: 1930 btrfs_free_path(path); 1931 return ret; 1932 } 1933 1934 /* 1935 * Function to update ctime/mtime for a given device path. 1936 * Mainly used for ctime/mtime based probe like libblkid. 1937 */ 1938 static void update_dev_time(const char *path_name) 1939 { 1940 struct file *filp; 1941 1942 filp = filp_open(path_name, O_RDWR, 0); 1943 if (IS_ERR(filp)) 1944 return; 1945 file_update_time(filp); 1946 filp_close(filp, NULL); 1947 } 1948 1949 static int btrfs_rm_dev_item(struct btrfs_device *device) 1950 { 1951 struct btrfs_root *root = device->fs_info->chunk_root; 1952 int ret; 1953 struct btrfs_path *path; 1954 struct btrfs_key key; 1955 struct btrfs_trans_handle *trans; 1956 1957 path = btrfs_alloc_path(); 1958 if (!path) 1959 return -ENOMEM; 1960 1961 trans = btrfs_start_transaction(root, 0); 1962 if (IS_ERR(trans)) { 1963 btrfs_free_path(path); 1964 return PTR_ERR(trans); 1965 } 1966 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1967 key.type = BTRFS_DEV_ITEM_KEY; 1968 key.offset = device->devid; 1969 1970 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1971 if (ret) { 1972 if (ret > 0) 1973 ret = -ENOENT; 1974 btrfs_abort_transaction(trans, ret); 1975 btrfs_end_transaction(trans); 1976 goto out; 1977 } 1978 1979 ret = btrfs_del_item(trans, root, path); 1980 if (ret) { 1981 btrfs_abort_transaction(trans, ret); 1982 btrfs_end_transaction(trans); 1983 } 1984 1985 out: 1986 btrfs_free_path(path); 1987 if (!ret) 1988 ret = btrfs_commit_transaction(trans); 1989 return ret; 1990 } 1991 1992 /* 1993 * Verify that @num_devices satisfies the RAID profile constraints in the whole 1994 * filesystem. It's up to the caller to adjust that number regarding eg. device 1995 * replace. 
1996  */
1997 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
1998 		u64 num_devices)
1999 {
2000 	u64 all_avail;
2001 	unsigned seq;
2002 	int i;
2003 
2004 	do {
2005 		seq = read_seqbegin(&fs_info->profiles_lock);
2006 
2007 		all_avail = fs_info->avail_data_alloc_bits |
2008 			    fs_info->avail_system_alloc_bits |
2009 			    fs_info->avail_metadata_alloc_bits;
2010 	} while (read_seqretry(&fs_info->profiles_lock, seq));
2011 
2012 	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
2013 		if (!(all_avail & btrfs_raid_array[i].bg_flag))
2014 			continue;
2015 
2016 		if (num_devices < btrfs_raid_array[i].devs_min) {
2017 			int ret = btrfs_raid_array[i].mindev_error;
2018 
2019 			if (ret)
2020 				return ret;
2021 		}
2022 	}
2023 
2024 	return 0;
2025 }
2026 
2027 static struct btrfs_device * btrfs_find_next_active_device(
2028 		struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
2029 {
2030 	struct btrfs_device *next_device;
2031 
2032 	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
2033 		if (next_device != device &&
2034 		    !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
2035 		    && next_device->bdev)
2036 			return next_device;
2037 	}
2038 
2039 	return NULL;
2040 }
2041 
2042 /*
2043  * Helper function to check if the given device is part of s_bdev / latest_bdev
2044  * and replace it with the provided device or the next active device. In the
2045  * context where this function is called, there should always be another
2046  * active device (or this_dev).
2047  */
2048 void btrfs_assign_next_active_device(struct btrfs_device *device,
2049 		struct btrfs_device *this_dev)
2050 {
2051 	struct btrfs_fs_info *fs_info = device->fs_info;
2052 	struct btrfs_device *next_device;
2053 
2054 	if (this_dev)
2055 		next_device = this_dev;
2056 	else
2057 		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
2058 				device);
2059 	ASSERT(next_device);
2060 
2061 	if (fs_info->sb->s_bdev &&
2062 	    (fs_info->sb->s_bdev == device->bdev))
2063 		fs_info->sb->s_bdev = next_device->bdev;
2064 
2065 	if (fs_info->fs_devices->latest_bdev == device->bdev)
2066 		fs_info->fs_devices->latest_bdev = next_device->bdev;
2067 }
2068 
2069 /*
2070  * Return btrfs_fs_devices::num_devices excluding the device that's being
2071  * currently replaced.
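 * (While a replace is running, the replace target device is part of
 * fs_devices and is counted in num_devices, hence the adjustment.)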
2072 */ 2073 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info) 2074 { 2075 u64 num_devices = fs_info->fs_devices->num_devices; 2076 2077 down_read(&fs_info->dev_replace.rwsem); 2078 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) { 2079 ASSERT(num_devices > 1); 2080 num_devices--; 2081 } 2082 up_read(&fs_info->dev_replace.rwsem); 2083 2084 return num_devices; 2085 } 2086 2087 int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path, 2088 u64 devid) 2089 { 2090 struct btrfs_device *device; 2091 struct btrfs_fs_devices *cur_devices; 2092 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2093 u64 num_devices; 2094 int ret = 0; 2095 2096 mutex_lock(&uuid_mutex); 2097 2098 num_devices = btrfs_num_devices(fs_info); 2099 2100 ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1); 2101 if (ret) 2102 goto out; 2103 2104 device = btrfs_find_device_by_devspec(fs_info, devid, device_path); 2105 2106 if (IS_ERR(device)) { 2107 if (PTR_ERR(device) == -ENOENT && 2108 strcmp(device_path, "missing") == 0) 2109 ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND; 2110 else 2111 ret = PTR_ERR(device); 2112 goto out; 2113 } 2114 2115 if (btrfs_pinned_by_swapfile(fs_info, device)) { 2116 btrfs_warn_in_rcu(fs_info, 2117 "cannot remove device %s (devid %llu) due to active swapfile", 2118 rcu_str_deref(device->name), device->devid); 2119 ret = -ETXTBSY; 2120 goto out; 2121 } 2122 2123 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 2124 ret = BTRFS_ERROR_DEV_TGT_REPLACE; 2125 goto out; 2126 } 2127 2128 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 2129 fs_info->fs_devices->rw_devices == 1) { 2130 ret = BTRFS_ERROR_DEV_ONLY_WRITABLE; 2131 goto out; 2132 } 2133 2134 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2135 mutex_lock(&fs_info->chunk_mutex); 2136 list_del_init(&device->dev_alloc_list); 2137 device->fs_devices->rw_devices--; 2138 mutex_unlock(&fs_info->chunk_mutex); 2139 } 2140 2141 mutex_unlock(&uuid_mutex); 2142 ret = btrfs_shrink_device(device, 0); 2143 mutex_lock(&uuid_mutex); 2144 if (ret) 2145 goto error_undo; 2146 2147 /* 2148 * TODO: the superblock still includes this device in its num_devices 2149 * counter although write_all_supers() is not locked out. This 2150 * could give a filesystem state which requires a degraded mount. 2151 */ 2152 ret = btrfs_rm_dev_item(device); 2153 if (ret) 2154 goto error_undo; 2155 2156 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2157 btrfs_scrub_cancel_dev(device); 2158 2159 /* 2160 * the device list mutex makes sure that we don't change 2161 * the device list while someone else is writing out all 2162 * the device supers. Whoever is writing all supers, should 2163 * lock the device list mutex before getting the number of 2164 * devices in the super block (super_copy). Conversely, 2165 * whoever updates the number of devices in the super block 2166 * (super_copy) should hold the device list mutex. 2167 */ 2168 2169 /* 2170 * In normal cases the cur_devices == fs_devices. But in case 2171 * of deleting a seed device, the cur_devices should point to 2172 * its own fs_devices listed under the fs_devices->seed. 
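 * The counters below are therefore decremented on cur_devices, the
 * fs_devices the removed device actually belongs to.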
2173 */ 2174 cur_devices = device->fs_devices; 2175 mutex_lock(&fs_devices->device_list_mutex); 2176 list_del_rcu(&device->dev_list); 2177 2178 cur_devices->num_devices--; 2179 cur_devices->total_devices--; 2180 /* Update total_devices of the parent fs_devices if it's seed */ 2181 if (cur_devices != fs_devices) 2182 fs_devices->total_devices--; 2183 2184 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) 2185 cur_devices->missing_devices--; 2186 2187 btrfs_assign_next_active_device(device, NULL); 2188 2189 if (device->bdev) { 2190 cur_devices->open_devices--; 2191 /* remove sysfs entry */ 2192 btrfs_sysfs_rm_device_link(fs_devices, device); 2193 } 2194 2195 num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1; 2196 btrfs_set_super_num_devices(fs_info->super_copy, num_devices); 2197 mutex_unlock(&fs_devices->device_list_mutex); 2198 2199 /* 2200 * at this point, the device is zero sized and detached from 2201 * the devices list. All that's left is to zero out the old 2202 * supers and free the device. 2203 */ 2204 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 2205 btrfs_scratch_superblocks(device->bdev, device->name->str); 2206 2207 btrfs_close_bdev(device); 2208 synchronize_rcu(); 2209 btrfs_free_device(device); 2210 2211 if (cur_devices->open_devices == 0) { 2212 while (fs_devices) { 2213 if (fs_devices->seed == cur_devices) { 2214 fs_devices->seed = cur_devices->seed; 2215 break; 2216 } 2217 fs_devices = fs_devices->seed; 2218 } 2219 cur_devices->seed = NULL; 2220 close_fs_devices(cur_devices); 2221 free_fs_devices(cur_devices); 2222 } 2223 2224 out: 2225 mutex_unlock(&uuid_mutex); 2226 return ret; 2227 2228 error_undo: 2229 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2230 mutex_lock(&fs_info->chunk_mutex); 2231 list_add(&device->dev_alloc_list, 2232 &fs_devices->alloc_list); 2233 device->fs_devices->rw_devices++; 2234 mutex_unlock(&fs_info->chunk_mutex); 2235 } 2236 goto out; 2237 } 2238 2239 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev) 2240 { 2241 struct btrfs_fs_devices *fs_devices; 2242 2243 lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex); 2244 2245 /* 2246 * in case of fs with no seed, srcdev->fs_devices will point 2247 * to fs_devices of fs_info. However when the dev being replaced is 2248 * a seed dev it will point to the seed's local fs_devices. In short 2249 * srcdev will have its correct fs_devices in both the cases. 
2250  */
2251 	fs_devices = srcdev->fs_devices;
2252 
2253 	list_del_rcu(&srcdev->dev_list);
2254 	list_del(&srcdev->dev_alloc_list);
2255 	fs_devices->num_devices--;
2256 	if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
2257 		fs_devices->missing_devices--;
2258 
2259 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
2260 		fs_devices->rw_devices--;
2261 
2262 	if (srcdev->bdev)
2263 		fs_devices->open_devices--;
2264 }
2265 
2266 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
2267 {
2268 	struct btrfs_fs_info *fs_info = srcdev->fs_info;
2269 	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2270 
2271 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) {
2272 		/* zero out the old super if it is writable */
2273 		btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
2274 	}
2275 
2276 	btrfs_close_bdev(srcdev);
2277 	synchronize_rcu();
2278 	btrfs_free_device(srcdev);
2279 
2280 	/* if there are no devices left, delete this fs_devices */
2281 	if (!fs_devices->num_devices) {
2282 		struct btrfs_fs_devices *tmp_fs_devices;
2283 
2284 		/*
2285 		 * On a mounted FS, num_devices can't be zero unless it's a
2286 		 * seed. In case of a seed device being replaced, the replace
2287 		 * target is added to the sprout FS, so there will be no
2288 		 * devices left under the seed FS.
2289 		 */
2290 		ASSERT(fs_devices->seeding);
2291 
2292 		tmp_fs_devices = fs_info->fs_devices;
2293 		while (tmp_fs_devices) {
2294 			if (tmp_fs_devices->seed == fs_devices) {
2295 				tmp_fs_devices->seed = fs_devices->seed;
2296 				break;
2297 			}
2298 			tmp_fs_devices = tmp_fs_devices->seed;
2299 		}
2300 		fs_devices->seed = NULL;
2301 		close_fs_devices(fs_devices);
2302 		free_fs_devices(fs_devices);
2303 	}
2304 }
2305 
2306 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
2307 {
2308 	struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;
2309 
2310 	WARN_ON(!tgtdev);
2311 	mutex_lock(&fs_devices->device_list_mutex);
2312 
2313 	btrfs_sysfs_rm_device_link(fs_devices, tgtdev);
2314 
2315 	if (tgtdev->bdev)
2316 		fs_devices->open_devices--;
2317 
2318 	fs_devices->num_devices--;
2319 
2320 	btrfs_assign_next_active_device(tgtdev, NULL);
2321 
2322 	list_del_rcu(&tgtdev->dev_list);
2323 
2324 	mutex_unlock(&fs_devices->device_list_mutex);
2325 
2326 	/*
2327 	 * The update_dev_time() within btrfs_scratch_superblocks()
2328 	 * may lead to a call to btrfs_show_devname() which will try
2329 	 * to hold device_list_mutex. Here this device is
2330 	 * already out of the device list, so we don't have to hold
2331 	 * the device_list_mutex lock.
2332 */ 2333 btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str); 2334 2335 btrfs_close_bdev(tgtdev); 2336 synchronize_rcu(); 2337 btrfs_free_device(tgtdev); 2338 } 2339 2340 static struct btrfs_device *btrfs_find_device_by_path( 2341 struct btrfs_fs_info *fs_info, const char *device_path) 2342 { 2343 int ret = 0; 2344 struct btrfs_super_block *disk_super; 2345 u64 devid; 2346 u8 *dev_uuid; 2347 struct block_device *bdev; 2348 struct buffer_head *bh; 2349 struct btrfs_device *device; 2350 2351 ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ, 2352 fs_info->bdev_holder, 0, &bdev, &bh); 2353 if (ret) 2354 return ERR_PTR(ret); 2355 disk_super = (struct btrfs_super_block *)bh->b_data; 2356 devid = btrfs_stack_device_id(&disk_super->dev_item); 2357 dev_uuid = disk_super->dev_item.uuid; 2358 if (btrfs_fs_incompat(fs_info, METADATA_UUID)) 2359 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid, 2360 disk_super->metadata_uuid, true); 2361 else 2362 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid, 2363 disk_super->fsid, true); 2364 2365 brelse(bh); 2366 if (!device) 2367 device = ERR_PTR(-ENOENT); 2368 blkdev_put(bdev, FMODE_READ); 2369 return device; 2370 } 2371 2372 /* 2373 * Lookup a device given by device id, or the path if the id is 0. 2374 */ 2375 struct btrfs_device *btrfs_find_device_by_devspec( 2376 struct btrfs_fs_info *fs_info, u64 devid, 2377 const char *device_path) 2378 { 2379 struct btrfs_device *device; 2380 2381 if (devid) { 2382 device = btrfs_find_device(fs_info->fs_devices, devid, NULL, 2383 NULL, true); 2384 if (!device) 2385 return ERR_PTR(-ENOENT); 2386 return device; 2387 } 2388 2389 if (!device_path || !device_path[0]) 2390 return ERR_PTR(-EINVAL); 2391 2392 if (strcmp(device_path, "missing") == 0) { 2393 /* Find first missing device */ 2394 list_for_each_entry(device, &fs_info->fs_devices->devices, 2395 dev_list) { 2396 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 2397 &device->dev_state) && !device->bdev) 2398 return device; 2399 } 2400 return ERR_PTR(-ENOENT); 2401 } 2402 2403 return btrfs_find_device_by_path(fs_info, device_path); 2404 } 2405 2406 /* 2407 * does all the dirty work required for changing file system's UUID. 
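 *
 * The existing devices are moved to a newly allocated seed fs_devices
 * (chained via fs_devices->seed), the mounted fs_devices is reset to be
 * empty and writable, and a fresh fsid is generated for the sprout.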
2408 */ 2409 static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info) 2410 { 2411 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2412 struct btrfs_fs_devices *old_devices; 2413 struct btrfs_fs_devices *seed_devices; 2414 struct btrfs_super_block *disk_super = fs_info->super_copy; 2415 struct btrfs_device *device; 2416 u64 super_flags; 2417 2418 lockdep_assert_held(&uuid_mutex); 2419 if (!fs_devices->seeding) 2420 return -EINVAL; 2421 2422 seed_devices = alloc_fs_devices(NULL, NULL); 2423 if (IS_ERR(seed_devices)) 2424 return PTR_ERR(seed_devices); 2425 2426 old_devices = clone_fs_devices(fs_devices); 2427 if (IS_ERR(old_devices)) { 2428 kfree(seed_devices); 2429 return PTR_ERR(old_devices); 2430 } 2431 2432 list_add(&old_devices->fs_list, &fs_uuids); 2433 2434 memcpy(seed_devices, fs_devices, sizeof(*seed_devices)); 2435 seed_devices->opened = 1; 2436 INIT_LIST_HEAD(&seed_devices->devices); 2437 INIT_LIST_HEAD(&seed_devices->alloc_list); 2438 mutex_init(&seed_devices->device_list_mutex); 2439 2440 mutex_lock(&fs_devices->device_list_mutex); 2441 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices, 2442 synchronize_rcu); 2443 list_for_each_entry(device, &seed_devices->devices, dev_list) 2444 device->fs_devices = seed_devices; 2445 2446 mutex_lock(&fs_info->chunk_mutex); 2447 list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list); 2448 mutex_unlock(&fs_info->chunk_mutex); 2449 2450 fs_devices->seeding = 0; 2451 fs_devices->num_devices = 0; 2452 fs_devices->open_devices = 0; 2453 fs_devices->missing_devices = 0; 2454 fs_devices->rotating = 0; 2455 fs_devices->seed = seed_devices; 2456 2457 generate_random_uuid(fs_devices->fsid); 2458 memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE); 2459 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); 2460 mutex_unlock(&fs_devices->device_list_mutex); 2461 2462 super_flags = btrfs_super_flags(disk_super) & 2463 ~BTRFS_SUPER_FLAG_SEEDING; 2464 btrfs_set_super_flags(disk_super, super_flags); 2465 2466 return 0; 2467 } 2468 2469 /* 2470 * Store the expected generation for seed devices in device items. 
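 * Walks the DEV_ITEMs in the chunk tree and, for devices that belong to
 * a seeding fs_devices, writes the device's current generation into the
 * item so the seed device can later be verified against it.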
2471 */ 2472 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans) 2473 { 2474 struct btrfs_fs_info *fs_info = trans->fs_info; 2475 struct btrfs_root *root = fs_info->chunk_root; 2476 struct btrfs_path *path; 2477 struct extent_buffer *leaf; 2478 struct btrfs_dev_item *dev_item; 2479 struct btrfs_device *device; 2480 struct btrfs_key key; 2481 u8 fs_uuid[BTRFS_FSID_SIZE]; 2482 u8 dev_uuid[BTRFS_UUID_SIZE]; 2483 u64 devid; 2484 int ret; 2485 2486 path = btrfs_alloc_path(); 2487 if (!path) 2488 return -ENOMEM; 2489 2490 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2491 key.offset = 0; 2492 key.type = BTRFS_DEV_ITEM_KEY; 2493 2494 while (1) { 2495 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2496 if (ret < 0) 2497 goto error; 2498 2499 leaf = path->nodes[0]; 2500 next_slot: 2501 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 2502 ret = btrfs_next_leaf(root, path); 2503 if (ret > 0) 2504 break; 2505 if (ret < 0) 2506 goto error; 2507 leaf = path->nodes[0]; 2508 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2509 btrfs_release_path(path); 2510 continue; 2511 } 2512 2513 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2514 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID || 2515 key.type != BTRFS_DEV_ITEM_KEY) 2516 break; 2517 2518 dev_item = btrfs_item_ptr(leaf, path->slots[0], 2519 struct btrfs_dev_item); 2520 devid = btrfs_device_id(leaf, dev_item); 2521 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 2522 BTRFS_UUID_SIZE); 2523 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 2524 BTRFS_FSID_SIZE); 2525 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid, 2526 fs_uuid, true); 2527 BUG_ON(!device); /* Logic error */ 2528 2529 if (device->fs_devices->seeding) { 2530 btrfs_set_device_generation(leaf, dev_item, 2531 device->generation); 2532 btrfs_mark_buffer_dirty(leaf); 2533 } 2534 2535 path->slots[0]++; 2536 goto next_slot; 2537 } 2538 ret = 0; 2539 error: 2540 btrfs_free_path(path); 2541 return ret; 2542 } 2543 2544 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path) 2545 { 2546 struct btrfs_root *root = fs_info->dev_root; 2547 struct request_queue *q; 2548 struct btrfs_trans_handle *trans; 2549 struct btrfs_device *device; 2550 struct block_device *bdev; 2551 struct super_block *sb = fs_info->sb; 2552 struct rcu_string *name; 2553 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2554 u64 orig_super_total_bytes; 2555 u64 orig_super_num_devices; 2556 int seeding_dev = 0; 2557 int ret = 0; 2558 bool unlocked = false; 2559 2560 if (sb_rdonly(sb) && !fs_devices->seeding) 2561 return -EROFS; 2562 2563 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, 2564 fs_info->bdev_holder); 2565 if (IS_ERR(bdev)) 2566 return PTR_ERR(bdev); 2567 2568 if (fs_devices->seeding) { 2569 seeding_dev = 1; 2570 down_write(&sb->s_umount); 2571 mutex_lock(&uuid_mutex); 2572 } 2573 2574 filemap_write_and_wait(bdev->bd_inode->i_mapping); 2575 2576 mutex_lock(&fs_devices->device_list_mutex); 2577 list_for_each_entry(device, &fs_devices->devices, dev_list) { 2578 if (device->bdev == bdev) { 2579 ret = -EEXIST; 2580 mutex_unlock( 2581 &fs_devices->device_list_mutex); 2582 goto error; 2583 } 2584 } 2585 mutex_unlock(&fs_devices->device_list_mutex); 2586 2587 device = btrfs_alloc_device(fs_info, NULL, NULL); 2588 if (IS_ERR(device)) { 2589 /* we can safely leave the fs_devices entry around */ 2590 ret = PTR_ERR(device); 2591 goto error; 2592 } 2593 2594 name = rcu_string_strdup(device_path, GFP_KERNEL); 
2595 if (!name) { 2596 ret = -ENOMEM; 2597 goto error_free_device; 2598 } 2599 rcu_assign_pointer(device->name, name); 2600 2601 trans = btrfs_start_transaction(root, 0); 2602 if (IS_ERR(trans)) { 2603 ret = PTR_ERR(trans); 2604 goto error_free_device; 2605 } 2606 2607 q = bdev_get_queue(bdev); 2608 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 2609 device->generation = trans->transid; 2610 device->io_width = fs_info->sectorsize; 2611 device->io_align = fs_info->sectorsize; 2612 device->sector_size = fs_info->sectorsize; 2613 device->total_bytes = round_down(i_size_read(bdev->bd_inode), 2614 fs_info->sectorsize); 2615 device->disk_total_bytes = device->total_bytes; 2616 device->commit_total_bytes = device->total_bytes; 2617 device->fs_info = fs_info; 2618 device->bdev = bdev; 2619 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2620 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 2621 device->mode = FMODE_EXCL; 2622 device->dev_stats_valid = 1; 2623 set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE); 2624 2625 if (seeding_dev) { 2626 sb->s_flags &= ~SB_RDONLY; 2627 ret = btrfs_prepare_sprout(fs_info); 2628 if (ret) { 2629 btrfs_abort_transaction(trans, ret); 2630 goto error_trans; 2631 } 2632 } 2633 2634 device->fs_devices = fs_devices; 2635 2636 mutex_lock(&fs_devices->device_list_mutex); 2637 mutex_lock(&fs_info->chunk_mutex); 2638 list_add_rcu(&device->dev_list, &fs_devices->devices); 2639 list_add(&device->dev_alloc_list, &fs_devices->alloc_list); 2640 fs_devices->num_devices++; 2641 fs_devices->open_devices++; 2642 fs_devices->rw_devices++; 2643 fs_devices->total_devices++; 2644 fs_devices->total_rw_bytes += device->total_bytes; 2645 2646 atomic64_add(device->total_bytes, &fs_info->free_chunk_space); 2647 2648 if (!blk_queue_nonrot(q)) 2649 fs_devices->rotating = 1; 2650 2651 orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy); 2652 btrfs_set_super_total_bytes(fs_info->super_copy, 2653 round_down(orig_super_total_bytes + device->total_bytes, 2654 fs_info->sectorsize)); 2655 2656 orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy); 2657 btrfs_set_super_num_devices(fs_info->super_copy, 2658 orig_super_num_devices + 1); 2659 2660 /* add sysfs device entry */ 2661 btrfs_sysfs_add_device_link(fs_devices, device); 2662 2663 /* 2664 * we've got more storage, clear any full flags on the space 2665 * infos 2666 */ 2667 btrfs_clear_space_info_full(fs_info); 2668 2669 mutex_unlock(&fs_info->chunk_mutex); 2670 mutex_unlock(&fs_devices->device_list_mutex); 2671 2672 if (seeding_dev) { 2673 mutex_lock(&fs_info->chunk_mutex); 2674 ret = init_first_rw_device(trans); 2675 mutex_unlock(&fs_info->chunk_mutex); 2676 if (ret) { 2677 btrfs_abort_transaction(trans, ret); 2678 goto error_sysfs; 2679 } 2680 } 2681 2682 ret = btrfs_add_dev_item(trans, device); 2683 if (ret) { 2684 btrfs_abort_transaction(trans, ret); 2685 goto error_sysfs; 2686 } 2687 2688 if (seeding_dev) { 2689 char fsid_buf[BTRFS_UUID_UNPARSED_SIZE]; 2690 2691 ret = btrfs_finish_sprout(trans); 2692 if (ret) { 2693 btrfs_abort_transaction(trans, ret); 2694 goto error_sysfs; 2695 } 2696 2697 /* Sprouting would change fsid of the mounted root, 2698 * so rename the fsid on the sysfs 2699 */ 2700 snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU", 2701 fs_info->fs_devices->fsid); 2702 if (kobject_rename(&fs_devices->fsid_kobj, fsid_buf)) 2703 btrfs_warn(fs_info, 2704 "sysfs: failed to create fsid for sprout"); 2705 } 2706 2707 ret = btrfs_commit_transaction(trans); 2708 2709 
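	/*
	 * For a new sprout the remaining work (relocating the system chunks
	 * and the follow-up commit) is done after dropping uuid_mutex and
	 * s_umount below.
	 */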
if (seeding_dev) { 2710 mutex_unlock(&uuid_mutex); 2711 up_write(&sb->s_umount); 2712 unlocked = true; 2713 2714 if (ret) /* transaction commit */ 2715 return ret; 2716 2717 ret = btrfs_relocate_sys_chunks(fs_info); 2718 if (ret < 0) 2719 btrfs_handle_fs_error(fs_info, ret, 2720 "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command."); 2721 trans = btrfs_attach_transaction(root); 2722 if (IS_ERR(trans)) { 2723 if (PTR_ERR(trans) == -ENOENT) 2724 return 0; 2725 ret = PTR_ERR(trans); 2726 trans = NULL; 2727 goto error_sysfs; 2728 } 2729 ret = btrfs_commit_transaction(trans); 2730 } 2731 2732 /* Update ctime/mtime for libblkid */ 2733 update_dev_time(device_path); 2734 return ret; 2735 2736 error_sysfs: 2737 btrfs_sysfs_rm_device_link(fs_devices, device); 2738 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2739 mutex_lock(&fs_info->chunk_mutex); 2740 list_del_rcu(&device->dev_list); 2741 list_del(&device->dev_alloc_list); 2742 fs_info->fs_devices->num_devices--; 2743 fs_info->fs_devices->open_devices--; 2744 fs_info->fs_devices->rw_devices--; 2745 fs_info->fs_devices->total_devices--; 2746 fs_info->fs_devices->total_rw_bytes -= device->total_bytes; 2747 atomic64_sub(device->total_bytes, &fs_info->free_chunk_space); 2748 btrfs_set_super_total_bytes(fs_info->super_copy, 2749 orig_super_total_bytes); 2750 btrfs_set_super_num_devices(fs_info->super_copy, 2751 orig_super_num_devices); 2752 mutex_unlock(&fs_info->chunk_mutex); 2753 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2754 error_trans: 2755 if (seeding_dev) 2756 sb->s_flags |= SB_RDONLY; 2757 if (trans) 2758 btrfs_end_transaction(trans); 2759 error_free_device: 2760 btrfs_free_device(device); 2761 error: 2762 blkdev_put(bdev, FMODE_EXCL); 2763 if (seeding_dev && !unlocked) { 2764 mutex_unlock(&uuid_mutex); 2765 up_write(&sb->s_umount); 2766 } 2767 return ret; 2768 } 2769 2770 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans, 2771 struct btrfs_device *device) 2772 { 2773 int ret; 2774 struct btrfs_path *path; 2775 struct btrfs_root *root = device->fs_info->chunk_root; 2776 struct btrfs_dev_item *dev_item; 2777 struct extent_buffer *leaf; 2778 struct btrfs_key key; 2779 2780 path = btrfs_alloc_path(); 2781 if (!path) 2782 return -ENOMEM; 2783 2784 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2785 key.type = BTRFS_DEV_ITEM_KEY; 2786 key.offset = device->devid; 2787 2788 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2789 if (ret < 0) 2790 goto out; 2791 2792 if (ret > 0) { 2793 ret = -ENOENT; 2794 goto out; 2795 } 2796 2797 leaf = path->nodes[0]; 2798 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 2799 2800 btrfs_set_device_id(leaf, dev_item, device->devid); 2801 btrfs_set_device_type(leaf, dev_item, device->type); 2802 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 2803 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 2804 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 2805 btrfs_set_device_total_bytes(leaf, dev_item, 2806 btrfs_device_get_disk_total_bytes(device)); 2807 btrfs_set_device_bytes_used(leaf, dev_item, 2808 btrfs_device_get_bytes_used(device)); 2809 btrfs_mark_buffer_dirty(leaf); 2810 2811 out: 2812 btrfs_free_path(path); 2813 return ret; 2814 } 2815 2816 int btrfs_grow_device(struct btrfs_trans_handle *trans, 2817 struct btrfs_device *device, u64 new_size) 2818 { 2819 struct btrfs_fs_info *fs_info = device->fs_info; 2820 struct btrfs_super_block *super_copy 
= fs_info->super_copy; 2821 u64 old_total; 2822 u64 diff; 2823 2824 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 2825 return -EACCES; 2826 2827 new_size = round_down(new_size, fs_info->sectorsize); 2828 2829 mutex_lock(&fs_info->chunk_mutex); 2830 old_total = btrfs_super_total_bytes(super_copy); 2831 diff = round_down(new_size - device->total_bytes, fs_info->sectorsize); 2832 2833 if (new_size <= device->total_bytes || 2834 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 2835 mutex_unlock(&fs_info->chunk_mutex); 2836 return -EINVAL; 2837 } 2838 2839 btrfs_set_super_total_bytes(super_copy, 2840 round_down(old_total + diff, fs_info->sectorsize)); 2841 device->fs_devices->total_rw_bytes += diff; 2842 2843 btrfs_device_set_total_bytes(device, new_size); 2844 btrfs_device_set_disk_total_bytes(device, new_size); 2845 btrfs_clear_space_info_full(device->fs_info); 2846 if (list_empty(&device->post_commit_list)) 2847 list_add_tail(&device->post_commit_list, 2848 &trans->transaction->dev_update_list); 2849 mutex_unlock(&fs_info->chunk_mutex); 2850 2851 return btrfs_update_device(trans, device); 2852 } 2853 2854 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 2855 { 2856 struct btrfs_fs_info *fs_info = trans->fs_info; 2857 struct btrfs_root *root = fs_info->chunk_root; 2858 int ret; 2859 struct btrfs_path *path; 2860 struct btrfs_key key; 2861 2862 path = btrfs_alloc_path(); 2863 if (!path) 2864 return -ENOMEM; 2865 2866 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2867 key.offset = chunk_offset; 2868 key.type = BTRFS_CHUNK_ITEM_KEY; 2869 2870 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 2871 if (ret < 0) 2872 goto out; 2873 else if (ret > 0) { /* Logic error or corruption */ 2874 btrfs_handle_fs_error(fs_info, -ENOENT, 2875 "Failed lookup while freeing chunk."); 2876 ret = -ENOENT; 2877 goto out; 2878 } 2879 2880 ret = btrfs_del_item(trans, root, path); 2881 if (ret < 0) 2882 btrfs_handle_fs_error(fs_info, ret, 2883 "Failed to delete chunk item."); 2884 out: 2885 btrfs_free_path(path); 2886 return ret; 2887 } 2888 2889 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 2890 { 2891 struct btrfs_super_block *super_copy = fs_info->super_copy; 2892 struct btrfs_disk_key *disk_key; 2893 struct btrfs_chunk *chunk; 2894 u8 *ptr; 2895 int ret = 0; 2896 u32 num_stripes; 2897 u32 array_size; 2898 u32 len = 0; 2899 u32 cur; 2900 struct btrfs_key key; 2901 2902 mutex_lock(&fs_info->chunk_mutex); 2903 array_size = btrfs_super_sys_array_size(super_copy); 2904 2905 ptr = super_copy->sys_chunk_array; 2906 cur = 0; 2907 2908 while (cur < array_size) { 2909 disk_key = (struct btrfs_disk_key *)ptr; 2910 btrfs_disk_key_to_cpu(&key, disk_key); 2911 2912 len = sizeof(*disk_key); 2913 2914 if (key.type == BTRFS_CHUNK_ITEM_KEY) { 2915 chunk = (struct btrfs_chunk *)(ptr + len); 2916 num_stripes = btrfs_stack_chunk_num_stripes(chunk); 2917 len += btrfs_chunk_item_size(num_stripes); 2918 } else { 2919 ret = -EIO; 2920 break; 2921 } 2922 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID && 2923 key.offset == chunk_offset) { 2924 memmove(ptr, ptr + len, array_size - (cur + len)); 2925 array_size -= len; 2926 btrfs_set_super_sys_array_size(super_copy, array_size); 2927 } else { 2928 ptr += len; 2929 cur += len; 2930 } 2931 } 2932 mutex_unlock(&fs_info->chunk_mutex); 2933 return ret; 2934 } 2935 2936 /* 2937 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent. 
2938 * @logical: Logical block offset in bytes. 2939 * @length: Length of extent in bytes. 2940 * 2941 * Return: Chunk mapping or ERR_PTR. 2942 */ 2943 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info, 2944 u64 logical, u64 length) 2945 { 2946 struct extent_map_tree *em_tree; 2947 struct extent_map *em; 2948 2949 em_tree = &fs_info->mapping_tree; 2950 read_lock(&em_tree->lock); 2951 em = lookup_extent_mapping(em_tree, logical, length); 2952 read_unlock(&em_tree->lock); 2953 2954 if (!em) { 2955 btrfs_crit(fs_info, "unable to find logical %llu length %llu", 2956 logical, length); 2957 return ERR_PTR(-EINVAL); 2958 } 2959 2960 if (em->start > logical || em->start + em->len < logical) { 2961 btrfs_crit(fs_info, 2962 "found a bad mapping, wanted %llu-%llu, found %llu-%llu", 2963 logical, length, em->start, em->start + em->len); 2964 free_extent_map(em); 2965 return ERR_PTR(-EINVAL); 2966 } 2967 2968 /* callers are responsible for dropping em's ref. */ 2969 return em; 2970 } 2971 2972 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 2973 { 2974 struct btrfs_fs_info *fs_info = trans->fs_info; 2975 struct extent_map *em; 2976 struct map_lookup *map; 2977 u64 dev_extent_len = 0; 2978 int i, ret = 0; 2979 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2980 2981 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 2982 if (IS_ERR(em)) { 2983 /* 2984 * This is a logic error, but we don't want to just rely on the 2985 * user having built with ASSERT enabled, so if ASSERT doesn't 2986 * do anything we still error out. 2987 */ 2988 ASSERT(0); 2989 return PTR_ERR(em); 2990 } 2991 map = em->map_lookup; 2992 mutex_lock(&fs_info->chunk_mutex); 2993 check_system_chunk(trans, map->type); 2994 mutex_unlock(&fs_info->chunk_mutex); 2995 2996 /* 2997 * Take the device list mutex to prevent races with the final phase of 2998 * a device replace operation that replaces the device object associated 2999 * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()). 
3000 */ 3001 mutex_lock(&fs_devices->device_list_mutex); 3002 for (i = 0; i < map->num_stripes; i++) { 3003 struct btrfs_device *device = map->stripes[i].dev; 3004 ret = btrfs_free_dev_extent(trans, device, 3005 map->stripes[i].physical, 3006 &dev_extent_len); 3007 if (ret) { 3008 mutex_unlock(&fs_devices->device_list_mutex); 3009 btrfs_abort_transaction(trans, ret); 3010 goto out; 3011 } 3012 3013 if (device->bytes_used > 0) { 3014 mutex_lock(&fs_info->chunk_mutex); 3015 btrfs_device_set_bytes_used(device, 3016 device->bytes_used - dev_extent_len); 3017 atomic64_add(dev_extent_len, &fs_info->free_chunk_space); 3018 btrfs_clear_space_info_full(fs_info); 3019 mutex_unlock(&fs_info->chunk_mutex); 3020 } 3021 3022 ret = btrfs_update_device(trans, device); 3023 if (ret) { 3024 mutex_unlock(&fs_devices->device_list_mutex); 3025 btrfs_abort_transaction(trans, ret); 3026 goto out; 3027 } 3028 } 3029 mutex_unlock(&fs_devices->device_list_mutex); 3030 3031 ret = btrfs_free_chunk(trans, chunk_offset); 3032 if (ret) { 3033 btrfs_abort_transaction(trans, ret); 3034 goto out; 3035 } 3036 3037 trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len); 3038 3039 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 3040 ret = btrfs_del_sys_chunk(fs_info, chunk_offset); 3041 if (ret) { 3042 btrfs_abort_transaction(trans, ret); 3043 goto out; 3044 } 3045 } 3046 3047 ret = btrfs_remove_block_group(trans, chunk_offset, em); 3048 if (ret) { 3049 btrfs_abort_transaction(trans, ret); 3050 goto out; 3051 } 3052 3053 out: 3054 /* once for us */ 3055 free_extent_map(em); 3056 return ret; 3057 } 3058 3059 static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 3060 { 3061 struct btrfs_root *root = fs_info->chunk_root; 3062 struct btrfs_trans_handle *trans; 3063 int ret; 3064 3065 /* 3066 * Prevent races with automatic removal of unused block groups. 3067 * After we relocate and before we remove the chunk with offset 3068 * chunk_offset, automatic removal of the block group can kick in, 3069 * resulting in a failure when calling btrfs_remove_chunk() below. 3070 * 3071 * Make sure to acquire this mutex before doing a tree search (dev 3072 * or chunk trees) to find chunks. Otherwise the cleaner kthread might 3073 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after 3074 * we release the path used to search the chunk/dev tree and before 3075 * the current task acquires this mutex and calls us. 3076 */ 3077 lockdep_assert_held(&fs_info->delete_unused_bgs_mutex); 3078 3079 ret = btrfs_can_relocate(fs_info, chunk_offset); 3080 if (ret) 3081 return -ENOSPC; 3082 3083 /* step one, relocate all the extents inside this chunk */ 3084 btrfs_scrub_pause(fs_info); 3085 ret = btrfs_relocate_block_group(fs_info, chunk_offset); 3086 btrfs_scrub_continue(fs_info); 3087 if (ret) 3088 return ret; 3089 3090 /* 3091 * We add the kobjects here (and after forcing data chunk creation) 3092 * since relocation is the only place we'll create chunks of a new 3093 * type at runtime. The only place where we'll remove the last 3094 * chunk of a type is the call immediately below this one. Even 3095 * so, we're protected against races with the cleaner thread since 3096 * we're covered by the delete_unused_bgs_mutex. 
3097 */ 3098 btrfs_add_raid_kobjects(fs_info); 3099 3100 trans = btrfs_start_trans_remove_block_group(root->fs_info, 3101 chunk_offset); 3102 if (IS_ERR(trans)) { 3103 ret = PTR_ERR(trans); 3104 btrfs_handle_fs_error(root->fs_info, ret, NULL); 3105 return ret; 3106 } 3107 3108 /* 3109 * step two, delete the device extents and the 3110 * chunk tree entries 3111 */ 3112 ret = btrfs_remove_chunk(trans, chunk_offset); 3113 btrfs_end_transaction(trans); 3114 return ret; 3115 } 3116 3117 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info) 3118 { 3119 struct btrfs_root *chunk_root = fs_info->chunk_root; 3120 struct btrfs_path *path; 3121 struct extent_buffer *leaf; 3122 struct btrfs_chunk *chunk; 3123 struct btrfs_key key; 3124 struct btrfs_key found_key; 3125 u64 chunk_type; 3126 bool retried = false; 3127 int failed = 0; 3128 int ret; 3129 3130 path = btrfs_alloc_path(); 3131 if (!path) 3132 return -ENOMEM; 3133 3134 again: 3135 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3136 key.offset = (u64)-1; 3137 key.type = BTRFS_CHUNK_ITEM_KEY; 3138 3139 while (1) { 3140 mutex_lock(&fs_info->delete_unused_bgs_mutex); 3141 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3142 if (ret < 0) { 3143 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3144 goto error; 3145 } 3146 BUG_ON(ret == 0); /* Corruption */ 3147 3148 ret = btrfs_previous_item(chunk_root, path, key.objectid, 3149 key.type); 3150 if (ret) 3151 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3152 if (ret < 0) 3153 goto error; 3154 if (ret > 0) 3155 break; 3156 3157 leaf = path->nodes[0]; 3158 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3159 3160 chunk = btrfs_item_ptr(leaf, path->slots[0], 3161 struct btrfs_chunk); 3162 chunk_type = btrfs_chunk_type(leaf, chunk); 3163 btrfs_release_path(path); 3164 3165 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { 3166 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3167 if (ret == -ENOSPC) 3168 failed++; 3169 else 3170 BUG_ON(ret); 3171 } 3172 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3173 3174 if (found_key.offset == 0) 3175 break; 3176 key.offset = found_key.offset - 1; 3177 } 3178 ret = 0; 3179 if (failed && !retried) { 3180 failed = 0; 3181 retried = true; 3182 goto again; 3183 } else if (WARN_ON(failed && retried)) { 3184 ret = -ENOSPC; 3185 } 3186 error: 3187 btrfs_free_path(path); 3188 return ret; 3189 } 3190 3191 /* 3192 * return 1 : allocate a data chunk successfully, 3193 * return <0: errors during allocating a data chunk, 3194 * return 0 : no need to allocate a data chunk. 
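 *
 * A data chunk is only allocated here when no data bytes are in use at
 * all, i.e. the chunk about to be relocated may be the last data chunk
 * and relocating it would otherwise lose the data raid profile.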
3195 */ 3196 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info, 3197 u64 chunk_offset) 3198 { 3199 struct btrfs_block_group_cache *cache; 3200 u64 bytes_used; 3201 u64 chunk_type; 3202 3203 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3204 ASSERT(cache); 3205 chunk_type = cache->flags; 3206 btrfs_put_block_group(cache); 3207 3208 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) { 3209 spin_lock(&fs_info->data_sinfo->lock); 3210 bytes_used = fs_info->data_sinfo->bytes_used; 3211 spin_unlock(&fs_info->data_sinfo->lock); 3212 3213 if (!bytes_used) { 3214 struct btrfs_trans_handle *trans; 3215 int ret; 3216 3217 trans = btrfs_join_transaction(fs_info->tree_root); 3218 if (IS_ERR(trans)) 3219 return PTR_ERR(trans); 3220 3221 ret = btrfs_force_chunk_alloc(trans, 3222 BTRFS_BLOCK_GROUP_DATA); 3223 btrfs_end_transaction(trans); 3224 if (ret < 0) 3225 return ret; 3226 3227 btrfs_add_raid_kobjects(fs_info); 3228 3229 return 1; 3230 } 3231 } 3232 return 0; 3233 } 3234 3235 static int insert_balance_item(struct btrfs_fs_info *fs_info, 3236 struct btrfs_balance_control *bctl) 3237 { 3238 struct btrfs_root *root = fs_info->tree_root; 3239 struct btrfs_trans_handle *trans; 3240 struct btrfs_balance_item *item; 3241 struct btrfs_disk_balance_args disk_bargs; 3242 struct btrfs_path *path; 3243 struct extent_buffer *leaf; 3244 struct btrfs_key key; 3245 int ret, err; 3246 3247 path = btrfs_alloc_path(); 3248 if (!path) 3249 return -ENOMEM; 3250 3251 trans = btrfs_start_transaction(root, 0); 3252 if (IS_ERR(trans)) { 3253 btrfs_free_path(path); 3254 return PTR_ERR(trans); 3255 } 3256 3257 key.objectid = BTRFS_BALANCE_OBJECTID; 3258 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3259 key.offset = 0; 3260 3261 ret = btrfs_insert_empty_item(trans, root, path, &key, 3262 sizeof(*item)); 3263 if (ret) 3264 goto out; 3265 3266 leaf = path->nodes[0]; 3267 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 3268 3269 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item)); 3270 3271 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data); 3272 btrfs_set_balance_data(leaf, item, &disk_bargs); 3273 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta); 3274 btrfs_set_balance_meta(leaf, item, &disk_bargs); 3275 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys); 3276 btrfs_set_balance_sys(leaf, item, &disk_bargs); 3277 3278 btrfs_set_balance_flags(leaf, item, bctl->flags); 3279 3280 btrfs_mark_buffer_dirty(leaf); 3281 out: 3282 btrfs_free_path(path); 3283 err = btrfs_commit_transaction(trans); 3284 if (err && !ret) 3285 ret = err; 3286 return ret; 3287 } 3288 3289 static int del_balance_item(struct btrfs_fs_info *fs_info) 3290 { 3291 struct btrfs_root *root = fs_info->tree_root; 3292 struct btrfs_trans_handle *trans; 3293 struct btrfs_path *path; 3294 struct btrfs_key key; 3295 int ret, err; 3296 3297 path = btrfs_alloc_path(); 3298 if (!path) 3299 return -ENOMEM; 3300 3301 trans = btrfs_start_transaction(root, 0); 3302 if (IS_ERR(trans)) { 3303 btrfs_free_path(path); 3304 return PTR_ERR(trans); 3305 } 3306 3307 key.objectid = BTRFS_BALANCE_OBJECTID; 3308 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3309 key.offset = 0; 3310 3311 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 3312 if (ret < 0) 3313 goto out; 3314 if (ret > 0) { 3315 ret = -ENOENT; 3316 goto out; 3317 } 3318 3319 ret = btrfs_del_item(trans, root, path); 3320 out: 3321 btrfs_free_path(path); 3322 err = btrfs_commit_transaction(trans); 3323 if (err && !ret) 3324 ret = err; 3325 return ret; 3326 } 3327 
3328 /* 3329 * This is a heuristic used to reduce the number of chunks balanced on 3330 * resume after balance was interrupted. 3331 */ 3332 static void update_balance_args(struct btrfs_balance_control *bctl) 3333 { 3334 /* 3335 * Turn on soft mode for chunk types that were being converted. 3336 */ 3337 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) 3338 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT; 3339 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) 3340 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT; 3341 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) 3342 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT; 3343 3344 /* 3345 * Turn on usage filter if is not already used. The idea is 3346 * that chunks that we have already balanced should be 3347 * reasonably full. Don't do it for chunks that are being 3348 * converted - that will keep us from relocating unconverted 3349 * (albeit full) chunks. 3350 */ 3351 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) && 3352 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3353 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3354 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE; 3355 bctl->data.usage = 90; 3356 } 3357 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) && 3358 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3359 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3360 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE; 3361 bctl->sys.usage = 90; 3362 } 3363 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) && 3364 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3365 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3366 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE; 3367 bctl->meta.usage = 90; 3368 } 3369 } 3370 3371 /* 3372 * Clear the balance status in fs_info and delete the balance item from disk. 3373 */ 3374 static void reset_balance_state(struct btrfs_fs_info *fs_info) 3375 { 3376 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3377 int ret; 3378 3379 BUG_ON(!fs_info->balance_ctl); 3380 3381 spin_lock(&fs_info->balance_lock); 3382 fs_info->balance_ctl = NULL; 3383 spin_unlock(&fs_info->balance_lock); 3384 3385 kfree(bctl); 3386 ret = del_balance_item(fs_info); 3387 if (ret) 3388 btrfs_handle_fs_error(fs_info, ret, NULL); 3389 } 3390 3391 /* 3392 * Balance filters. Return 1 if chunk should be filtered out 3393 * (should not be balanced). 
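 *
 * Each filter checks a single aspect of the chunk (profile, usage, devid,
 * physical/virtual range, stripe count, ...); should_balance_chunk()
 * combines them.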
3394 */ 3395 static int chunk_profiles_filter(u64 chunk_type, 3396 struct btrfs_balance_args *bargs) 3397 { 3398 chunk_type = chunk_to_extended(chunk_type) & 3399 BTRFS_EXTENDED_PROFILE_MASK; 3400 3401 if (bargs->profiles & chunk_type) 3402 return 0; 3403 3404 return 1; 3405 } 3406 3407 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, 3408 struct btrfs_balance_args *bargs) 3409 { 3410 struct btrfs_block_group_cache *cache; 3411 u64 chunk_used; 3412 u64 user_thresh_min; 3413 u64 user_thresh_max; 3414 int ret = 1; 3415 3416 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3417 chunk_used = btrfs_block_group_used(&cache->item); 3418 3419 if (bargs->usage_min == 0) 3420 user_thresh_min = 0; 3421 else 3422 user_thresh_min = div_factor_fine(cache->key.offset, 3423 bargs->usage_min); 3424 3425 if (bargs->usage_max == 0) 3426 user_thresh_max = 1; 3427 else if (bargs->usage_max > 100) 3428 user_thresh_max = cache->key.offset; 3429 else 3430 user_thresh_max = div_factor_fine(cache->key.offset, 3431 bargs->usage_max); 3432 3433 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max) 3434 ret = 0; 3435 3436 btrfs_put_block_group(cache); 3437 return ret; 3438 } 3439 3440 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, 3441 u64 chunk_offset, struct btrfs_balance_args *bargs) 3442 { 3443 struct btrfs_block_group_cache *cache; 3444 u64 chunk_used, user_thresh; 3445 int ret = 1; 3446 3447 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3448 chunk_used = btrfs_block_group_used(&cache->item); 3449 3450 if (bargs->usage_min == 0) 3451 user_thresh = 1; 3452 else if (bargs->usage > 100) 3453 user_thresh = cache->key.offset; 3454 else 3455 user_thresh = div_factor_fine(cache->key.offset, 3456 bargs->usage); 3457 3458 if (chunk_used < user_thresh) 3459 ret = 0; 3460 3461 btrfs_put_block_group(cache); 3462 return ret; 3463 } 3464 3465 static int chunk_devid_filter(struct extent_buffer *leaf, 3466 struct btrfs_chunk *chunk, 3467 struct btrfs_balance_args *bargs) 3468 { 3469 struct btrfs_stripe *stripe; 3470 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3471 int i; 3472 3473 for (i = 0; i < num_stripes; i++) { 3474 stripe = btrfs_stripe_nr(chunk, i); 3475 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid) 3476 return 0; 3477 } 3478 3479 return 1; 3480 } 3481 3482 static u64 calc_data_stripes(u64 type, int num_stripes) 3483 { 3484 const int index = btrfs_bg_flags_to_raid_index(type); 3485 const int ncopies = btrfs_raid_array[index].ncopies; 3486 const int nparity = btrfs_raid_array[index].nparity; 3487 3488 if (nparity) 3489 return num_stripes - nparity; 3490 else 3491 return num_stripes / ncopies; 3492 } 3493 3494 /* [pstart, pend) */ 3495 static int chunk_drange_filter(struct extent_buffer *leaf, 3496 struct btrfs_chunk *chunk, 3497 struct btrfs_balance_args *bargs) 3498 { 3499 struct btrfs_stripe *stripe; 3500 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3501 u64 stripe_offset; 3502 u64 stripe_length; 3503 u64 type; 3504 int factor; 3505 int i; 3506 3507 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID)) 3508 return 0; 3509 3510 type = btrfs_chunk_type(leaf, chunk); 3511 factor = calc_data_stripes(type, num_stripes); 3512 3513 for (i = 0; i < num_stripes; i++) { 3514 stripe = btrfs_stripe_nr(chunk, i); 3515 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid) 3516 continue; 3517 3518 stripe_offset = btrfs_stripe_offset(leaf, stripe); 3519 stripe_length = btrfs_chunk_length(leaf, chunk); 3520 stripe_length = 
div_u64(stripe_length, factor); 3521 3522 if (stripe_offset < bargs->pend && 3523 stripe_offset + stripe_length > bargs->pstart) 3524 return 0; 3525 } 3526 3527 return 1; 3528 } 3529 3530 /* [vstart, vend) */ 3531 static int chunk_vrange_filter(struct extent_buffer *leaf, 3532 struct btrfs_chunk *chunk, 3533 u64 chunk_offset, 3534 struct btrfs_balance_args *bargs) 3535 { 3536 if (chunk_offset < bargs->vend && 3537 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart) 3538 /* at least part of the chunk is inside this vrange */ 3539 return 0; 3540 3541 return 1; 3542 } 3543 3544 static int chunk_stripes_range_filter(struct extent_buffer *leaf, 3545 struct btrfs_chunk *chunk, 3546 struct btrfs_balance_args *bargs) 3547 { 3548 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3549 3550 if (bargs->stripes_min <= num_stripes 3551 && num_stripes <= bargs->stripes_max) 3552 return 0; 3553 3554 return 1; 3555 } 3556 3557 static int chunk_soft_convert_filter(u64 chunk_type, 3558 struct btrfs_balance_args *bargs) 3559 { 3560 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 3561 return 0; 3562 3563 chunk_type = chunk_to_extended(chunk_type) & 3564 BTRFS_EXTENDED_PROFILE_MASK; 3565 3566 if (bargs->target == chunk_type) 3567 return 1; 3568 3569 return 0; 3570 } 3571 3572 static int should_balance_chunk(struct extent_buffer *leaf, 3573 struct btrfs_chunk *chunk, u64 chunk_offset) 3574 { 3575 struct btrfs_fs_info *fs_info = leaf->fs_info; 3576 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3577 struct btrfs_balance_args *bargs = NULL; 3578 u64 chunk_type = btrfs_chunk_type(leaf, chunk); 3579 3580 /* type filter */ 3581 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) & 3582 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) { 3583 return 0; 3584 } 3585 3586 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3587 bargs = &bctl->data; 3588 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3589 bargs = &bctl->sys; 3590 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3591 bargs = &bctl->meta; 3592 3593 /* profiles filter */ 3594 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) && 3595 chunk_profiles_filter(chunk_type, bargs)) { 3596 return 0; 3597 } 3598 3599 /* usage filter */ 3600 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) && 3601 chunk_usage_filter(fs_info, chunk_offset, bargs)) { 3602 return 0; 3603 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3604 chunk_usage_range_filter(fs_info, chunk_offset, bargs)) { 3605 return 0; 3606 } 3607 3608 /* devid filter */ 3609 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) && 3610 chunk_devid_filter(leaf, chunk, bargs)) { 3611 return 0; 3612 } 3613 3614 /* drange filter, makes sense only with devid filter */ 3615 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) && 3616 chunk_drange_filter(leaf, chunk, bargs)) { 3617 return 0; 3618 } 3619 3620 /* vrange filter */ 3621 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) && 3622 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) { 3623 return 0; 3624 } 3625 3626 /* stripes filter */ 3627 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) && 3628 chunk_stripes_range_filter(leaf, chunk, bargs)) { 3629 return 0; 3630 } 3631 3632 /* soft profile changing mode */ 3633 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) && 3634 chunk_soft_convert_filter(chunk_type, bargs)) { 3635 return 0; 3636 } 3637 3638 /* 3639 * limited by count, must be the last filter 3640 */ 3641 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) { 3642 if (bargs->limit == 0) 3643 return 0; 3644 else 3645 bargs->limit--; 3646 } else if 
((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3647 		/*
3648 		 * Same logic as the 'limit' filter; the minimum cannot be
3649 		 * determined here because we do not have the global information
3650 		 * about the count of all chunks that satisfy the filters.
3651 		 */
3652 		if (bargs->limit_max == 0)
3653 			return 0;
3654 		else
3655 			bargs->limit_max--;
3656 	}
3657 
3658 	return 1;
3659 }
3660 
3661 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3662 {
3663 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3664 	struct btrfs_root *chunk_root = fs_info->chunk_root;
3665 	u64 chunk_type;
3666 	struct btrfs_chunk *chunk;
3667 	struct btrfs_path *path = NULL;
3668 	struct btrfs_key key;
3669 	struct btrfs_key found_key;
3670 	struct extent_buffer *leaf;
3671 	int slot;
3672 	int ret;
3673 	int enospc_errors = 0;
3674 	bool counting = true;
3675 	/* The single value limit and min/max limits use the same bytes in the balance args, save them before the counting pass */
3676 	u64 limit_data = bctl->data.limit;
3677 	u64 limit_meta = bctl->meta.limit;
3678 	u64 limit_sys = bctl->sys.limit;
3679 	u32 count_data = 0;
3680 	u32 count_meta = 0;
3681 	u32 count_sys = 0;
3682 	int chunk_reserved = 0;
3683 
3684 	path = btrfs_alloc_path();
3685 	if (!path) {
3686 		ret = -ENOMEM;
3687 		goto error;
3688 	}
3689 
3690 	/* zero out stat counters */
3691 	spin_lock(&fs_info->balance_lock);
3692 	memset(&bctl->stat, 0, sizeof(bctl->stat));
3693 	spin_unlock(&fs_info->balance_lock);
3694 again:
3695 	if (!counting) {
3696 		/*
3697 		 * The single value limit and min/max limits use the same bytes
3698 		 * in the balance args, restore the saved values for the real pass
3699 		 */
3700 		bctl->data.limit = limit_data;
3701 		bctl->meta.limit = limit_meta;
3702 		bctl->sys.limit = limit_sys;
3703 	}
3704 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3705 	key.offset = (u64)-1;
3706 	key.type = BTRFS_CHUNK_ITEM_KEY;
3707 
3708 	while (1) {
3709 		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3710 		    atomic_read(&fs_info->balance_cancel_req)) {
3711 			ret = -ECANCELED;
3712 			goto error;
3713 		}
3714 
3715 		mutex_lock(&fs_info->delete_unused_bgs_mutex);
3716 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3717 		if (ret < 0) {
3718 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3719 			goto error;
3720 		}
3721 
3722 		/*
3723 		 * This shouldn't happen; it means the last relocation
3724 		 * failed
3725 		 */
3726 		if (ret == 0)
3727 			BUG(); /* FIXME break ?
*/ 3728 3729 ret = btrfs_previous_item(chunk_root, path, 0, 3730 BTRFS_CHUNK_ITEM_KEY); 3731 if (ret) { 3732 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3733 ret = 0; 3734 break; 3735 } 3736 3737 leaf = path->nodes[0]; 3738 slot = path->slots[0]; 3739 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3740 3741 if (found_key.objectid != key.objectid) { 3742 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3743 break; 3744 } 3745 3746 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 3747 chunk_type = btrfs_chunk_type(leaf, chunk); 3748 3749 if (!counting) { 3750 spin_lock(&fs_info->balance_lock); 3751 bctl->stat.considered++; 3752 spin_unlock(&fs_info->balance_lock); 3753 } 3754 3755 ret = should_balance_chunk(leaf, chunk, found_key.offset); 3756 3757 btrfs_release_path(path); 3758 if (!ret) { 3759 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3760 goto loop; 3761 } 3762 3763 if (counting) { 3764 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3765 spin_lock(&fs_info->balance_lock); 3766 bctl->stat.expected++; 3767 spin_unlock(&fs_info->balance_lock); 3768 3769 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3770 count_data++; 3771 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3772 count_sys++; 3773 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3774 count_meta++; 3775 3776 goto loop; 3777 } 3778 3779 /* 3780 * Apply limit_min filter, no need to check if the LIMITS 3781 * filter is used, limit_min is 0 by default 3782 */ 3783 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) && 3784 count_data < bctl->data.limit_min) 3785 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) && 3786 count_meta < bctl->meta.limit_min) 3787 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) && 3788 count_sys < bctl->sys.limit_min)) { 3789 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3790 goto loop; 3791 } 3792 3793 if (!chunk_reserved) { 3794 /* 3795 * We may be relocating the only data chunk we have, 3796 * which could potentially end up with losing data's 3797 * raid profile, so lets allocate an empty one in 3798 * advance. 3799 */ 3800 ret = btrfs_may_alloc_data_chunk(fs_info, 3801 found_key.offset); 3802 if (ret < 0) { 3803 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3804 goto error; 3805 } else if (ret == 1) { 3806 chunk_reserved = 1; 3807 } 3808 } 3809 3810 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3811 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3812 if (ret == -ENOSPC) { 3813 enospc_errors++; 3814 } else if (ret == -ETXTBSY) { 3815 btrfs_info(fs_info, 3816 "skipping relocation of block group %llu due to active swapfile", 3817 found_key.offset); 3818 ret = 0; 3819 } else if (ret) { 3820 goto error; 3821 } else { 3822 spin_lock(&fs_info->balance_lock); 3823 bctl->stat.completed++; 3824 spin_unlock(&fs_info->balance_lock); 3825 } 3826 loop: 3827 if (found_key.offset == 0) 3828 break; 3829 key.offset = found_key.offset - 1; 3830 } 3831 3832 if (counting) { 3833 btrfs_release_path(path); 3834 counting = false; 3835 goto again; 3836 } 3837 error: 3838 btrfs_free_path(path); 3839 if (enospc_errors) { 3840 btrfs_info(fs_info, "%d enospc errors during balance", 3841 enospc_errors); 3842 if (!ret) 3843 ret = -ENOSPC; 3844 } 3845 3846 return ret; 3847 } 3848 3849 /** 3850 * alloc_profile_is_valid - see if a given profile is valid and reduced 3851 * @flags: profile to validate 3852 * @extended: if true @flags is treated as an extended profile 3853 */ 3854 static int alloc_profile_is_valid(u64 flags, int extended) 3855 { 3856 u64 mask = (extended ? 
BTRFS_EXTENDED_PROFILE_MASK : 3857 BTRFS_BLOCK_GROUP_PROFILE_MASK); 3858 3859 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK; 3860 3861 /* 1) check that all other bits are zeroed */ 3862 if (flags & ~mask) 3863 return 0; 3864 3865 /* 2) see if profile is reduced */ 3866 if (flags == 0) 3867 return !extended; /* "0" is valid for usual profiles */ 3868 3869 /* true if exactly one bit set */ 3870 return is_power_of_2(flags); 3871 } 3872 3873 static inline int balance_need_close(struct btrfs_fs_info *fs_info) 3874 { 3875 /* cancel requested || normal exit path */ 3876 return atomic_read(&fs_info->balance_cancel_req) || 3877 (atomic_read(&fs_info->balance_pause_req) == 0 && 3878 atomic_read(&fs_info->balance_cancel_req) == 0); 3879 } 3880 3881 /* Non-zero return value signifies invalidity */ 3882 static inline int validate_convert_profile(struct btrfs_balance_args *bctl_arg, 3883 u64 allowed) 3884 { 3885 return ((bctl_arg->flags & BTRFS_BALANCE_ARGS_CONVERT) && 3886 (!alloc_profile_is_valid(bctl_arg->target, 1) || 3887 (bctl_arg->target & ~allowed))); 3888 } 3889 3890 /* 3891 * Fill @buf with textual description of balance filter flags @bargs, up to 3892 * @size_buf including the terminating null. The output may be trimmed if it 3893 * does not fit into the provided buffer. 3894 */ 3895 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf, 3896 u32 size_buf) 3897 { 3898 int ret; 3899 u32 size_bp = size_buf; 3900 char *bp = buf; 3901 u64 flags = bargs->flags; 3902 char tmp_buf[128] = {'\0'}; 3903 3904 if (!flags) 3905 return; 3906 3907 #define CHECK_APPEND_NOARG(a) \ 3908 do { \ 3909 ret = snprintf(bp, size_bp, (a)); \ 3910 if (ret < 0 || ret >= size_bp) \ 3911 goto out_overflow; \ 3912 size_bp -= ret; \ 3913 bp += ret; \ 3914 } while (0) 3915 3916 #define CHECK_APPEND_1ARG(a, v1) \ 3917 do { \ 3918 ret = snprintf(bp, size_bp, (a), (v1)); \ 3919 if (ret < 0 || ret >= size_bp) \ 3920 goto out_overflow; \ 3921 size_bp -= ret; \ 3922 bp += ret; \ 3923 } while (0) 3924 3925 #define CHECK_APPEND_2ARG(a, v1, v2) \ 3926 do { \ 3927 ret = snprintf(bp, size_bp, (a), (v1), (v2)); \ 3928 if (ret < 0 || ret >= size_bp) \ 3929 goto out_overflow; \ 3930 size_bp -= ret; \ 3931 bp += ret; \ 3932 } while (0) 3933 3934 if (flags & BTRFS_BALANCE_ARGS_CONVERT) 3935 CHECK_APPEND_1ARG("convert=%s,", 3936 btrfs_bg_type_to_raid_name(bargs->target)); 3937 3938 if (flags & BTRFS_BALANCE_ARGS_SOFT) 3939 CHECK_APPEND_NOARG("soft,"); 3940 3941 if (flags & BTRFS_BALANCE_ARGS_PROFILES) { 3942 btrfs_describe_block_groups(bargs->profiles, tmp_buf, 3943 sizeof(tmp_buf)); 3944 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf); 3945 } 3946 3947 if (flags & BTRFS_BALANCE_ARGS_USAGE) 3948 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage); 3949 3950 if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) 3951 CHECK_APPEND_2ARG("usage=%u..%u,", 3952 bargs->usage_min, bargs->usage_max); 3953 3954 if (flags & BTRFS_BALANCE_ARGS_DEVID) 3955 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid); 3956 3957 if (flags & BTRFS_BALANCE_ARGS_DRANGE) 3958 CHECK_APPEND_2ARG("drange=%llu..%llu,", 3959 bargs->pstart, bargs->pend); 3960 3961 if (flags & BTRFS_BALANCE_ARGS_VRANGE) 3962 CHECK_APPEND_2ARG("vrange=%llu..%llu,", 3963 bargs->vstart, bargs->vend); 3964 3965 if (flags & BTRFS_BALANCE_ARGS_LIMIT) 3966 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit); 3967 3968 if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE) 3969 CHECK_APPEND_2ARG("limit=%u..%u,", 3970 bargs->limit_min, bargs->limit_max); 3971 3972 if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) 3973 
CHECK_APPEND_2ARG("stripes=%u..%u,", 3974 bargs->stripes_min, bargs->stripes_max); 3975 3976 #undef CHECK_APPEND_2ARG 3977 #undef CHECK_APPEND_1ARG 3978 #undef CHECK_APPEND_NOARG 3979 3980 out_overflow: 3981 3982 if (size_bp < size_buf) 3983 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */ 3984 else 3985 buf[0] = '\0'; 3986 } 3987 3988 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info) 3989 { 3990 u32 size_buf = 1024; 3991 char tmp_buf[192] = {'\0'}; 3992 char *buf; 3993 char *bp; 3994 u32 size_bp = size_buf; 3995 int ret; 3996 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3997 3998 buf = kzalloc(size_buf, GFP_KERNEL); 3999 if (!buf) 4000 return; 4001 4002 bp = buf; 4003 4004 #define CHECK_APPEND_1ARG(a, v1) \ 4005 do { \ 4006 ret = snprintf(bp, size_bp, (a), (v1)); \ 4007 if (ret < 0 || ret >= size_bp) \ 4008 goto out_overflow; \ 4009 size_bp -= ret; \ 4010 bp += ret; \ 4011 } while (0) 4012 4013 if (bctl->flags & BTRFS_BALANCE_FORCE) 4014 CHECK_APPEND_1ARG("%s", "-f "); 4015 4016 if (bctl->flags & BTRFS_BALANCE_DATA) { 4017 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf)); 4018 CHECK_APPEND_1ARG("-d%s ", tmp_buf); 4019 } 4020 4021 if (bctl->flags & BTRFS_BALANCE_METADATA) { 4022 describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf)); 4023 CHECK_APPEND_1ARG("-m%s ", tmp_buf); 4024 } 4025 4026 if (bctl->flags & BTRFS_BALANCE_SYSTEM) { 4027 describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf)); 4028 CHECK_APPEND_1ARG("-s%s ", tmp_buf); 4029 } 4030 4031 #undef CHECK_APPEND_1ARG 4032 4033 out_overflow: 4034 4035 if (size_bp < size_buf) 4036 buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */ 4037 btrfs_info(fs_info, "balance: %s %s", 4038 (bctl->flags & BTRFS_BALANCE_RESUME) ? 4039 "resume" : "start", buf); 4040 4041 kfree(buf); 4042 } 4043 4044 /* 4045 * Should be called with balance mutexe held 4046 */ 4047 int btrfs_balance(struct btrfs_fs_info *fs_info, 4048 struct btrfs_balance_control *bctl, 4049 struct btrfs_ioctl_balance_args *bargs) 4050 { 4051 u64 meta_target, data_target; 4052 u64 allowed; 4053 int mixed = 0; 4054 int ret; 4055 u64 num_devices; 4056 unsigned seq; 4057 bool reducing_integrity; 4058 int i; 4059 4060 if (btrfs_fs_closing(fs_info) || 4061 atomic_read(&fs_info->balance_pause_req) || 4062 atomic_read(&fs_info->balance_cancel_req)) { 4063 ret = -EINVAL; 4064 goto out; 4065 } 4066 4067 allowed = btrfs_super_incompat_flags(fs_info->super_copy); 4068 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) 4069 mixed = 1; 4070 4071 /* 4072 * In case of mixed groups both data and meta should be picked, 4073 * and identical options should be given for both of them. 
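 *
 * Roughly, with btrfs-progs this means a conversion has to name both
 * profiles, e.g. (illustrative invocation only):
 *
 *   btrfs balance start -dconvert=raid1 -mconvert=raid1 <mnt>
 *
 * passing only one of -d/-m, or different targets, trips the memcmp()
 * below and the balance is rejected with -EINVAL.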
4074 */ 4075 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA; 4076 if (mixed && (bctl->flags & allowed)) { 4077 if (!(bctl->flags & BTRFS_BALANCE_DATA) || 4078 !(bctl->flags & BTRFS_BALANCE_METADATA) || 4079 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) { 4080 btrfs_err(fs_info, 4081 "balance: mixed groups data and metadata options must be the same"); 4082 ret = -EINVAL; 4083 goto out; 4084 } 4085 } 4086 4087 num_devices = btrfs_num_devices(fs_info); 4088 allowed = 0; 4089 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) 4090 if (num_devices >= btrfs_raid_array[i].devs_min) 4091 allowed |= btrfs_raid_array[i].bg_flag; 4092 4093 if (validate_convert_profile(&bctl->data, allowed)) { 4094 btrfs_err(fs_info, 4095 "balance: invalid convert data profile %s", 4096 btrfs_bg_type_to_raid_name(bctl->data.target)); 4097 ret = -EINVAL; 4098 goto out; 4099 } 4100 if (validate_convert_profile(&bctl->meta, allowed)) { 4101 btrfs_err(fs_info, 4102 "balance: invalid convert metadata profile %s", 4103 btrfs_bg_type_to_raid_name(bctl->meta.target)); 4104 ret = -EINVAL; 4105 goto out; 4106 } 4107 if (validate_convert_profile(&bctl->sys, allowed)) { 4108 btrfs_err(fs_info, 4109 "balance: invalid convert system profile %s", 4110 btrfs_bg_type_to_raid_name(bctl->sys.target)); 4111 ret = -EINVAL; 4112 goto out; 4113 } 4114 4115 /* 4116 * Allow to reduce metadata or system integrity only if force set for 4117 * profiles with redundancy (copies, parity) 4118 */ 4119 allowed = 0; 4120 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) { 4121 if (btrfs_raid_array[i].ncopies >= 2 || 4122 btrfs_raid_array[i].tolerated_failures >= 1) 4123 allowed |= btrfs_raid_array[i].bg_flag; 4124 } 4125 do { 4126 seq = read_seqbegin(&fs_info->profiles_lock); 4127 4128 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4129 (fs_info->avail_system_alloc_bits & allowed) && 4130 !(bctl->sys.target & allowed)) || 4131 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4132 (fs_info->avail_metadata_alloc_bits & allowed) && 4133 !(bctl->meta.target & allowed))) 4134 reducing_integrity = true; 4135 else 4136 reducing_integrity = false; 4137 4138 /* if we're not converting, the target field is uninitialized */ 4139 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4140 bctl->meta.target : fs_info->avail_metadata_alloc_bits; 4141 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 
4142 bctl->data.target : fs_info->avail_data_alloc_bits; 4143 } while (read_seqretry(&fs_info->profiles_lock, seq)); 4144 4145 if (reducing_integrity) { 4146 if (bctl->flags & BTRFS_BALANCE_FORCE) { 4147 btrfs_info(fs_info, 4148 "balance: force reducing metadata integrity"); 4149 } else { 4150 btrfs_err(fs_info, 4151 "balance: reduces metadata integrity, use --force if you want this"); 4152 ret = -EINVAL; 4153 goto out; 4154 } 4155 } 4156 4157 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) < 4158 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) { 4159 btrfs_warn(fs_info, 4160 "balance: metadata profile %s has lower redundancy than data profile %s", 4161 btrfs_bg_type_to_raid_name(meta_target), 4162 btrfs_bg_type_to_raid_name(data_target)); 4163 } 4164 4165 if (fs_info->send_in_progress) { 4166 btrfs_warn_rl(fs_info, 4167 "cannot run balance while send operations are in progress (%d in progress)", 4168 fs_info->send_in_progress); 4169 ret = -EAGAIN; 4170 goto out; 4171 } 4172 4173 ret = insert_balance_item(fs_info, bctl); 4174 if (ret && ret != -EEXIST) 4175 goto out; 4176 4177 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) { 4178 BUG_ON(ret == -EEXIST); 4179 BUG_ON(fs_info->balance_ctl); 4180 spin_lock(&fs_info->balance_lock); 4181 fs_info->balance_ctl = bctl; 4182 spin_unlock(&fs_info->balance_lock); 4183 } else { 4184 BUG_ON(ret != -EEXIST); 4185 spin_lock(&fs_info->balance_lock); 4186 update_balance_args(bctl); 4187 spin_unlock(&fs_info->balance_lock); 4188 } 4189 4190 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4191 set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4192 describe_balance_start_or_resume(fs_info); 4193 mutex_unlock(&fs_info->balance_mutex); 4194 4195 ret = __btrfs_balance(fs_info); 4196 4197 mutex_lock(&fs_info->balance_mutex); 4198 if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) 4199 btrfs_info(fs_info, "balance: paused"); 4200 else if (ret == -ECANCELED && atomic_read(&fs_info->balance_cancel_req)) 4201 btrfs_info(fs_info, "balance: canceled"); 4202 else 4203 btrfs_info(fs_info, "balance: ended with status: %d", ret); 4204 4205 clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4206 4207 if (bargs) { 4208 memset(bargs, 0, sizeof(*bargs)); 4209 btrfs_update_ioctl_balance_args(fs_info, bargs); 4210 } 4211 4212 if ((ret && ret != -ECANCELED && ret != -ENOSPC) || 4213 balance_need_close(fs_info)) { 4214 reset_balance_state(fs_info); 4215 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags); 4216 } 4217 4218 wake_up(&fs_info->balance_wait_q); 4219 4220 return ret; 4221 out: 4222 if (bctl->flags & BTRFS_BALANCE_RESUME) 4223 reset_balance_state(fs_info); 4224 else 4225 kfree(bctl); 4226 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags); 4227 4228 return ret; 4229 } 4230 4231 static int balance_kthread(void *data) 4232 { 4233 struct btrfs_fs_info *fs_info = data; 4234 int ret = 0; 4235 4236 mutex_lock(&fs_info->balance_mutex); 4237 if (fs_info->balance_ctl) 4238 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL); 4239 mutex_unlock(&fs_info->balance_mutex); 4240 4241 return ret; 4242 } 4243 4244 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info) 4245 { 4246 struct task_struct *tsk; 4247 4248 mutex_lock(&fs_info->balance_mutex); 4249 if (!fs_info->balance_ctl) { 4250 mutex_unlock(&fs_info->balance_mutex); 4251 return 0; 4252 } 4253 mutex_unlock(&fs_info->balance_mutex); 4254 4255 if (btrfs_test_opt(fs_info, SKIP_BALANCE)) { 4256 btrfs_info(fs_info, "balance: resume skipped"); 4257 return 0; 4258 } 4259 4260 /* 
4261 * A ro->rw remount sequence should continue with the paused balance 4262 * regardless of who pauses it, system or the user as of now, so set 4263 * the resume flag. 4264 */ 4265 spin_lock(&fs_info->balance_lock); 4266 fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME; 4267 spin_unlock(&fs_info->balance_lock); 4268 4269 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); 4270 return PTR_ERR_OR_ZERO(tsk); 4271 } 4272 4273 int btrfs_recover_balance(struct btrfs_fs_info *fs_info) 4274 { 4275 struct btrfs_balance_control *bctl; 4276 struct btrfs_balance_item *item; 4277 struct btrfs_disk_balance_args disk_bargs; 4278 struct btrfs_path *path; 4279 struct extent_buffer *leaf; 4280 struct btrfs_key key; 4281 int ret; 4282 4283 path = btrfs_alloc_path(); 4284 if (!path) 4285 return -ENOMEM; 4286 4287 key.objectid = BTRFS_BALANCE_OBJECTID; 4288 key.type = BTRFS_TEMPORARY_ITEM_KEY; 4289 key.offset = 0; 4290 4291 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4292 if (ret < 0) 4293 goto out; 4294 if (ret > 0) { /* ret = -ENOENT; */ 4295 ret = 0; 4296 goto out; 4297 } 4298 4299 bctl = kzalloc(sizeof(*bctl), GFP_NOFS); 4300 if (!bctl) { 4301 ret = -ENOMEM; 4302 goto out; 4303 } 4304 4305 leaf = path->nodes[0]; 4306 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 4307 4308 bctl->flags = btrfs_balance_flags(leaf, item); 4309 bctl->flags |= BTRFS_BALANCE_RESUME; 4310 4311 btrfs_balance_data(leaf, item, &disk_bargs); 4312 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs); 4313 btrfs_balance_meta(leaf, item, &disk_bargs); 4314 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs); 4315 btrfs_balance_sys(leaf, item, &disk_bargs); 4316 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs); 4317 4318 /* 4319 * This should never happen, as the paused balance state is recovered 4320 * during mount without any chance of other exclusive ops to collide. 4321 * 4322 * This gives the exclusive op status to balance and keeps in paused 4323 * state until user intervention (cancel or umount). If the ownership 4324 * cannot be assigned, show a message but do not fail. The balance 4325 * is in a paused state and must have fs_info::balance_ctl properly 4326 * set up. 
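 *
 * Note that only the on-disk balance item is read back here; actually
 * restarting the operation is a separate, later step done through
 * btrfs_resume_balance_async(), which kicks off balance_kthread() once
 * the filesystem is writable.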
4327 */ 4328 if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) 4329 btrfs_warn(fs_info, 4330 "balance: cannot set exclusive op status, resume manually"); 4331 4332 mutex_lock(&fs_info->balance_mutex); 4333 BUG_ON(fs_info->balance_ctl); 4334 spin_lock(&fs_info->balance_lock); 4335 fs_info->balance_ctl = bctl; 4336 spin_unlock(&fs_info->balance_lock); 4337 mutex_unlock(&fs_info->balance_mutex); 4338 out: 4339 btrfs_free_path(path); 4340 return ret; 4341 } 4342 4343 int btrfs_pause_balance(struct btrfs_fs_info *fs_info) 4344 { 4345 int ret = 0; 4346 4347 mutex_lock(&fs_info->balance_mutex); 4348 if (!fs_info->balance_ctl) { 4349 mutex_unlock(&fs_info->balance_mutex); 4350 return -ENOTCONN; 4351 } 4352 4353 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4354 atomic_inc(&fs_info->balance_pause_req); 4355 mutex_unlock(&fs_info->balance_mutex); 4356 4357 wait_event(fs_info->balance_wait_q, 4358 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4359 4360 mutex_lock(&fs_info->balance_mutex); 4361 /* we are good with balance_ctl ripped off from under us */ 4362 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4363 atomic_dec(&fs_info->balance_pause_req); 4364 } else { 4365 ret = -ENOTCONN; 4366 } 4367 4368 mutex_unlock(&fs_info->balance_mutex); 4369 return ret; 4370 } 4371 4372 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) 4373 { 4374 mutex_lock(&fs_info->balance_mutex); 4375 if (!fs_info->balance_ctl) { 4376 mutex_unlock(&fs_info->balance_mutex); 4377 return -ENOTCONN; 4378 } 4379 4380 /* 4381 * A paused balance with the item stored on disk can be resumed at 4382 * mount time if the mount is read-write. Otherwise it's still paused 4383 * and we must not allow cancelling as it deletes the item. 4384 */ 4385 if (sb_rdonly(fs_info->sb)) { 4386 mutex_unlock(&fs_info->balance_mutex); 4387 return -EROFS; 4388 } 4389 4390 atomic_inc(&fs_info->balance_cancel_req); 4391 /* 4392 * if we are running just wait and return, balance item is 4393 * deleted in btrfs_balance in this case 4394 */ 4395 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4396 mutex_unlock(&fs_info->balance_mutex); 4397 wait_event(fs_info->balance_wait_q, 4398 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4399 mutex_lock(&fs_info->balance_mutex); 4400 } else { 4401 mutex_unlock(&fs_info->balance_mutex); 4402 /* 4403 * Lock released to allow other waiters to continue, we'll 4404 * reexamine the status again. 
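 * By the time the mutex is re-taken the balance may already be gone
 * (e.g. a concurrent cancel won the race), hence the balance_ctl check
 * below before resetting the state.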
4405 */ 4406 mutex_lock(&fs_info->balance_mutex); 4407 4408 if (fs_info->balance_ctl) { 4409 reset_balance_state(fs_info); 4410 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags); 4411 btrfs_info(fs_info, "balance: canceled"); 4412 } 4413 } 4414 4415 BUG_ON(fs_info->balance_ctl || 4416 test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4417 atomic_dec(&fs_info->balance_cancel_req); 4418 mutex_unlock(&fs_info->balance_mutex); 4419 return 0; 4420 } 4421 4422 static int btrfs_uuid_scan_kthread(void *data) 4423 { 4424 struct btrfs_fs_info *fs_info = data; 4425 struct btrfs_root *root = fs_info->tree_root; 4426 struct btrfs_key key; 4427 struct btrfs_path *path = NULL; 4428 int ret = 0; 4429 struct extent_buffer *eb; 4430 int slot; 4431 struct btrfs_root_item root_item; 4432 u32 item_size; 4433 struct btrfs_trans_handle *trans = NULL; 4434 4435 path = btrfs_alloc_path(); 4436 if (!path) { 4437 ret = -ENOMEM; 4438 goto out; 4439 } 4440 4441 key.objectid = 0; 4442 key.type = BTRFS_ROOT_ITEM_KEY; 4443 key.offset = 0; 4444 4445 while (1) { 4446 ret = btrfs_search_forward(root, &key, path, 4447 BTRFS_OLDEST_GENERATION); 4448 if (ret) { 4449 if (ret > 0) 4450 ret = 0; 4451 break; 4452 } 4453 4454 if (key.type != BTRFS_ROOT_ITEM_KEY || 4455 (key.objectid < BTRFS_FIRST_FREE_OBJECTID && 4456 key.objectid != BTRFS_FS_TREE_OBJECTID) || 4457 key.objectid > BTRFS_LAST_FREE_OBJECTID) 4458 goto skip; 4459 4460 eb = path->nodes[0]; 4461 slot = path->slots[0]; 4462 item_size = btrfs_item_size_nr(eb, slot); 4463 if (item_size < sizeof(root_item)) 4464 goto skip; 4465 4466 read_extent_buffer(eb, &root_item, 4467 btrfs_item_ptr_offset(eb, slot), 4468 (int)sizeof(root_item)); 4469 if (btrfs_root_refs(&root_item) == 0) 4470 goto skip; 4471 4472 if (!btrfs_is_empty_uuid(root_item.uuid) || 4473 !btrfs_is_empty_uuid(root_item.received_uuid)) { 4474 if (trans) 4475 goto update_tree; 4476 4477 btrfs_release_path(path); 4478 /* 4479 * 1 - subvol uuid item 4480 * 1 - received_subvol uuid item 4481 */ 4482 trans = btrfs_start_transaction(fs_info->uuid_root, 2); 4483 if (IS_ERR(trans)) { 4484 ret = PTR_ERR(trans); 4485 break; 4486 } 4487 continue; 4488 } else { 4489 goto skip; 4490 } 4491 update_tree: 4492 if (!btrfs_is_empty_uuid(root_item.uuid)) { 4493 ret = btrfs_uuid_tree_add(trans, root_item.uuid, 4494 BTRFS_UUID_KEY_SUBVOL, 4495 key.objectid); 4496 if (ret < 0) { 4497 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4498 ret); 4499 break; 4500 } 4501 } 4502 4503 if (!btrfs_is_empty_uuid(root_item.received_uuid)) { 4504 ret = btrfs_uuid_tree_add(trans, 4505 root_item.received_uuid, 4506 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4507 key.objectid); 4508 if (ret < 0) { 4509 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4510 ret); 4511 break; 4512 } 4513 } 4514 4515 skip: 4516 if (trans) { 4517 ret = btrfs_end_transaction(trans); 4518 trans = NULL; 4519 if (ret) 4520 break; 4521 } 4522 4523 btrfs_release_path(path); 4524 if (key.offset < (u64)-1) { 4525 key.offset++; 4526 } else if (key.type < BTRFS_ROOT_ITEM_KEY) { 4527 key.offset = 0; 4528 key.type = BTRFS_ROOT_ITEM_KEY; 4529 } else if (key.objectid < (u64)-1) { 4530 key.offset = 0; 4531 key.type = BTRFS_ROOT_ITEM_KEY; 4532 key.objectid++; 4533 } else { 4534 break; 4535 } 4536 cond_resched(); 4537 } 4538 4539 out: 4540 btrfs_free_path(path); 4541 if (trans && !IS_ERR(trans)) 4542 btrfs_end_transaction(trans); 4543 if (ret) 4544 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret); 4545 else 4546 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); 4547 
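	/*
	 * Release the rescan semaphore taken in btrfs_create_uuid_tree() or
	 * btrfs_check_uuid_tree() so that anyone waiting on it (e.g. during
	 * unmount) can proceed.
	 */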
up(&fs_info->uuid_tree_rescan_sem); 4548 return 0; 4549 } 4550 4551 /* 4552 * Callback for btrfs_uuid_tree_iterate(). 4553 * returns: 4554 * 0 check succeeded, the entry is not outdated. 4555 * < 0 if an error occurred. 4556 * > 0 if the check failed, which means the caller shall remove the entry. 4557 */ 4558 static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info, 4559 u8 *uuid, u8 type, u64 subid) 4560 { 4561 struct btrfs_key key; 4562 int ret = 0; 4563 struct btrfs_root *subvol_root; 4564 4565 if (type != BTRFS_UUID_KEY_SUBVOL && 4566 type != BTRFS_UUID_KEY_RECEIVED_SUBVOL) 4567 goto out; 4568 4569 key.objectid = subid; 4570 key.type = BTRFS_ROOT_ITEM_KEY; 4571 key.offset = (u64)-1; 4572 subvol_root = btrfs_read_fs_root_no_name(fs_info, &key); 4573 if (IS_ERR(subvol_root)) { 4574 ret = PTR_ERR(subvol_root); 4575 if (ret == -ENOENT) 4576 ret = 1; 4577 goto out; 4578 } 4579 4580 switch (type) { 4581 case BTRFS_UUID_KEY_SUBVOL: 4582 if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE)) 4583 ret = 1; 4584 break; 4585 case BTRFS_UUID_KEY_RECEIVED_SUBVOL: 4586 if (memcmp(uuid, subvol_root->root_item.received_uuid, 4587 BTRFS_UUID_SIZE)) 4588 ret = 1; 4589 break; 4590 } 4591 4592 out: 4593 return ret; 4594 } 4595 4596 static int btrfs_uuid_rescan_kthread(void *data) 4597 { 4598 struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data; 4599 int ret; 4600 4601 /* 4602 * 1st step is to iterate through the existing UUID tree and 4603 * to delete all entries that contain outdated data. 4604 * 2nd step is to add all missing entries to the UUID tree. 4605 */ 4606 ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry); 4607 if (ret < 0) { 4608 btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret); 4609 up(&fs_info->uuid_tree_rescan_sem); 4610 return ret; 4611 } 4612 return btrfs_uuid_scan_kthread(data); 4613 } 4614 4615 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info) 4616 { 4617 struct btrfs_trans_handle *trans; 4618 struct btrfs_root *tree_root = fs_info->tree_root; 4619 struct btrfs_root *uuid_root; 4620 struct task_struct *task; 4621 int ret; 4622 4623 /* 4624 * 1 - root node 4625 * 1 - root item 4626 */ 4627 trans = btrfs_start_transaction(tree_root, 2); 4628 if (IS_ERR(trans)) 4629 return PTR_ERR(trans); 4630 4631 uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID); 4632 if (IS_ERR(uuid_root)) { 4633 ret = PTR_ERR(uuid_root); 4634 btrfs_abort_transaction(trans, ret); 4635 btrfs_end_transaction(trans); 4636 return ret; 4637 } 4638 4639 fs_info->uuid_root = uuid_root; 4640 4641 ret = btrfs_commit_transaction(trans); 4642 if (ret) 4643 return ret; 4644 4645 down(&fs_info->uuid_tree_rescan_sem); 4646 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid"); 4647 if (IS_ERR(task)) { 4648 /* fs_info->update_uuid_tree_gen remains 0 in all error case */ 4649 btrfs_warn(fs_info, "failed to start uuid_scan task"); 4650 up(&fs_info->uuid_tree_rescan_sem); 4651 return PTR_ERR(task); 4652 } 4653 4654 return 0; 4655 } 4656 4657 int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info) 4658 { 4659 struct task_struct *task; 4660 4661 down(&fs_info->uuid_tree_rescan_sem); 4662 task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid"); 4663 if (IS_ERR(task)) { 4664 /* fs_info->update_uuid_tree_gen remains 0 in all error case */ 4665 btrfs_warn(fs_info, "failed to start uuid_rescan task"); 4666 up(&fs_info->uuid_tree_rescan_sem); 4667 return PTR_ERR(task); 4668 } 4669 4670 return 0; 4671 } 4672 4673 /* 4674 * shrinking a device 
means finding all of the device extents past 4675 * the new size, and then following the back refs to the chunks. 4676 * The chunk relocation code actually frees the device extent 4677 */ 4678 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) 4679 { 4680 struct btrfs_fs_info *fs_info = device->fs_info; 4681 struct btrfs_root *root = fs_info->dev_root; 4682 struct btrfs_trans_handle *trans; 4683 struct btrfs_dev_extent *dev_extent = NULL; 4684 struct btrfs_path *path; 4685 u64 length; 4686 u64 chunk_offset; 4687 int ret; 4688 int slot; 4689 int failed = 0; 4690 bool retried = false; 4691 struct extent_buffer *l; 4692 struct btrfs_key key; 4693 struct btrfs_super_block *super_copy = fs_info->super_copy; 4694 u64 old_total = btrfs_super_total_bytes(super_copy); 4695 u64 old_size = btrfs_device_get_total_bytes(device); 4696 u64 diff; 4697 u64 start; 4698 4699 new_size = round_down(new_size, fs_info->sectorsize); 4700 start = new_size; 4701 diff = round_down(old_size - new_size, fs_info->sectorsize); 4702 4703 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 4704 return -EINVAL; 4705 4706 path = btrfs_alloc_path(); 4707 if (!path) 4708 return -ENOMEM; 4709 4710 path->reada = READA_BACK; 4711 4712 trans = btrfs_start_transaction(root, 0); 4713 if (IS_ERR(trans)) { 4714 btrfs_free_path(path); 4715 return PTR_ERR(trans); 4716 } 4717 4718 mutex_lock(&fs_info->chunk_mutex); 4719 4720 btrfs_device_set_total_bytes(device, new_size); 4721 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 4722 device->fs_devices->total_rw_bytes -= diff; 4723 atomic64_sub(diff, &fs_info->free_chunk_space); 4724 } 4725 4726 /* 4727 * Once the device's size has been set to the new size, ensure all 4728 * in-memory chunks are synced to disk so that the loop below sees them 4729 * and relocates them accordingly. 
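 *
 * That is what the contains_pending_extent() check below is about: if a
 * not-yet-committed chunk allocation overlaps the range being removed,
 * commit the transaction so its device extents reach the trees; otherwise
 * simply ending the transaction is enough.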
4730 */ 4731 if (contains_pending_extent(device, &start, diff)) { 4732 mutex_unlock(&fs_info->chunk_mutex); 4733 ret = btrfs_commit_transaction(trans); 4734 if (ret) 4735 goto done; 4736 } else { 4737 mutex_unlock(&fs_info->chunk_mutex); 4738 btrfs_end_transaction(trans); 4739 } 4740 4741 again: 4742 key.objectid = device->devid; 4743 key.offset = (u64)-1; 4744 key.type = BTRFS_DEV_EXTENT_KEY; 4745 4746 do { 4747 mutex_lock(&fs_info->delete_unused_bgs_mutex); 4748 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4749 if (ret < 0) { 4750 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 4751 goto done; 4752 } 4753 4754 ret = btrfs_previous_item(root, path, 0, key.type); 4755 if (ret) 4756 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 4757 if (ret < 0) 4758 goto done; 4759 if (ret) { 4760 ret = 0; 4761 btrfs_release_path(path); 4762 break; 4763 } 4764 4765 l = path->nodes[0]; 4766 slot = path->slots[0]; 4767 btrfs_item_key_to_cpu(l, &key, path->slots[0]); 4768 4769 if (key.objectid != device->devid) { 4770 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 4771 btrfs_release_path(path); 4772 break; 4773 } 4774 4775 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 4776 length = btrfs_dev_extent_length(l, dev_extent); 4777 4778 if (key.offset + length <= new_size) { 4779 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 4780 btrfs_release_path(path); 4781 break; 4782 } 4783 4784 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); 4785 btrfs_release_path(path); 4786 4787 /* 4788 * We may be relocating the only data chunk we have, 4789 * which could potentially end up with losing data's 4790 * raid profile, so lets allocate an empty one in 4791 * advance. 4792 */ 4793 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset); 4794 if (ret < 0) { 4795 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 4796 goto done; 4797 } 4798 4799 ret = btrfs_relocate_chunk(fs_info, chunk_offset); 4800 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 4801 if (ret == -ENOSPC) { 4802 failed++; 4803 } else if (ret) { 4804 if (ret == -ETXTBSY) { 4805 btrfs_warn(fs_info, 4806 "could not shrink block group %llu due to active swapfile", 4807 chunk_offset); 4808 } 4809 goto done; 4810 } 4811 } while (key.offset-- > 0); 4812 4813 if (failed && !retried) { 4814 failed = 0; 4815 retried = true; 4816 goto again; 4817 } else if (failed && retried) { 4818 ret = -ENOSPC; 4819 goto done; 4820 } 4821 4822 /* Shrinking succeeded, else we would be at "done". */ 4823 trans = btrfs_start_transaction(root, 0); 4824 if (IS_ERR(trans)) { 4825 ret = PTR_ERR(trans); 4826 goto done; 4827 } 4828 4829 mutex_lock(&fs_info->chunk_mutex); 4830 btrfs_device_set_disk_total_bytes(device, new_size); 4831 if (list_empty(&device->post_commit_list)) 4832 list_add_tail(&device->post_commit_list, 4833 &trans->transaction->dev_update_list); 4834 4835 WARN_ON(diff > old_total); 4836 btrfs_set_super_total_bytes(super_copy, 4837 round_down(old_total - diff, fs_info->sectorsize)); 4838 mutex_unlock(&fs_info->chunk_mutex); 4839 4840 /* Now btrfs_update_device() will change the on-disk size. 
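 * It picks up the disk_total_bytes set just above; the in-memory
 * total_bytes was already lowered at the beginning of this function.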
*/ 4841 ret = btrfs_update_device(trans, device); 4842 if (ret < 0) { 4843 btrfs_abort_transaction(trans, ret); 4844 btrfs_end_transaction(trans); 4845 } else { 4846 ret = btrfs_commit_transaction(trans); 4847 } 4848 done: 4849 btrfs_free_path(path); 4850 if (ret) { 4851 mutex_lock(&fs_info->chunk_mutex); 4852 btrfs_device_set_total_bytes(device, old_size); 4853 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 4854 device->fs_devices->total_rw_bytes += diff; 4855 atomic64_add(diff, &fs_info->free_chunk_space); 4856 mutex_unlock(&fs_info->chunk_mutex); 4857 } 4858 return ret; 4859 } 4860 4861 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info, 4862 struct btrfs_key *key, 4863 struct btrfs_chunk *chunk, int item_size) 4864 { 4865 struct btrfs_super_block *super_copy = fs_info->super_copy; 4866 struct btrfs_disk_key disk_key; 4867 u32 array_size; 4868 u8 *ptr; 4869 4870 mutex_lock(&fs_info->chunk_mutex); 4871 array_size = btrfs_super_sys_array_size(super_copy); 4872 if (array_size + item_size + sizeof(disk_key) 4873 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) { 4874 mutex_unlock(&fs_info->chunk_mutex); 4875 return -EFBIG; 4876 } 4877 4878 ptr = super_copy->sys_chunk_array + array_size; 4879 btrfs_cpu_key_to_disk(&disk_key, key); 4880 memcpy(ptr, &disk_key, sizeof(disk_key)); 4881 ptr += sizeof(disk_key); 4882 memcpy(ptr, chunk, item_size); 4883 item_size += sizeof(disk_key); 4884 btrfs_set_super_sys_array_size(super_copy, array_size + item_size); 4885 mutex_unlock(&fs_info->chunk_mutex); 4886 4887 return 0; 4888 } 4889 4890 /* 4891 * sort the devices in descending order by max_avail, total_avail 4892 */ 4893 static int btrfs_cmp_device_info(const void *a, const void *b) 4894 { 4895 const struct btrfs_device_info *di_a = a; 4896 const struct btrfs_device_info *di_b = b; 4897 4898 if (di_a->max_avail > di_b->max_avail) 4899 return -1; 4900 if (di_a->max_avail < di_b->max_avail) 4901 return 1; 4902 if (di_a->total_avail > di_b->total_avail) 4903 return -1; 4904 if (di_a->total_avail < di_b->total_avail) 4905 return 1; 4906 return 0; 4907 } 4908 4909 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type) 4910 { 4911 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK)) 4912 return; 4913 4914 btrfs_set_fs_incompat(info, RAID56); 4915 } 4916 4917 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, 4918 u64 start, u64 type) 4919 { 4920 struct btrfs_fs_info *info = trans->fs_info; 4921 struct btrfs_fs_devices *fs_devices = info->fs_devices; 4922 struct btrfs_device *device; 4923 struct map_lookup *map = NULL; 4924 struct extent_map_tree *em_tree; 4925 struct extent_map *em; 4926 struct btrfs_device_info *devices_info = NULL; 4927 u64 total_avail; 4928 int num_stripes; /* total number of stripes to allocate */ 4929 int data_stripes; /* number of stripes that count for 4930 block group size */ 4931 int sub_stripes; /* sub_stripes info for map */ 4932 int dev_stripes; /* stripes per dev */ 4933 int devs_max; /* max devs to use */ 4934 int devs_min; /* min devs needed */ 4935 int devs_increment; /* ndevs has to be a multiple of this */ 4936 int ncopies; /* how many copies to data has */ 4937 int nparity; /* number of stripes worth of bytes to 4938 store parity information */ 4939 int ret; 4940 u64 max_stripe_size; 4941 u64 max_chunk_size; 4942 u64 stripe_size; 4943 u64 chunk_size; 4944 int ndevs; 4945 int i; 4946 int j; 4947 int index; 4948 4949 BUG_ON(!alloc_profile_is_valid(type, 0)); 4950 4951 if (list_empty(&fs_devices->alloc_list)) { 4952 if 
(btrfs_test_opt(info, ENOSPC_DEBUG)) 4953 btrfs_debug(info, "%s: no writable device", __func__); 4954 return -ENOSPC; 4955 } 4956 4957 index = btrfs_bg_flags_to_raid_index(type); 4958 4959 sub_stripes = btrfs_raid_array[index].sub_stripes; 4960 dev_stripes = btrfs_raid_array[index].dev_stripes; 4961 devs_max = btrfs_raid_array[index].devs_max; 4962 if (!devs_max) 4963 devs_max = BTRFS_MAX_DEVS(info); 4964 devs_min = btrfs_raid_array[index].devs_min; 4965 devs_increment = btrfs_raid_array[index].devs_increment; 4966 ncopies = btrfs_raid_array[index].ncopies; 4967 nparity = btrfs_raid_array[index].nparity; 4968 4969 if (type & BTRFS_BLOCK_GROUP_DATA) { 4970 max_stripe_size = SZ_1G; 4971 max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE; 4972 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 4973 /* for larger filesystems, use larger metadata chunks */ 4974 if (fs_devices->total_rw_bytes > 50ULL * SZ_1G) 4975 max_stripe_size = SZ_1G; 4976 else 4977 max_stripe_size = SZ_256M; 4978 max_chunk_size = max_stripe_size; 4979 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 4980 max_stripe_size = SZ_32M; 4981 max_chunk_size = 2 * max_stripe_size; 4982 } else { 4983 btrfs_err(info, "invalid chunk type 0x%llx requested", 4984 type); 4985 BUG(); 4986 } 4987 4988 /* We don't want a chunk larger than 10% of writable space */ 4989 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1), 4990 max_chunk_size); 4991 4992 devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info), 4993 GFP_NOFS); 4994 if (!devices_info) 4995 return -ENOMEM; 4996 4997 /* 4998 * in the first pass through the devices list, we gather information 4999 * about the available holes on each device. 5000 */ 5001 ndevs = 0; 5002 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { 5003 u64 max_avail; 5004 u64 dev_offset; 5005 5006 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 5007 WARN(1, KERN_ERR 5008 "BTRFS: read-only device in alloc_list\n"); 5009 continue; 5010 } 5011 5012 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 5013 &device->dev_state) || 5014 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 5015 continue; 5016 5017 if (device->total_bytes > device->bytes_used) 5018 total_avail = device->total_bytes - device->bytes_used; 5019 else 5020 total_avail = 0; 5021 5022 /* If there is no space on this device, skip it. 
*/ 5023 if (total_avail == 0) 5024 continue; 5025 5026 ret = find_free_dev_extent(device, 5027 max_stripe_size * dev_stripes, 5028 &dev_offset, &max_avail); 5029 if (ret && ret != -ENOSPC) 5030 goto error; 5031 5032 if (ret == 0) 5033 max_avail = max_stripe_size * dev_stripes; 5034 5035 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes) { 5036 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5037 btrfs_debug(info, 5038 "%s: devid %llu has no free space, have=%llu want=%u", 5039 __func__, device->devid, max_avail, 5040 BTRFS_STRIPE_LEN * dev_stripes); 5041 continue; 5042 } 5043 5044 if (ndevs == fs_devices->rw_devices) { 5045 WARN(1, "%s: found more than %llu devices\n", 5046 __func__, fs_devices->rw_devices); 5047 break; 5048 } 5049 devices_info[ndevs].dev_offset = dev_offset; 5050 devices_info[ndevs].max_avail = max_avail; 5051 devices_info[ndevs].total_avail = total_avail; 5052 devices_info[ndevs].dev = device; 5053 ++ndevs; 5054 } 5055 5056 /* 5057 * now sort the devices by hole size / available space 5058 */ 5059 sort(devices_info, ndevs, sizeof(struct btrfs_device_info), 5060 btrfs_cmp_device_info, NULL); 5061 5062 /* round down to number of usable stripes */ 5063 ndevs = round_down(ndevs, devs_increment); 5064 5065 if (ndevs < devs_min) { 5066 ret = -ENOSPC; 5067 if (btrfs_test_opt(info, ENOSPC_DEBUG)) { 5068 btrfs_debug(info, 5069 "%s: not enough devices with free space: have=%d minimum required=%d", 5070 __func__, ndevs, devs_min); 5071 } 5072 goto error; 5073 } 5074 5075 ndevs = min(ndevs, devs_max); 5076 5077 /* 5078 * The primary goal is to maximize the number of stripes, so use as 5079 * many devices as possible, even if the stripes are not maximum sized. 5080 * 5081 * The DUP profile stores more than one stripe per device, the 5082 * max_avail is the total size so we have to adjust. 5083 */ 5084 stripe_size = div_u64(devices_info[ndevs - 1].max_avail, dev_stripes); 5085 num_stripes = ndevs * dev_stripes; 5086 5087 /* 5088 * this will have to be fixed for RAID1 and RAID10 over 5089 * more drives 5090 */ 5091 data_stripes = (num_stripes - nparity) / ncopies; 5092 5093 /* 5094 * Use the number of data stripes to figure out how big this chunk 5095 * is really going to be in terms of logical address space, 5096 * and compare that answer with the max chunk size. If it's higher, 5097 * we try to reduce stripe_size. 5098 */ 5099 if (stripe_size * data_stripes > max_chunk_size) { 5100 /* 5101 * Reduce stripe_size, round it up to a 16MB boundary again and 5102 * then use it, unless it ends up being even bigger than the 5103 * previous value we had already. 
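 *
 * Illustrative arithmetic (made-up device count, assuming the 10% of
 * writable space cap does not kick in first): a data chunk over 12 raid0
 * devices has data_stripes = 12 and max_chunk_size = 10GiB, so with the
 * 1GiB maximum stripe_size the chunk would be 12GiB; div_u64(10GiB, 12)
 * is ~853MiB, rounded up to 864MiB, for a chunk of roughly 10.1GiB. The
 * 16MiB round-up can push the result slightly above the cap.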
5104 */ 5105 stripe_size = min(round_up(div_u64(max_chunk_size, 5106 data_stripes), SZ_16M), 5107 stripe_size); 5108 } 5109 5110 /* align to BTRFS_STRIPE_LEN */ 5111 stripe_size = round_down(stripe_size, BTRFS_STRIPE_LEN); 5112 5113 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); 5114 if (!map) { 5115 ret = -ENOMEM; 5116 goto error; 5117 } 5118 map->num_stripes = num_stripes; 5119 5120 for (i = 0; i < ndevs; ++i) { 5121 for (j = 0; j < dev_stripes; ++j) { 5122 int s = i * dev_stripes + j; 5123 map->stripes[s].dev = devices_info[i].dev; 5124 map->stripes[s].physical = devices_info[i].dev_offset + 5125 j * stripe_size; 5126 } 5127 } 5128 map->stripe_len = BTRFS_STRIPE_LEN; 5129 map->io_align = BTRFS_STRIPE_LEN; 5130 map->io_width = BTRFS_STRIPE_LEN; 5131 map->type = type; 5132 map->sub_stripes = sub_stripes; 5133 5134 chunk_size = stripe_size * data_stripes; 5135 5136 trace_btrfs_chunk_alloc(info, map, start, chunk_size); 5137 5138 em = alloc_extent_map(); 5139 if (!em) { 5140 kfree(map); 5141 ret = -ENOMEM; 5142 goto error; 5143 } 5144 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 5145 em->map_lookup = map; 5146 em->start = start; 5147 em->len = chunk_size; 5148 em->block_start = 0; 5149 em->block_len = em->len; 5150 em->orig_block_len = stripe_size; 5151 5152 em_tree = &info->mapping_tree; 5153 write_lock(&em_tree->lock); 5154 ret = add_extent_mapping(em_tree, em, 0); 5155 if (ret) { 5156 write_unlock(&em_tree->lock); 5157 free_extent_map(em); 5158 goto error; 5159 } 5160 write_unlock(&em_tree->lock); 5161 5162 ret = btrfs_make_block_group(trans, 0, type, start, chunk_size); 5163 if (ret) 5164 goto error_del_extent; 5165 5166 for (i = 0; i < map->num_stripes; i++) { 5167 struct btrfs_device *dev = map->stripes[i].dev; 5168 5169 btrfs_device_set_bytes_used(dev, dev->bytes_used + stripe_size); 5170 if (list_empty(&dev->post_commit_list)) 5171 list_add_tail(&dev->post_commit_list, 5172 &trans->transaction->dev_update_list); 5173 } 5174 5175 atomic64_sub(stripe_size * map->num_stripes, &info->free_chunk_space); 5176 5177 free_extent_map(em); 5178 check_raid56_incompat_flag(info, type); 5179 5180 kfree(devices_info); 5181 return 0; 5182 5183 error_del_extent: 5184 write_lock(&em_tree->lock); 5185 remove_extent_mapping(em_tree, em); 5186 write_unlock(&em_tree->lock); 5187 5188 /* One for our allocation */ 5189 free_extent_map(em); 5190 /* One for the tree reference */ 5191 free_extent_map(em); 5192 error: 5193 kfree(devices_info); 5194 return ret; 5195 } 5196 5197 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans, 5198 u64 chunk_offset, u64 chunk_size) 5199 { 5200 struct btrfs_fs_info *fs_info = trans->fs_info; 5201 struct btrfs_root *extent_root = fs_info->extent_root; 5202 struct btrfs_root *chunk_root = fs_info->chunk_root; 5203 struct btrfs_key key; 5204 struct btrfs_device *device; 5205 struct btrfs_chunk *chunk; 5206 struct btrfs_stripe *stripe; 5207 struct extent_map *em; 5208 struct map_lookup *map; 5209 size_t item_size; 5210 u64 dev_offset; 5211 u64 stripe_size; 5212 int i = 0; 5213 int ret = 0; 5214 5215 em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size); 5216 if (IS_ERR(em)) 5217 return PTR_ERR(em); 5218 5219 map = em->map_lookup; 5220 item_size = btrfs_chunk_item_size(map->num_stripes); 5221 stripe_size = em->orig_block_len; 5222 5223 chunk = kzalloc(item_size, GFP_NOFS); 5224 if (!chunk) { 5225 ret = -ENOMEM; 5226 goto out; 5227 } 5228 5229 /* 5230 * Take the device list mutex to prevent races with the final phase of 5231 * a device replace operation that 
replaces the device object associated 5232 * with the map's stripes, because the device object's id can change 5233 * at any time during that final phase of the device replace operation 5234 * (dev-replace.c:btrfs_dev_replace_finishing()). 5235 */ 5236 mutex_lock(&fs_info->fs_devices->device_list_mutex); 5237 for (i = 0; i < map->num_stripes; i++) { 5238 device = map->stripes[i].dev; 5239 dev_offset = map->stripes[i].physical; 5240 5241 ret = btrfs_update_device(trans, device); 5242 if (ret) 5243 break; 5244 ret = btrfs_alloc_dev_extent(trans, device, chunk_offset, 5245 dev_offset, stripe_size); 5246 if (ret) 5247 break; 5248 } 5249 if (ret) { 5250 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 5251 goto out; 5252 } 5253 5254 stripe = &chunk->stripe; 5255 for (i = 0; i < map->num_stripes; i++) { 5256 device = map->stripes[i].dev; 5257 dev_offset = map->stripes[i].physical; 5258 5259 btrfs_set_stack_stripe_devid(stripe, device->devid); 5260 btrfs_set_stack_stripe_offset(stripe, dev_offset); 5261 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE); 5262 stripe++; 5263 } 5264 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 5265 5266 btrfs_set_stack_chunk_length(chunk, chunk_size); 5267 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid); 5268 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len); 5269 btrfs_set_stack_chunk_type(chunk, map->type); 5270 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes); 5271 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len); 5272 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len); 5273 btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize); 5274 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes); 5275 5276 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 5277 key.type = BTRFS_CHUNK_ITEM_KEY; 5278 key.offset = chunk_offset; 5279 5280 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); 5281 if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 5282 /* 5283 * TODO: Cleanup of inserted chunk root in case of 5284 * failure. 5285 */ 5286 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size); 5287 } 5288 5289 out: 5290 kfree(chunk); 5291 free_extent_map(em); 5292 return ret; 5293 } 5294 5295 /* 5296 * Chunk allocation falls into two parts. The first part does work 5297 * that makes the new allocated chunk usable, but does not do any operation 5298 * that modifies the chunk tree. The second part does the work that 5299 * requires modifying the chunk tree. This division is important for the 5300 * bootstrap process of adding storage to a seed btrfs. 
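 *
 * A rough sketch of that split (the phase-two caller lives in the block
 * group code outside this file and is named only for orientation):
 *
 *   btrfs_alloc_chunk(trans, type);              - phase one, chunk_mutex held
 *   ...
 *   btrfs_create_pending_block_groups(trans)
 *     -> btrfs_finish_chunk_alloc(trans, ...)    - phase two, chunk tree items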
5301 */ 5302 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type) 5303 { 5304 u64 chunk_offset; 5305 5306 lockdep_assert_held(&trans->fs_info->chunk_mutex); 5307 chunk_offset = find_next_chunk(trans->fs_info); 5308 return __btrfs_alloc_chunk(trans, chunk_offset, type); 5309 } 5310 5311 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans) 5312 { 5313 struct btrfs_fs_info *fs_info = trans->fs_info; 5314 u64 chunk_offset; 5315 u64 sys_chunk_offset; 5316 u64 alloc_profile; 5317 int ret; 5318 5319 chunk_offset = find_next_chunk(fs_info); 5320 alloc_profile = btrfs_metadata_alloc_profile(fs_info); 5321 ret = __btrfs_alloc_chunk(trans, chunk_offset, alloc_profile); 5322 if (ret) 5323 return ret; 5324 5325 sys_chunk_offset = find_next_chunk(fs_info); 5326 alloc_profile = btrfs_system_alloc_profile(fs_info); 5327 ret = __btrfs_alloc_chunk(trans, sys_chunk_offset, alloc_profile); 5328 return ret; 5329 } 5330 5331 static inline int btrfs_chunk_max_errors(struct map_lookup *map) 5332 { 5333 const int index = btrfs_bg_flags_to_raid_index(map->type); 5334 5335 return btrfs_raid_array[index].tolerated_failures; 5336 } 5337 5338 int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset) 5339 { 5340 struct extent_map *em; 5341 struct map_lookup *map; 5342 int readonly = 0; 5343 int miss_ndevs = 0; 5344 int i; 5345 5346 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 5347 if (IS_ERR(em)) 5348 return 1; 5349 5350 map = em->map_lookup; 5351 for (i = 0; i < map->num_stripes; i++) { 5352 if (test_bit(BTRFS_DEV_STATE_MISSING, 5353 &map->stripes[i].dev->dev_state)) { 5354 miss_ndevs++; 5355 continue; 5356 } 5357 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, 5358 &map->stripes[i].dev->dev_state)) { 5359 readonly = 1; 5360 goto end; 5361 } 5362 } 5363 5364 /* 5365 * If the number of missing devices is larger than max errors, 5366 * we can not write the data into that chunk successfully, so 5367 * set it readonly. 5368 */ 5369 if (miss_ndevs > btrfs_chunk_max_errors(map)) 5370 readonly = 1; 5371 end: 5372 free_extent_map(em); 5373 return readonly; 5374 } 5375 5376 void btrfs_mapping_tree_free(struct extent_map_tree *tree) 5377 { 5378 struct extent_map *em; 5379 5380 while (1) { 5381 write_lock(&tree->lock); 5382 em = lookup_extent_mapping(tree, 0, (u64)-1); 5383 if (em) 5384 remove_extent_mapping(tree, em); 5385 write_unlock(&tree->lock); 5386 if (!em) 5387 break; 5388 /* once for us */ 5389 free_extent_map(em); 5390 /* once for the tree */ 5391 free_extent_map(em); 5392 } 5393 } 5394 5395 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5396 { 5397 struct extent_map *em; 5398 struct map_lookup *map; 5399 int ret; 5400 5401 em = btrfs_get_chunk_map(fs_info, logical, len); 5402 if (IS_ERR(em)) 5403 /* 5404 * We could return errors for these cases, but that could get 5405 * ugly and we'd probably do the same thing which is just not do 5406 * anything else and exit, so return 1 so the callers don't try 5407 * to use other copies. 5408 */ 5409 return 1; 5410 5411 map = em->map_lookup; 5412 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK)) 5413 ret = map->num_stripes; 5414 else if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5415 ret = map->sub_stripes; 5416 else if (map->type & BTRFS_BLOCK_GROUP_RAID5) 5417 ret = 2; 5418 else if (map->type & BTRFS_BLOCK_GROUP_RAID6) 5419 /* 5420 * There could be two corrupted data stripes, we need 5421 * to loop retry in order to rebuild the correct data. 
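 * Returning num_stripes here (instead of a fixed 3) gives the retry loop
 * enough distinct mirror numbers to attempt those combinations.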
5422 * 5423 * Fail a stripe at a time on every retry except the 5424 * stripe under reconstruction. 5425 */ 5426 ret = map->num_stripes; 5427 else 5428 ret = 1; 5429 free_extent_map(em); 5430 5431 down_read(&fs_info->dev_replace.rwsem); 5432 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) && 5433 fs_info->dev_replace.tgtdev) 5434 ret++; 5435 up_read(&fs_info->dev_replace.rwsem); 5436 5437 return ret; 5438 } 5439 5440 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info, 5441 u64 logical) 5442 { 5443 struct extent_map *em; 5444 struct map_lookup *map; 5445 unsigned long len = fs_info->sectorsize; 5446 5447 em = btrfs_get_chunk_map(fs_info, logical, len); 5448 5449 if (!WARN_ON(IS_ERR(em))) { 5450 map = em->map_lookup; 5451 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5452 len = map->stripe_len * nr_data_stripes(map); 5453 free_extent_map(em); 5454 } 5455 return len; 5456 } 5457 5458 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5459 { 5460 struct extent_map *em; 5461 struct map_lookup *map; 5462 int ret = 0; 5463 5464 em = btrfs_get_chunk_map(fs_info, logical, len); 5465 5466 if(!WARN_ON(IS_ERR(em))) { 5467 map = em->map_lookup; 5468 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5469 ret = 1; 5470 free_extent_map(em); 5471 } 5472 return ret; 5473 } 5474 5475 static int find_live_mirror(struct btrfs_fs_info *fs_info, 5476 struct map_lookup *map, int first, 5477 int dev_replace_is_ongoing) 5478 { 5479 int i; 5480 int num_stripes; 5481 int preferred_mirror; 5482 int tolerance; 5483 struct btrfs_device *srcdev; 5484 5485 ASSERT((map->type & 5486 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10))); 5487 5488 if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5489 num_stripes = map->sub_stripes; 5490 else 5491 num_stripes = map->num_stripes; 5492 5493 preferred_mirror = first + current->pid % num_stripes; 5494 5495 if (dev_replace_is_ongoing && 5496 fs_info->dev_replace.cont_reading_from_srcdev_mode == 5497 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID) 5498 srcdev = fs_info->dev_replace.srcdev; 5499 else 5500 srcdev = NULL; 5501 5502 /* 5503 * try to avoid the drive that is the source drive for a 5504 * dev-replace procedure, only choose it if no other non-missing 5505 * mirror is available 5506 */ 5507 for (tolerance = 0; tolerance < 2; tolerance++) { 5508 if (map->stripes[preferred_mirror].dev->bdev && 5509 (tolerance || map->stripes[preferred_mirror].dev != srcdev)) 5510 return preferred_mirror; 5511 for (i = first; i < first + num_stripes; i++) { 5512 if (map->stripes[i].dev->bdev && 5513 (tolerance || map->stripes[i].dev != srcdev)) 5514 return i; 5515 } 5516 } 5517 5518 /* we couldn't find one that doesn't fail. 
Just return something 5519 * and the io error handling code will clean up eventually 5520 */ 5521 return preferred_mirror; 5522 } 5523 5524 static inline int parity_smaller(u64 a, u64 b) 5525 { 5526 return a > b; 5527 } 5528 5529 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */ 5530 static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes) 5531 { 5532 struct btrfs_bio_stripe s; 5533 int i; 5534 u64 l; 5535 int again = 1; 5536 5537 while (again) { 5538 again = 0; 5539 for (i = 0; i < num_stripes - 1; i++) { 5540 if (parity_smaller(bbio->raid_map[i], 5541 bbio->raid_map[i+1])) { 5542 s = bbio->stripes[i]; 5543 l = bbio->raid_map[i]; 5544 bbio->stripes[i] = bbio->stripes[i+1]; 5545 bbio->raid_map[i] = bbio->raid_map[i+1]; 5546 bbio->stripes[i+1] = s; 5547 bbio->raid_map[i+1] = l; 5548 5549 again = 1; 5550 } 5551 } 5552 } 5553 } 5554 5555 static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes) 5556 { 5557 struct btrfs_bio *bbio = kzalloc( 5558 /* the size of the btrfs_bio */ 5559 sizeof(struct btrfs_bio) + 5560 /* plus the variable array for the stripes */ 5561 sizeof(struct btrfs_bio_stripe) * (total_stripes) + 5562 /* plus the variable array for the tgt dev */ 5563 sizeof(int) * (real_stripes) + 5564 /* 5565 * plus the raid_map, which includes both the tgt dev 5566 * and the stripes 5567 */ 5568 sizeof(u64) * (total_stripes), 5569 GFP_NOFS|__GFP_NOFAIL); 5570 5571 atomic_set(&bbio->error, 0); 5572 refcount_set(&bbio->refs, 1); 5573 5574 return bbio; 5575 } 5576 5577 void btrfs_get_bbio(struct btrfs_bio *bbio) 5578 { 5579 WARN_ON(!refcount_read(&bbio->refs)); 5580 refcount_inc(&bbio->refs); 5581 } 5582 5583 void btrfs_put_bbio(struct btrfs_bio *bbio) 5584 { 5585 if (!bbio) 5586 return; 5587 if (refcount_dec_and_test(&bbio->refs)) 5588 kfree(bbio); 5589 } 5590 5591 /* can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */ 5592 /* 5593 * Please note that, discard won't be sent to target device of device 5594 * replace. 
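 *
 * Small worked example (illustrative numbers): discarding 256K at the
 * start of a two-device raid0 chunk with 64K stripe_len covers stripe
 * numbers 0-3, so each device gets a single bbio stripe of length 128K.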
5595 */ 5596 static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info, 5597 u64 logical, u64 length, 5598 struct btrfs_bio **bbio_ret) 5599 { 5600 struct extent_map *em; 5601 struct map_lookup *map; 5602 struct btrfs_bio *bbio; 5603 u64 offset; 5604 u64 stripe_nr; 5605 u64 stripe_nr_end; 5606 u64 stripe_end_offset; 5607 u64 stripe_cnt; 5608 u64 stripe_len; 5609 u64 stripe_offset; 5610 u64 num_stripes; 5611 u32 stripe_index; 5612 u32 factor = 0; 5613 u32 sub_stripes = 0; 5614 u64 stripes_per_dev = 0; 5615 u32 remaining_stripes = 0; 5616 u32 last_stripe = 0; 5617 int ret = 0; 5618 int i; 5619 5620 /* discard always return a bbio */ 5621 ASSERT(bbio_ret); 5622 5623 em = btrfs_get_chunk_map(fs_info, logical, length); 5624 if (IS_ERR(em)) 5625 return PTR_ERR(em); 5626 5627 map = em->map_lookup; 5628 /* we don't discard raid56 yet */ 5629 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 5630 ret = -EOPNOTSUPP; 5631 goto out; 5632 } 5633 5634 offset = logical - em->start; 5635 length = min_t(u64, em->len - offset, length); 5636 5637 stripe_len = map->stripe_len; 5638 /* 5639 * stripe_nr counts the total number of stripes we have to stride 5640 * to get to this block 5641 */ 5642 stripe_nr = div64_u64(offset, stripe_len); 5643 5644 /* stripe_offset is the offset of this block in its stripe */ 5645 stripe_offset = offset - stripe_nr * stripe_len; 5646 5647 stripe_nr_end = round_up(offset + length, map->stripe_len); 5648 stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len); 5649 stripe_cnt = stripe_nr_end - stripe_nr; 5650 stripe_end_offset = stripe_nr_end * map->stripe_len - 5651 (offset + length); 5652 /* 5653 * after this, stripe_nr is the number of stripes on this 5654 * device we have to walk to find the data, and stripe_index is 5655 * the number of our device in the stripe array 5656 */ 5657 num_stripes = 1; 5658 stripe_index = 0; 5659 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 5660 BTRFS_BLOCK_GROUP_RAID10)) { 5661 if (map->type & BTRFS_BLOCK_GROUP_RAID0) 5662 sub_stripes = 1; 5663 else 5664 sub_stripes = map->sub_stripes; 5665 5666 factor = map->num_stripes / sub_stripes; 5667 num_stripes = min_t(u64, map->num_stripes, 5668 sub_stripes * stripe_cnt); 5669 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 5670 stripe_index *= sub_stripes; 5671 stripes_per_dev = div_u64_rem(stripe_cnt, factor, 5672 &remaining_stripes); 5673 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe); 5674 last_stripe *= sub_stripes; 5675 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK | 5676 BTRFS_BLOCK_GROUP_DUP)) { 5677 num_stripes = map->num_stripes; 5678 } else { 5679 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 5680 &stripe_index); 5681 } 5682 5683 bbio = alloc_btrfs_bio(num_stripes, 0); 5684 if (!bbio) { 5685 ret = -ENOMEM; 5686 goto out; 5687 } 5688 5689 for (i = 0; i < num_stripes; i++) { 5690 bbio->stripes[i].physical = 5691 map->stripes[stripe_index].physical + 5692 stripe_offset + stripe_nr * map->stripe_len; 5693 bbio->stripes[i].dev = map->stripes[stripe_index].dev; 5694 5695 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 5696 BTRFS_BLOCK_GROUP_RAID10)) { 5697 bbio->stripes[i].length = stripes_per_dev * 5698 map->stripe_len; 5699 5700 if (i / sub_stripes < remaining_stripes) 5701 bbio->stripes[i].length += 5702 map->stripe_len; 5703 5704 /* 5705 * Special for the first stripe and 5706 * the last stripe: 5707 * 5708 * |-------|...|-------| 5709 * |----------| 5710 * off end_off 5711 */ 5712 if (i < sub_stripes) 5713 bbio->stripes[i].length -= 5714 stripe_offset; 5715 
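			/*
			 * Stripes that cover the tail of the range also
			 * lose stripe_end_offset below.
			 */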
5716 if (stripe_index >= last_stripe && 5717 stripe_index <= (last_stripe + 5718 sub_stripes - 1)) 5719 bbio->stripes[i].length -= 5720 stripe_end_offset; 5721 5722 if (i == sub_stripes - 1) 5723 stripe_offset = 0; 5724 } else { 5725 bbio->stripes[i].length = length; 5726 } 5727 5728 stripe_index++; 5729 if (stripe_index == map->num_stripes) { 5730 stripe_index = 0; 5731 stripe_nr++; 5732 } 5733 } 5734 5735 *bbio_ret = bbio; 5736 bbio->map_type = map->type; 5737 bbio->num_stripes = num_stripes; 5738 out: 5739 free_extent_map(em); 5740 return ret; 5741 } 5742 5743 /* 5744 * In dev-replace case, for repair case (that's the only case where the mirror 5745 * is selected explicitly when calling btrfs_map_block), blocks left of the 5746 * left cursor can also be read from the target drive. 5747 * 5748 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the 5749 * array of stripes. 5750 * For READ, it also needs to be supported using the same mirror number. 5751 * 5752 * If the requested block is not left of the left cursor, EIO is returned. This 5753 * can happen because btrfs_num_copies() returns one more in the dev-replace 5754 * case. 5755 */ 5756 static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info, 5757 u64 logical, u64 length, 5758 u64 srcdev_devid, int *mirror_num, 5759 u64 *physical) 5760 { 5761 struct btrfs_bio *bbio = NULL; 5762 int num_stripes; 5763 int index_srcdev = 0; 5764 int found = 0; 5765 u64 physical_of_found = 0; 5766 int i; 5767 int ret = 0; 5768 5769 ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, 5770 logical, &length, &bbio, 0, 0); 5771 if (ret) { 5772 ASSERT(bbio == NULL); 5773 return ret; 5774 } 5775 5776 num_stripes = bbio->num_stripes; 5777 if (*mirror_num > num_stripes) { 5778 /* 5779 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror, 5780 * that means that the requested area is not left of the left 5781 * cursor 5782 */ 5783 btrfs_put_bbio(bbio); 5784 return -EIO; 5785 } 5786 5787 /* 5788 * process the rest of the function using the mirror_num of the source 5789 * drive. Therefore look it up first. At the end, patch the device 5790 * pointer to the one of the target drive. 5791 */ 5792 for (i = 0; i < num_stripes; i++) { 5793 if (bbio->stripes[i].dev->devid != srcdev_devid) 5794 continue; 5795 5796 /* 5797 * In case of DUP, in order to keep it simple, only add the 5798 * mirror with the lowest physical address 5799 */ 5800 if (found && 5801 physical_of_found <= bbio->stripes[i].physical) 5802 continue; 5803 5804 index_srcdev = i; 5805 found = 1; 5806 physical_of_found = bbio->stripes[i].physical; 5807 } 5808 5809 btrfs_put_bbio(bbio); 5810 5811 ASSERT(found); 5812 if (!found) 5813 return -EIO; 5814 5815 *mirror_num = index_srcdev + 1; 5816 *physical = physical_of_found; 5817 return ret; 5818 } 5819 5820 static void handle_ops_on_dev_replace(enum btrfs_map_op op, 5821 struct btrfs_bio **bbio_ret, 5822 struct btrfs_dev_replace *dev_replace, 5823 int *num_stripes_ret, int *max_errors_ret) 5824 { 5825 struct btrfs_bio *bbio = *bbio_ret; 5826 u64 srcdev_devid = dev_replace->srcdev->devid; 5827 int tgtdev_indexes = 0; 5828 int num_stripes = *num_stripes_ret; 5829 int max_errors = *max_errors_ret; 5830 int i; 5831 5832 if (op == BTRFS_MAP_WRITE) { 5833 int index_where_to_add; 5834 5835 /* 5836 * duplicate the write operations while the dev replace 5837 * procedure is running. 
Since the copying of the old disk to 5838 * the new disk takes place at run time while the filesystem is 5839 * mounted writable, the regular write operations to the old 5840 * disk have to be duplicated to go to the new disk as well. 5841 * 5842 * Note that device->missing is handled by the caller, and that 5843 * the write to the old disk is already set up in the stripes 5844 * array. 5845 */ 5846 index_where_to_add = num_stripes; 5847 for (i = 0; i < num_stripes; i++) { 5848 if (bbio->stripes[i].dev->devid == srcdev_devid) { 5849 /* write to new disk, too */ 5850 struct btrfs_bio_stripe *new = 5851 bbio->stripes + index_where_to_add; 5852 struct btrfs_bio_stripe *old = 5853 bbio->stripes + i; 5854 5855 new->physical = old->physical; 5856 new->length = old->length; 5857 new->dev = dev_replace->tgtdev; 5858 bbio->tgtdev_map[i] = index_where_to_add; 5859 index_where_to_add++; 5860 max_errors++; 5861 tgtdev_indexes++; 5862 } 5863 } 5864 num_stripes = index_where_to_add; 5865 } else if (op == BTRFS_MAP_GET_READ_MIRRORS) { 5866 int index_srcdev = 0; 5867 int found = 0; 5868 u64 physical_of_found = 0; 5869 5870 /* 5871 * During the dev-replace procedure, the target drive can also 5872 * be used to read data in case it is needed to repair a corrupt 5873 * block elsewhere. This is possible if the requested area is 5874 * left of the left cursor. In this area, the target drive is a 5875 * full copy of the source drive. 5876 */ 5877 for (i = 0; i < num_stripes; i++) { 5878 if (bbio->stripes[i].dev->devid == srcdev_devid) { 5879 /* 5880 * In case of DUP, in order to keep it simple, 5881 * only add the mirror with the lowest physical 5882 * address 5883 */ 5884 if (found && 5885 physical_of_found <= 5886 bbio->stripes[i].physical) 5887 continue; 5888 index_srcdev = i; 5889 found = 1; 5890 physical_of_found = bbio->stripes[i].physical; 5891 } 5892 } 5893 if (found) { 5894 struct btrfs_bio_stripe *tgtdev_stripe = 5895 bbio->stripes + num_stripes; 5896 5897 tgtdev_stripe->physical = physical_of_found; 5898 tgtdev_stripe->length = 5899 bbio->stripes[index_srcdev].length; 5900 tgtdev_stripe->dev = dev_replace->tgtdev; 5901 bbio->tgtdev_map[index_srcdev] = num_stripes; 5902 5903 tgtdev_indexes++; 5904 num_stripes++; 5905 } 5906 } 5907 5908 *num_stripes_ret = num_stripes; 5909 *max_errors_ret = max_errors; 5910 bbio->num_tgtdevs = tgtdev_indexes; 5911 *bbio_ret = bbio; 5912 } 5913 5914 static bool need_full_stripe(enum btrfs_map_op op) 5915 { 5916 return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS); 5917 } 5918 5919 /* 5920 * btrfs_get_io_geometry - calculates the geomery of a particular (address, len) 5921 * tuple. This information is used to calculate how big a 5922 * particular bio can get before it straddles a stripe. 5923 * 5924 * @fs_info - the filesystem 5925 * @logical - address that we want to figure out the geometry of 5926 * @len - the length of IO we are going to perform, starting at @logical 5927 * @op - type of operation - write or read 5928 * @io_geom - pointer used to return values 5929 * 5930 * Returns < 0 in case a chunk for the given logical address cannot be found, 5931 * usually shouldn't happen unless @logical is corrupted, 0 otherwise. 
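 *
 * Example with illustrative numbers: on a striped (non-raid56) chunk with
 * the usual 64K stripe_len, asking about 192K at an offset 16K into the
 * chunk yields io_geom->len = 48K, i.e. the bio is capped at the end of
 * the current stripe.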
5932 */ 5933 int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 5934 u64 logical, u64 len, struct btrfs_io_geometry *io_geom) 5935 { 5936 struct extent_map *em; 5937 struct map_lookup *map; 5938 u64 offset; 5939 u64 stripe_offset; 5940 u64 stripe_nr; 5941 u64 stripe_len; 5942 u64 raid56_full_stripe_start = (u64)-1; 5943 int data_stripes; 5944 int ret = 0; 5945 5946 ASSERT(op != BTRFS_MAP_DISCARD); 5947 5948 em = btrfs_get_chunk_map(fs_info, logical, len); 5949 if (IS_ERR(em)) 5950 return PTR_ERR(em); 5951 5952 map = em->map_lookup; 5953 /* Offset of this logical address in the chunk */ 5954 offset = logical - em->start; 5955 /* Len of a stripe in a chunk */ 5956 stripe_len = map->stripe_len; 5957 /* Stripe where this block falls in */ 5958 stripe_nr = div64_u64(offset, stripe_len); 5959 /* Offset of stripe in the chunk */ 5960 stripe_offset = stripe_nr * stripe_len; 5961 if (offset < stripe_offset) { 5962 btrfs_crit(fs_info, 5963 "stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu", 5964 stripe_offset, offset, em->start, logical, stripe_len); 5965 ret = -EINVAL; 5966 goto out; 5967 } 5968 5969 /* stripe_offset is the offset of this block in its stripe */ 5970 stripe_offset = offset - stripe_offset; 5971 data_stripes = nr_data_stripes(map); 5972 5973 if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { 5974 u64 max_len = stripe_len - stripe_offset; 5975 5976 /* 5977 * In case of raid56, we need to know the stripe aligned start 5978 */ 5979 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 5980 unsigned long full_stripe_len = stripe_len * data_stripes; 5981 raid56_full_stripe_start = offset; 5982 5983 /* 5984 * Allow a write of a full stripe, but make sure we 5985 * don't allow straddling of stripes 5986 */ 5987 raid56_full_stripe_start = div64_u64(raid56_full_stripe_start, 5988 full_stripe_len); 5989 raid56_full_stripe_start *= full_stripe_len; 5990 5991 /* 5992 * For writes to RAID[56], allow a full stripeset across 5993 * all disks. For other RAID types and for RAID[56] 5994 * reads, just allow a single stripe (on a single disk).
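* Illustrative example (device count and sizes only for illustration): on a three device RAID5 chunk with a 64KiB stripe_len there are two data stripes, so the full stripe is 128KiB; for an offset of 200KiB into the chunk raid56_full_stripe_start is rounded down to 128KiB and a write may cover at most 2 * 64K - (200K - 128K) = 56KiB before it would straddle the next full stripe.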
5995 */ 5996 if (op == BTRFS_MAP_WRITE) { 5997 max_len = stripe_len * data_stripes - 5998 (offset - raid56_full_stripe_start); 5999 } 6000 } 6001 len = min_t(u64, em->len - offset, max_len); 6002 } else { 6003 len = em->len - offset; 6004 } 6005 6006 io_geom->len = len; 6007 io_geom->offset = offset; 6008 io_geom->stripe_len = stripe_len; 6009 io_geom->stripe_nr = stripe_nr; 6010 io_geom->stripe_offset = stripe_offset; 6011 io_geom->raid56_stripe_offset = raid56_full_stripe_start; 6012 6013 out: 6014 /* once for us */ 6015 free_extent_map(em); 6016 return ret; 6017 } 6018 6019 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, 6020 enum btrfs_map_op op, 6021 u64 logical, u64 *length, 6022 struct btrfs_bio **bbio_ret, 6023 int mirror_num, int need_raid_map) 6024 { 6025 struct extent_map *em; 6026 struct map_lookup *map; 6027 u64 offset; 6028 u64 stripe_offset; 6029 u64 stripe_nr; 6030 u64 stripe_len; 6031 u32 stripe_index; 6032 int data_stripes; 6033 int i; 6034 int ret = 0; 6035 int num_stripes; 6036 int max_errors = 0; 6037 int tgtdev_indexes = 0; 6038 struct btrfs_bio *bbio = NULL; 6039 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; 6040 int dev_replace_is_ongoing = 0; 6041 int num_alloc_stripes; 6042 int patch_the_first_stripe_for_dev_replace = 0; 6043 u64 physical_to_patch_in_first_stripe = 0; 6044 u64 raid56_full_stripe_start = (u64)-1; 6045 struct btrfs_io_geometry geom; 6046 6047 ASSERT(bbio_ret); 6048 6049 if (op == BTRFS_MAP_DISCARD) 6050 return __btrfs_map_block_for_discard(fs_info, logical, 6051 *length, bbio_ret); 6052 6053 ret = btrfs_get_io_geometry(fs_info, op, logical, *length, &geom); 6054 if (ret < 0) 6055 return ret; 6056 6057 em = btrfs_get_chunk_map(fs_info, logical, *length); 6058 ASSERT(em); 6059 map = em->map_lookup; 6060 6061 *length = geom.len; 6062 offset = geom.offset; 6063 stripe_len = geom.stripe_len; 6064 stripe_nr = geom.stripe_nr; 6065 stripe_offset = geom.stripe_offset; 6066 raid56_full_stripe_start = geom.raid56_stripe_offset; 6067 data_stripes = nr_data_stripes(map); 6068 6069 down_read(&dev_replace->rwsem); 6070 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace); 6071 /* 6072 * Hold the semaphore for read during the whole operation, write is 6073 * requested at commit time but must wait. 
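* If no replace is running the read lock is dropped again right below; otherwise it is held until the out: label at the end of this function.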
6074 */ 6075 if (!dev_replace_is_ongoing) 6076 up_read(&dev_replace->rwsem); 6077 6078 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 && 6079 !need_full_stripe(op) && dev_replace->tgtdev != NULL) { 6080 ret = get_extra_mirror_from_replace(fs_info, logical, *length, 6081 dev_replace->srcdev->devid, 6082 &mirror_num, 6083 &physical_to_patch_in_first_stripe); 6084 if (ret) 6085 goto out; 6086 else 6087 patch_the_first_stripe_for_dev_replace = 1; 6088 } else if (mirror_num > map->num_stripes) { 6089 mirror_num = 0; 6090 } 6091 6092 num_stripes = 1; 6093 stripe_index = 0; 6094 if (map->type & BTRFS_BLOCK_GROUP_RAID0) { 6095 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6096 &stripe_index); 6097 if (!need_full_stripe(op)) 6098 mirror_num = 1; 6099 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) { 6100 if (need_full_stripe(op)) 6101 num_stripes = map->num_stripes; 6102 else if (mirror_num) 6103 stripe_index = mirror_num - 1; 6104 else { 6105 stripe_index = find_live_mirror(fs_info, map, 0, 6106 dev_replace_is_ongoing); 6107 mirror_num = stripe_index + 1; 6108 } 6109 6110 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { 6111 if (need_full_stripe(op)) { 6112 num_stripes = map->num_stripes; 6113 } else if (mirror_num) { 6114 stripe_index = mirror_num - 1; 6115 } else { 6116 mirror_num = 1; 6117 } 6118 6119 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { 6120 u32 factor = map->num_stripes / map->sub_stripes; 6121 6122 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 6123 stripe_index *= map->sub_stripes; 6124 6125 if (need_full_stripe(op)) 6126 num_stripes = map->sub_stripes; 6127 else if (mirror_num) 6128 stripe_index += mirror_num - 1; 6129 else { 6130 int old_stripe_index = stripe_index; 6131 stripe_index = find_live_mirror(fs_info, map, 6132 stripe_index, 6133 dev_replace_is_ongoing); 6134 mirror_num = stripe_index - old_stripe_index + 1; 6135 } 6136 6137 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6138 if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) { 6139 /* push stripe_nr back to the start of the full stripe */ 6140 stripe_nr = div64_u64(raid56_full_stripe_start, 6141 stripe_len * data_stripes); 6142 6143 /* RAID[56] write or recovery. Return all stripes */ 6144 num_stripes = map->num_stripes; 6145 max_errors = nr_parity_stripes(map); 6146 6147 *length = map->stripe_len; 6148 stripe_index = 0; 6149 stripe_offset = 0; 6150 } else { 6151 /* 6152 * Mirror #0 or #1 means the original data block. 6153 * Mirror #2 is RAID5 parity block. 6154 * Mirror #3 is RAID6 Q block. 
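* For example (device count only for illustration), on a four device RAID6 chunk data_stripes is 2, so mirror_num == 2 selects stripe_index = data_stripes + mirror_num - 2 = 2 (the P stripe) and mirror_num == 3 selects stripe_index = 3 (the Q stripe), before the parity rotation below is applied.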
6155 */ 6156 stripe_nr = div_u64_rem(stripe_nr, 6157 data_stripes, &stripe_index); 6158 if (mirror_num > 1) 6159 stripe_index = data_stripes + mirror_num - 2; 6160 6161 /* We distribute the parity blocks across stripes */ 6162 div_u64_rem(stripe_nr + stripe_index, map->num_stripes, 6163 &stripe_index); 6164 if (!need_full_stripe(op) && mirror_num <= 1) 6165 mirror_num = 1; 6166 } 6167 } else { 6168 /* 6169 * after this, stripe_nr is the number of stripes on this 6170 * device we have to walk to find the data, and stripe_index is 6171 * the number of our device in the stripe array 6172 */ 6173 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 6174 &stripe_index); 6175 mirror_num = stripe_index + 1; 6176 } 6177 if (stripe_index >= map->num_stripes) { 6178 btrfs_crit(fs_info, 6179 "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u", 6180 stripe_index, map->num_stripes); 6181 ret = -EINVAL; 6182 goto out; 6183 } 6184 6185 num_alloc_stripes = num_stripes; 6186 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) { 6187 if (op == BTRFS_MAP_WRITE) 6188 num_alloc_stripes <<= 1; 6189 if (op == BTRFS_MAP_GET_READ_MIRRORS) 6190 num_alloc_stripes++; 6191 tgtdev_indexes = num_stripes; 6192 } 6193 6194 bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes); 6195 if (!bbio) { 6196 ret = -ENOMEM; 6197 goto out; 6198 } 6199 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) 6200 bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes); 6201 6202 /* build raid_map */ 6203 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map && 6204 (need_full_stripe(op) || mirror_num > 1)) { 6205 u64 tmp; 6206 unsigned rot; 6207 6208 bbio->raid_map = (u64 *)((void *)bbio->stripes + 6209 sizeof(struct btrfs_bio_stripe) * 6210 num_alloc_stripes + 6211 sizeof(int) * tgtdev_indexes); 6212 6213 /* Work out the disk rotation on this stripe-set */ 6214 div_u64_rem(stripe_nr, num_stripes, &rot); 6215 6216 /* Fill in the logical address of each stripe */ 6217 tmp = stripe_nr * data_stripes; 6218 for (i = 0; i < data_stripes; i++) 6219 bbio->raid_map[(i+rot) % num_stripes] = 6220 em->start + (tmp + i) * map->stripe_len; 6221 6222 bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE; 6223 if (map->type & BTRFS_BLOCK_GROUP_RAID6) 6224 bbio->raid_map[(i+rot+1) % num_stripes] = 6225 RAID6_Q_STRIPE; 6226 } 6227 6228 6229 for (i = 0; i < num_stripes; i++) { 6230 bbio->stripes[i].physical = 6231 map->stripes[stripe_index].physical + 6232 stripe_offset + 6233 stripe_nr * map->stripe_len; 6234 bbio->stripes[i].dev = 6235 map->stripes[stripe_index].dev; 6236 stripe_index++; 6237 } 6238 6239 if (need_full_stripe(op)) 6240 max_errors = btrfs_chunk_max_errors(map); 6241 6242 if (bbio->raid_map) 6243 sort_parity_stripes(bbio, num_stripes); 6244 6245 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL && 6246 need_full_stripe(op)) { 6247 handle_ops_on_dev_replace(op, &bbio, dev_replace, &num_stripes, 6248 &max_errors); 6249 } 6250 6251 *bbio_ret = bbio; 6252 bbio->map_type = map->type; 6253 bbio->num_stripes = num_stripes; 6254 bbio->max_errors = max_errors; 6255 bbio->mirror_num = mirror_num; 6256 6257 /* 6258 * this is the case that REQ_READ && dev_replace_is_ongoing && 6259 * mirror_num == num_stripes + 1 && dev_replace target drive is 6260 * available as a mirror 6261 */ 6262 if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) { 6263 WARN_ON(num_stripes > 1); 6264 bbio->stripes[0].dev = dev_replace->tgtdev; 6265 bbio->stripes[0].physical = 
physical_to_patch_in_first_stripe; 6266 bbio->mirror_num = map->num_stripes + 1; 6267 } 6268 out: 6269 if (dev_replace_is_ongoing) { 6270 lockdep_assert_held(&dev_replace->rwsem); 6271 /* Unlock and let waiting writers proceed */ 6272 up_read(&dev_replace->rwsem); 6273 } 6274 free_extent_map(em); 6275 return ret; 6276 } 6277 6278 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 6279 u64 logical, u64 *length, 6280 struct btrfs_bio **bbio_ret, int mirror_num) 6281 { 6282 return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 6283 mirror_num, 0); 6284 } 6285 6286 /* For Scrub/replace */ 6287 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 6288 u64 logical, u64 *length, 6289 struct btrfs_bio **bbio_ret) 6290 { 6291 return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0, 1); 6292 } 6293 6294 int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start, 6295 u64 physical, u64 **logical, int *naddrs, int *stripe_len) 6296 { 6297 struct extent_map *em; 6298 struct map_lookup *map; 6299 u64 *buf; 6300 u64 bytenr; 6301 u64 length; 6302 u64 stripe_nr; 6303 u64 rmap_len; 6304 int i, j, nr = 0; 6305 6306 em = btrfs_get_chunk_map(fs_info, chunk_start, 1); 6307 if (IS_ERR(em)) 6308 return -EIO; 6309 6310 map = em->map_lookup; 6311 length = em->len; 6312 rmap_len = map->stripe_len; 6313 6314 if (map->type & BTRFS_BLOCK_GROUP_RAID10) 6315 length = div_u64(length, map->num_stripes / map->sub_stripes); 6316 else if (map->type & BTRFS_BLOCK_GROUP_RAID0) 6317 length = div_u64(length, map->num_stripes); 6318 else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6319 length = div_u64(length, nr_data_stripes(map)); 6320 rmap_len = map->stripe_len * nr_data_stripes(map); 6321 } 6322 6323 buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS); 6324 BUG_ON(!buf); /* -ENOMEM */ 6325 6326 for (i = 0; i < map->num_stripes; i++) { 6327 if (map->stripes[i].physical > physical || 6328 map->stripes[i].physical + length <= physical) 6329 continue; 6330 6331 stripe_nr = physical - map->stripes[i].physical; 6332 stripe_nr = div64_u64(stripe_nr, map->stripe_len); 6333 6334 if (map->type & BTRFS_BLOCK_GROUP_RAID10) { 6335 stripe_nr = stripe_nr * map->num_stripes + i; 6336 stripe_nr = div_u64(stripe_nr, map->sub_stripes); 6337 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) { 6338 stripe_nr = stripe_nr * map->num_stripes + i; 6339 } /* else if RAID[56], multiply by nr_data_stripes(). 
6340 * Alternatively, just use rmap_len below instead of 6341 * map->stripe_len */ 6342 6343 bytenr = chunk_start + stripe_nr * rmap_len; 6344 WARN_ON(nr >= map->num_stripes); 6345 for (j = 0; j < nr; j++) { 6346 if (buf[j] == bytenr) 6347 break; 6348 } 6349 if (j == nr) { 6350 WARN_ON(nr >= map->num_stripes); 6351 buf[nr++] = bytenr; 6352 } 6353 } 6354 6355 *logical = buf; 6356 *naddrs = nr; 6357 *stripe_len = rmap_len; 6358 6359 free_extent_map(em); 6360 return 0; 6361 } 6362 6363 static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio) 6364 { 6365 bio->bi_private = bbio->private; 6366 bio->bi_end_io = bbio->end_io; 6367 bio_endio(bio); 6368 6369 btrfs_put_bbio(bbio); 6370 } 6371 6372 static void btrfs_end_bio(struct bio *bio) 6373 { 6374 struct btrfs_bio *bbio = bio->bi_private; 6375 int is_orig_bio = 0; 6376 6377 if (bio->bi_status) { 6378 atomic_inc(&bbio->error); 6379 if (bio->bi_status == BLK_STS_IOERR || 6380 bio->bi_status == BLK_STS_TARGET) { 6381 unsigned int stripe_index = 6382 btrfs_io_bio(bio)->stripe_index; 6383 struct btrfs_device *dev; 6384 6385 BUG_ON(stripe_index >= bbio->num_stripes); 6386 dev = bbio->stripes[stripe_index].dev; 6387 if (dev->bdev) { 6388 if (bio_op(bio) == REQ_OP_WRITE) 6389 btrfs_dev_stat_inc_and_print(dev, 6390 BTRFS_DEV_STAT_WRITE_ERRS); 6391 else if (!(bio->bi_opf & REQ_RAHEAD)) 6392 btrfs_dev_stat_inc_and_print(dev, 6393 BTRFS_DEV_STAT_READ_ERRS); 6394 if (bio->bi_opf & REQ_PREFLUSH) 6395 btrfs_dev_stat_inc_and_print(dev, 6396 BTRFS_DEV_STAT_FLUSH_ERRS); 6397 } 6398 } 6399 } 6400 6401 if (bio == bbio->orig_bio) 6402 is_orig_bio = 1; 6403 6404 btrfs_bio_counter_dec(bbio->fs_info); 6405 6406 if (atomic_dec_and_test(&bbio->stripes_pending)) { 6407 if (!is_orig_bio) { 6408 bio_put(bio); 6409 bio = bbio->orig_bio; 6410 } 6411 6412 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; 6413 /* only send an error to the higher layers if it is 6414 * beyond the tolerance of the btrfs bio 6415 */ 6416 if (atomic_read(&bbio->error) > bbio->max_errors) { 6417 bio->bi_status = BLK_STS_IOERR; 6418 } else { 6419 /* 6420 * this bio is actually up to date, we didn't 6421 * go over the max number of errors 6422 */ 6423 bio->bi_status = BLK_STS_OK; 6424 } 6425 6426 btrfs_end_bbio(bbio, bio); 6427 } else if (!is_orig_bio) { 6428 bio_put(bio); 6429 } 6430 } 6431 6432 /* 6433 * see run_scheduled_bios for a description of why bios are collected for 6434 * async submit. 6435 * 6436 * This will add one bio to the pending list for a device and make sure 6437 * the work struct is scheduled. 
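* In short: reads are submitted directly, synchronous writes are queued on the device's pending_sync_bios list, all other writes on pending_bios, and the per-device work item is only queued if it is not already running.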
6438 */ 6439 static noinline void btrfs_schedule_bio(struct btrfs_device *device, 6440 struct bio *bio) 6441 { 6442 struct btrfs_fs_info *fs_info = device->fs_info; 6443 int should_queue = 1; 6444 struct btrfs_pending_bios *pending_bios; 6445 6446 /* don't bother with additional async steps for reads, right now */ 6447 if (bio_op(bio) == REQ_OP_READ) { 6448 btrfsic_submit_bio(bio); 6449 return; 6450 } 6451 6452 WARN_ON(bio->bi_next); 6453 bio->bi_next = NULL; 6454 6455 spin_lock(&device->io_lock); 6456 if (op_is_sync(bio->bi_opf)) 6457 pending_bios = &device->pending_sync_bios; 6458 else 6459 pending_bios = &device->pending_bios; 6460 6461 if (pending_bios->tail) 6462 pending_bios->tail->bi_next = bio; 6463 6464 pending_bios->tail = bio; 6465 if (!pending_bios->head) 6466 pending_bios->head = bio; 6467 if (device->running_pending) 6468 should_queue = 0; 6469 6470 spin_unlock(&device->io_lock); 6471 6472 if (should_queue) 6473 btrfs_queue_work(fs_info->submit_workers, &device->work); 6474 } 6475 6476 static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio, 6477 u64 physical, int dev_nr, int async) 6478 { 6479 struct btrfs_device *dev = bbio->stripes[dev_nr].dev; 6480 struct btrfs_fs_info *fs_info = bbio->fs_info; 6481 6482 bio->bi_private = bbio; 6483 btrfs_io_bio(bio)->stripe_index = dev_nr; 6484 bio->bi_end_io = btrfs_end_bio; 6485 bio->bi_iter.bi_sector = physical >> 9; 6486 btrfs_debug_in_rcu(fs_info, 6487 "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u", 6488 bio_op(bio), bio->bi_opf, (u64)bio->bi_iter.bi_sector, 6489 (u_long)dev->bdev->bd_dev, rcu_str_deref(dev->name), dev->devid, 6490 bio->bi_iter.bi_size); 6491 bio_set_dev(bio, dev->bdev); 6492 6493 btrfs_bio_counter_inc_noblocked(fs_info); 6494 6495 if (async) 6496 btrfs_schedule_bio(dev, bio); 6497 else 6498 btrfsic_submit_bio(bio); 6499 } 6500 6501 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical) 6502 { 6503 atomic_inc(&bbio->error); 6504 if (atomic_dec_and_test(&bbio->stripes_pending)) { 6505 /* Should be the original bio. 
*/ 6506 WARN_ON(bio != bbio->orig_bio); 6507 6508 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; 6509 bio->bi_iter.bi_sector = logical >> 9; 6510 if (atomic_read(&bbio->error) > bbio->max_errors) 6511 bio->bi_status = BLK_STS_IOERR; 6512 else 6513 bio->bi_status = BLK_STS_OK; 6514 btrfs_end_bbio(bbio, bio); 6515 } 6516 } 6517 6518 blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, 6519 int mirror_num, int async_submit) 6520 { 6521 struct btrfs_device *dev; 6522 struct bio *first_bio = bio; 6523 u64 logical = (u64)bio->bi_iter.bi_sector << 9; 6524 u64 length = 0; 6525 u64 map_length; 6526 int ret; 6527 int dev_nr; 6528 int total_devs; 6529 struct btrfs_bio *bbio = NULL; 6530 6531 length = bio->bi_iter.bi_size; 6532 map_length = length; 6533 6534 btrfs_bio_counter_inc_blocked(fs_info); 6535 ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical, 6536 &map_length, &bbio, mirror_num, 1); 6537 if (ret) { 6538 btrfs_bio_counter_dec(fs_info); 6539 return errno_to_blk_status(ret); 6540 } 6541 6542 total_devs = bbio->num_stripes; 6543 bbio->orig_bio = first_bio; 6544 bbio->private = first_bio->bi_private; 6545 bbio->end_io = first_bio->bi_end_io; 6546 bbio->fs_info = fs_info; 6547 atomic_set(&bbio->stripes_pending, bbio->num_stripes); 6548 6549 if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) && 6550 ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) { 6551 /* In this case, map_length has been set to the length of 6552 a single stripe; not the whole write */ 6553 if (bio_op(bio) == REQ_OP_WRITE) { 6554 ret = raid56_parity_write(fs_info, bio, bbio, 6555 map_length); 6556 } else { 6557 ret = raid56_parity_recover(fs_info, bio, bbio, 6558 map_length, mirror_num, 1); 6559 } 6560 6561 btrfs_bio_counter_dec(fs_info); 6562 return errno_to_blk_status(ret); 6563 } 6564 6565 if (map_length < length) { 6566 btrfs_crit(fs_info, 6567 "mapping failed logical %llu bio len %llu len %llu", 6568 logical, length, map_length); 6569 BUG(); 6570 } 6571 6572 for (dev_nr = 0; dev_nr < total_devs; dev_nr++) { 6573 dev = bbio->stripes[dev_nr].dev; 6574 if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING, 6575 &dev->dev_state) || 6576 (bio_op(first_bio) == REQ_OP_WRITE && 6577 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) { 6578 bbio_error(bbio, first_bio, logical); 6579 continue; 6580 } 6581 6582 if (dev_nr < total_devs - 1) 6583 bio = btrfs_bio_clone(first_bio); 6584 else 6585 bio = first_bio; 6586 6587 submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical, 6588 dev_nr, async_submit); 6589 } 6590 btrfs_bio_counter_dec(fs_info); 6591 return BLK_STS_OK; 6592 } 6593 6594 /* 6595 * Find a device specified by @devid or @uuid in the list of @fs_devices, or 6596 * return NULL. 6597 * 6598 * If devid and uuid are both specified, the match must be exact, otherwise 6599 * only devid is used. 6600 * 6601 * If @seed is true, traverse through the seed devices. 
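* For example, a lookup such as btrfs_find_device(fs_devices, devid, NULL, NULL, true) matches on devid alone and also walks the seed device chain, while additionally passing a uuid requires an exact UUID match.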
6602 */ 6603 struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices, 6604 u64 devid, u8 *uuid, u8 *fsid, 6605 bool seed) 6606 { 6607 struct btrfs_device *device; 6608 6609 while (fs_devices) { 6610 if (!fsid || 6611 !memcmp(fs_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE)) { 6612 list_for_each_entry(device, &fs_devices->devices, 6613 dev_list) { 6614 if (device->devid == devid && 6615 (!uuid || memcmp(device->uuid, uuid, 6616 BTRFS_UUID_SIZE) == 0)) 6617 return device; 6618 } 6619 } 6620 if (seed) 6621 fs_devices = fs_devices->seed; 6622 else 6623 return NULL; 6624 } 6625 return NULL; 6626 } 6627 6628 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices, 6629 u64 devid, u8 *dev_uuid) 6630 { 6631 struct btrfs_device *device; 6632 6633 device = btrfs_alloc_device(NULL, &devid, dev_uuid); 6634 if (IS_ERR(device)) 6635 return device; 6636 6637 list_add(&device->dev_list, &fs_devices->devices); 6638 device->fs_devices = fs_devices; 6639 fs_devices->num_devices++; 6640 6641 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 6642 fs_devices->missing_devices++; 6643 6644 return device; 6645 } 6646 6647 /** 6648 * btrfs_alloc_device - allocate struct btrfs_device 6649 * @fs_info: used only for generating a new devid, can be NULL if 6650 * devid is provided (i.e. @devid != NULL). 6651 * @devid: a pointer to devid for this device. If NULL a new devid 6652 * is generated. 6653 * @uuid: a pointer to UUID for this device. If NULL a new UUID 6654 * is generated. 6655 * 6656 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR() 6657 * on error. Returned struct is not linked onto any lists and must be 6658 * destroyed with btrfs_free_device. 6659 */ 6660 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, 6661 const u64 *devid, 6662 const u8 *uuid) 6663 { 6664 struct btrfs_device *dev; 6665 u64 tmp; 6666 6667 if (WARN_ON(!devid && !fs_info)) 6668 return ERR_PTR(-EINVAL); 6669 6670 dev = __alloc_device(); 6671 if (IS_ERR(dev)) 6672 return dev; 6673 6674 if (devid) 6675 tmp = *devid; 6676 else { 6677 int ret; 6678 6679 ret = find_next_devid(fs_info, &tmp); 6680 if (ret) { 6681 btrfs_free_device(dev); 6682 return ERR_PTR(ret); 6683 } 6684 } 6685 dev->devid = tmp; 6686 6687 if (uuid) 6688 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE); 6689 else 6690 generate_random_uuid(dev->uuid); 6691 6692 btrfs_init_work(&dev->work, btrfs_submit_helper, 6693 pending_bios_fn, NULL, NULL); 6694 6695 return dev; 6696 } 6697 6698 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info, 6699 u64 devid, u8 *uuid, bool error) 6700 { 6701 if (error) 6702 btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing", 6703 devid, uuid); 6704 else 6705 btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing", 6706 devid, uuid); 6707 } 6708 6709 static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes) 6710 { 6711 int index = btrfs_bg_flags_to_raid_index(type); 6712 int ncopies = btrfs_raid_array[index].ncopies; 6713 int data_stripes; 6714 6715 switch (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { 6716 case BTRFS_BLOCK_GROUP_RAID5: 6717 data_stripes = num_stripes - 1; 6718 break; 6719 case BTRFS_BLOCK_GROUP_RAID6: 6720 data_stripes = num_stripes - 2; 6721 break; 6722 default: 6723 data_stripes = num_stripes / ncopies; 6724 break; 6725 } 6726 return div_u64(chunk_len, data_stripes); 6727 } 6728 6729 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf, 6730 struct btrfs_chunk *chunk) 6731 { 6732 struct btrfs_fs_info 
*fs_info = leaf->fs_info; 6733 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 6734 struct map_lookup *map; 6735 struct extent_map *em; 6736 u64 logical; 6737 u64 length; 6738 u64 devid; 6739 u8 uuid[BTRFS_UUID_SIZE]; 6740 int num_stripes; 6741 int ret; 6742 int i; 6743 6744 logical = key->offset; 6745 length = btrfs_chunk_length(leaf, chunk); 6746 num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 6747 6748 /* 6749 * Only need to verify chunk item if we're reading from sys chunk array, 6750 * as chunk item in tree block is already verified by tree-checker. 6751 */ 6752 if (leaf->start == BTRFS_SUPER_INFO_OFFSET) { 6753 ret = btrfs_check_chunk_valid(leaf, chunk, logical); 6754 if (ret) 6755 return ret; 6756 } 6757 6758 read_lock(&map_tree->lock); 6759 em = lookup_extent_mapping(map_tree, logical, 1); 6760 read_unlock(&map_tree->lock); 6761 6762 /* already mapped? */ 6763 if (em && em->start <= logical && em->start + em->len > logical) { 6764 free_extent_map(em); 6765 return 0; 6766 } else if (em) { 6767 free_extent_map(em); 6768 } 6769 6770 em = alloc_extent_map(); 6771 if (!em) 6772 return -ENOMEM; 6773 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); 6774 if (!map) { 6775 free_extent_map(em); 6776 return -ENOMEM; 6777 } 6778 6779 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 6780 em->map_lookup = map; 6781 em->start = logical; 6782 em->len = length; 6783 em->orig_start = 0; 6784 em->block_start = 0; 6785 em->block_len = em->len; 6786 6787 map->num_stripes = num_stripes; 6788 map->io_width = btrfs_chunk_io_width(leaf, chunk); 6789 map->io_align = btrfs_chunk_io_align(leaf, chunk); 6790 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk); 6791 map->type = btrfs_chunk_type(leaf, chunk); 6792 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); 6793 map->verified_stripes = 0; 6794 em->orig_block_len = calc_stripe_length(map->type, em->len, 6795 map->num_stripes); 6796 for (i = 0; i < num_stripes; i++) { 6797 map->stripes[i].physical = 6798 btrfs_stripe_offset_nr(leaf, chunk, i); 6799 devid = btrfs_stripe_devid_nr(leaf, chunk, i); 6800 read_extent_buffer(leaf, uuid, (unsigned long) 6801 btrfs_stripe_dev_uuid_nr(chunk, i), 6802 BTRFS_UUID_SIZE); 6803 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, 6804 devid, uuid, NULL, true); 6805 if (!map->stripes[i].dev && 6806 !btrfs_test_opt(fs_info, DEGRADED)) { 6807 free_extent_map(em); 6808 btrfs_report_missing_device(fs_info, devid, uuid, true); 6809 return -ENOENT; 6810 } 6811 if (!map->stripes[i].dev) { 6812 map->stripes[i].dev = 6813 add_missing_dev(fs_info->fs_devices, devid, 6814 uuid); 6815 if (IS_ERR(map->stripes[i].dev)) { 6816 free_extent_map(em); 6817 btrfs_err(fs_info, 6818 "failed to init missing dev %llu: %ld", 6819 devid, PTR_ERR(map->stripes[i].dev)); 6820 return PTR_ERR(map->stripes[i].dev); 6821 } 6822 btrfs_report_missing_device(fs_info, devid, uuid, false); 6823 } 6824 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 6825 &(map->stripes[i].dev->dev_state)); 6826 6827 } 6828 6829 write_lock(&map_tree->lock); 6830 ret = add_extent_mapping(map_tree, em, 0); 6831 write_unlock(&map_tree->lock); 6832 if (ret < 0) { 6833 btrfs_err(fs_info, 6834 "failed to add chunk map, start=%llu len=%llu: %d", 6835 em->start, em->len, ret); 6836 } 6837 free_extent_map(em); 6838 6839 return ret; 6840 } 6841 6842 static void fill_device_from_item(struct extent_buffer *leaf, 6843 struct btrfs_dev_item *dev_item, 6844 struct btrfs_device *device) 6845 { 6846 unsigned long ptr; 6847 6848 device->devid = btrfs_device_id(leaf, 
dev_item); 6849 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item); 6850 device->total_bytes = device->disk_total_bytes; 6851 device->commit_total_bytes = device->disk_total_bytes; 6852 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item); 6853 device->commit_bytes_used = device->bytes_used; 6854 device->type = btrfs_device_type(leaf, dev_item); 6855 device->io_align = btrfs_device_io_align(leaf, dev_item); 6856 device->io_width = btrfs_device_io_width(leaf, dev_item); 6857 device->sector_size = btrfs_device_sector_size(leaf, dev_item); 6858 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID); 6859 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 6860 6861 ptr = btrfs_device_uuid(dev_item); 6862 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 6863 } 6864 6865 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info, 6866 u8 *fsid) 6867 { 6868 struct btrfs_fs_devices *fs_devices; 6869 int ret; 6870 6871 lockdep_assert_held(&uuid_mutex); 6872 ASSERT(fsid); 6873 6874 fs_devices = fs_info->fs_devices->seed; 6875 while (fs_devices) { 6876 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE)) 6877 return fs_devices; 6878 6879 fs_devices = fs_devices->seed; 6880 } 6881 6882 fs_devices = find_fsid(fsid, NULL); 6883 if (!fs_devices) { 6884 if (!btrfs_test_opt(fs_info, DEGRADED)) 6885 return ERR_PTR(-ENOENT); 6886 6887 fs_devices = alloc_fs_devices(fsid, NULL); 6888 if (IS_ERR(fs_devices)) 6889 return fs_devices; 6890 6891 fs_devices->seeding = 1; 6892 fs_devices->opened = 1; 6893 return fs_devices; 6894 } 6895 6896 fs_devices = clone_fs_devices(fs_devices); 6897 if (IS_ERR(fs_devices)) 6898 return fs_devices; 6899 6900 ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder); 6901 if (ret) { 6902 free_fs_devices(fs_devices); 6903 fs_devices = ERR_PTR(ret); 6904 goto out; 6905 } 6906 6907 if (!fs_devices->seeding) { 6908 close_fs_devices(fs_devices); 6909 free_fs_devices(fs_devices); 6910 fs_devices = ERR_PTR(-EINVAL); 6911 goto out; 6912 } 6913 6914 fs_devices->seed = fs_info->fs_devices->seed; 6915 fs_info->fs_devices->seed = fs_devices; 6916 out: 6917 return fs_devices; 6918 } 6919 6920 static int read_one_dev(struct extent_buffer *leaf, 6921 struct btrfs_dev_item *dev_item) 6922 { 6923 struct btrfs_fs_info *fs_info = leaf->fs_info; 6924 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 6925 struct btrfs_device *device; 6926 u64 devid; 6927 int ret; 6928 u8 fs_uuid[BTRFS_FSID_SIZE]; 6929 u8 dev_uuid[BTRFS_UUID_SIZE]; 6930 6931 devid = btrfs_device_id(leaf, dev_item); 6932 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 6933 BTRFS_UUID_SIZE); 6934 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 6935 BTRFS_FSID_SIZE); 6936 6937 if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) { 6938 fs_devices = open_seed_devices(fs_info, fs_uuid); 6939 if (IS_ERR(fs_devices)) 6940 return PTR_ERR(fs_devices); 6941 } 6942 6943 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid, 6944 fs_uuid, true); 6945 if (!device) { 6946 if (!btrfs_test_opt(fs_info, DEGRADED)) { 6947 btrfs_report_missing_device(fs_info, devid, 6948 dev_uuid, true); 6949 return -ENOENT; 6950 } 6951 6952 device = add_missing_dev(fs_devices, devid, dev_uuid); 6953 if (IS_ERR(device)) { 6954 btrfs_err(fs_info, 6955 "failed to add missing dev %llu: %ld", 6956 devid, PTR_ERR(device)); 6957 return PTR_ERR(device); 6958 } 6959 btrfs_report_missing_device(fs_info, devid, dev_uuid, false); 6960 } else { 6961 
if (!device->bdev) { 6962 if (!btrfs_test_opt(fs_info, DEGRADED)) { 6963 btrfs_report_missing_device(fs_info, 6964 devid, dev_uuid, true); 6965 return -ENOENT; 6966 } 6967 btrfs_report_missing_device(fs_info, devid, 6968 dev_uuid, false); 6969 } 6970 6971 if (!device->bdev && 6972 !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 6973 /* 6974 * this happens when a device that was properly set up 6975 * in the device info lists suddenly goes bad. 6976 * device->bdev is NULL, and so we have to set 6977 * device->missing to one here 6978 */ 6979 device->fs_devices->missing_devices++; 6980 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 6981 } 6982 6983 /* Move the device to its own fs_devices */ 6984 if (device->fs_devices != fs_devices) { 6985 ASSERT(test_bit(BTRFS_DEV_STATE_MISSING, 6986 &device->dev_state)); 6987 6988 list_move(&device->dev_list, &fs_devices->devices); 6989 device->fs_devices->num_devices--; 6990 fs_devices->num_devices++; 6991 6992 device->fs_devices->missing_devices--; 6993 fs_devices->missing_devices++; 6994 6995 device->fs_devices = fs_devices; 6996 } 6997 } 6998 6999 if (device->fs_devices != fs_info->fs_devices) { 7000 BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)); 7001 if (device->generation != 7002 btrfs_device_generation(leaf, dev_item)) 7003 return -EINVAL; 7004 } 7005 7006 fill_device_from_item(leaf, dev_item, device); 7007 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 7008 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 7009 !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 7010 device->fs_devices->total_rw_bytes += device->total_bytes; 7011 atomic64_add(device->total_bytes - device->bytes_used, 7012 &fs_info->free_chunk_space); 7013 } 7014 ret = 0; 7015 return ret; 7016 } 7017 7018 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info) 7019 { 7020 struct btrfs_root *root = fs_info->tree_root; 7021 struct btrfs_super_block *super_copy = fs_info->super_copy; 7022 struct extent_buffer *sb; 7023 struct btrfs_disk_key *disk_key; 7024 struct btrfs_chunk *chunk; 7025 u8 *array_ptr; 7026 unsigned long sb_array_offset; 7027 int ret = 0; 7028 u32 num_stripes; 7029 u32 array_size; 7030 u32 len = 0; 7031 u32 cur_offset; 7032 u64 type; 7033 struct btrfs_key key; 7034 7035 ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize); 7036 /* 7037 * This will create extent buffer of nodesize, superblock size is 7038 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will 7039 * overallocate but we can keep it as-is, only the first page is used. 7040 */ 7041 sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET); 7042 if (IS_ERR(sb)) 7043 return PTR_ERR(sb); 7044 set_extent_buffer_uptodate(sb); 7045 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0); 7046 /* 7047 * The sb extent buffer is artificial and just used to read the system array. 7048 * set_extent_buffer_uptodate() call does not properly mark all its 7049 * pages up-to-date when the page is larger: extent does not cover the 7050 * whole page and consequently check_page_uptodate does not find all 7051 * the page's extents up-to-date (the hole beyond sb), 7052 * write_extent_buffer then triggers a WARN_ON. 7053 * 7054 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle, 7055 * but sb spans only this function. Add an explicit SetPageUptodate call 7056 * to silence the warning e.g. on PowerPC 64.
7057 */ 7058 if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE) 7059 SetPageUptodate(sb->pages[0]); 7060 7061 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); 7062 array_size = btrfs_super_sys_array_size(super_copy); 7063 7064 array_ptr = super_copy->sys_chunk_array; 7065 sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array); 7066 cur_offset = 0; 7067 7068 while (cur_offset < array_size) { 7069 disk_key = (struct btrfs_disk_key *)array_ptr; 7070 len = sizeof(*disk_key); 7071 if (cur_offset + len > array_size) 7072 goto out_short_read; 7073 7074 btrfs_disk_key_to_cpu(&key, disk_key); 7075 7076 array_ptr += len; 7077 sb_array_offset += len; 7078 cur_offset += len; 7079 7080 if (key.type == BTRFS_CHUNK_ITEM_KEY) { 7081 chunk = (struct btrfs_chunk *)sb_array_offset; 7082 /* 7083 * At least one btrfs_chunk with one stripe must be 7084 * present, exact stripe count check comes afterwards 7085 */ 7086 len = btrfs_chunk_item_size(1); 7087 if (cur_offset + len > array_size) 7088 goto out_short_read; 7089 7090 num_stripes = btrfs_chunk_num_stripes(sb, chunk); 7091 if (!num_stripes) { 7092 btrfs_err(fs_info, 7093 "invalid number of stripes %u in sys_array at offset %u", 7094 num_stripes, cur_offset); 7095 ret = -EIO; 7096 break; 7097 } 7098 7099 type = btrfs_chunk_type(sb, chunk); 7100 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) { 7101 btrfs_err(fs_info, 7102 "invalid chunk type %llu in sys_array at offset %u", 7103 type, cur_offset); 7104 ret = -EIO; 7105 break; 7106 } 7107 7108 len = btrfs_chunk_item_size(num_stripes); 7109 if (cur_offset + len > array_size) 7110 goto out_short_read; 7111 7112 ret = read_one_chunk(&key, sb, chunk); 7113 if (ret) 7114 break; 7115 } else { 7116 btrfs_err(fs_info, 7117 "unexpected item type %u in sys_array at offset %u", 7118 (u32)key.type, cur_offset); 7119 ret = -EIO; 7120 break; 7121 } 7122 array_ptr += len; 7123 sb_array_offset += len; 7124 cur_offset += len; 7125 } 7126 clear_extent_buffer_uptodate(sb); 7127 free_extent_buffer_stale(sb); 7128 return ret; 7129 7130 out_short_read: 7131 btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u", 7132 len, cur_offset); 7133 clear_extent_buffer_uptodate(sb); 7134 free_extent_buffer_stale(sb); 7135 return -EIO; 7136 } 7137 7138 /* 7139 * Check if all chunks in the fs are OK for read-write degraded mount 7140 * 7141 * If the @failing_dev is specified, it's accounted as missing. 7142 * 7143 * Return true if all chunks meet the minimal RW mount requirements. 7144 * Return false if any chunk doesn't meet the minimal RW mount requirements. 7145 */ 7146 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info, 7147 struct btrfs_device *failing_dev) 7148 { 7149 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 7150 struct extent_map *em; 7151 u64 next_start = 0; 7152 bool ret = true; 7153 7154 read_lock(&map_tree->lock); 7155 em = lookup_extent_mapping(map_tree, 0, (u64)-1); 7156 read_unlock(&map_tree->lock); 7157 /* No chunk at all? 
Return false anyway */ 7158 if (!em) { 7159 ret = false; 7160 goto out; 7161 } 7162 while (em) { 7163 struct map_lookup *map; 7164 int missing = 0; 7165 int max_tolerated; 7166 int i; 7167 7168 map = em->map_lookup; 7169 max_tolerated = 7170 btrfs_get_num_tolerated_disk_barrier_failures( 7171 map->type); 7172 for (i = 0; i < map->num_stripes; i++) { 7173 struct btrfs_device *dev = map->stripes[i].dev; 7174 7175 if (!dev || !dev->bdev || 7176 test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) || 7177 dev->last_flush_error) 7178 missing++; 7179 else if (failing_dev && failing_dev == dev) 7180 missing++; 7181 } 7182 if (missing > max_tolerated) { 7183 if (!failing_dev) 7184 btrfs_warn(fs_info, 7185 "chunk %llu missing %d devices, max tolerance is %d for writable mount", 7186 em->start, missing, max_tolerated); 7187 free_extent_map(em); 7188 ret = false; 7189 goto out; 7190 } 7191 next_start = extent_map_end(em); 7192 free_extent_map(em); 7193 7194 read_lock(&map_tree->lock); 7195 em = lookup_extent_mapping(map_tree, next_start, 7196 (u64)(-1) - next_start); 7197 read_unlock(&map_tree->lock); 7198 } 7199 out: 7200 return ret; 7201 } 7202 7203 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) 7204 { 7205 struct btrfs_root *root = fs_info->chunk_root; 7206 struct btrfs_path *path; 7207 struct extent_buffer *leaf; 7208 struct btrfs_key key; 7209 struct btrfs_key found_key; 7210 int ret; 7211 int slot; 7212 u64 total_dev = 0; 7213 7214 path = btrfs_alloc_path(); 7215 if (!path) 7216 return -ENOMEM; 7217 7218 /* 7219 * uuid_mutex is needed only if we are mounting a sprout FS 7220 * otherwise we don't need it. 7221 */ 7222 mutex_lock(&uuid_mutex); 7223 mutex_lock(&fs_info->chunk_mutex); 7224 7225 /* 7226 * Read all device items, and then all the chunk items. All 7227 * device items are found before any chunk item (their object id 7228 * is smaller than the lowest possible object id for a chunk 7229 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID). 7230 */ 7231 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 7232 key.offset = 0; 7233 key.type = 0; 7234 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 7235 if (ret < 0) 7236 goto error; 7237 while (1) { 7238 leaf = path->nodes[0]; 7239 slot = path->slots[0]; 7240 if (slot >= btrfs_header_nritems(leaf)) { 7241 ret = btrfs_next_leaf(root, path); 7242 if (ret == 0) 7243 continue; 7244 if (ret < 0) 7245 goto error; 7246 break; 7247 } 7248 btrfs_item_key_to_cpu(leaf, &found_key, slot); 7249 if (found_key.type == BTRFS_DEV_ITEM_KEY) { 7250 struct btrfs_dev_item *dev_item; 7251 dev_item = btrfs_item_ptr(leaf, slot, 7252 struct btrfs_dev_item); 7253 ret = read_one_dev(leaf, dev_item); 7254 if (ret) 7255 goto error; 7256 total_dev++; 7257 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { 7258 struct btrfs_chunk *chunk; 7259 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 7260 ret = read_one_chunk(&found_key, leaf, chunk); 7261 if (ret) 7262 goto error; 7263 } 7264 path->slots[0]++; 7265 } 7266 7267 /* 7268 * After loading chunk tree, we've got all device information, 7269 * do another round of validation checks. 
7270 */ 7271 if (total_dev != fs_info->fs_devices->total_devices) { 7272 btrfs_err(fs_info, 7273 "super_num_devices %llu mismatch with num_devices %llu found here", 7274 btrfs_super_num_devices(fs_info->super_copy), 7275 total_dev); 7276 ret = -EINVAL; 7277 goto error; 7278 } 7279 if (btrfs_super_total_bytes(fs_info->super_copy) < 7280 fs_info->fs_devices->total_rw_bytes) { 7281 btrfs_err(fs_info, 7282 "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu", 7283 btrfs_super_total_bytes(fs_info->super_copy), 7284 fs_info->fs_devices->total_rw_bytes); 7285 ret = -EINVAL; 7286 goto error; 7287 } 7288 ret = 0; 7289 error: 7290 mutex_unlock(&fs_info->chunk_mutex); 7291 mutex_unlock(&uuid_mutex); 7292 7293 btrfs_free_path(path); 7294 return ret; 7295 } 7296 7297 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info) 7298 { 7299 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7300 struct btrfs_device *device; 7301 7302 while (fs_devices) { 7303 mutex_lock(&fs_devices->device_list_mutex); 7304 list_for_each_entry(device, &fs_devices->devices, dev_list) 7305 device->fs_info = fs_info; 7306 mutex_unlock(&fs_devices->device_list_mutex); 7307 7308 fs_devices = fs_devices->seed; 7309 } 7310 } 7311 7312 static void __btrfs_reset_dev_stats(struct btrfs_device *dev) 7313 { 7314 int i; 7315 7316 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7317 btrfs_dev_stat_reset(dev, i); 7318 } 7319 7320 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info) 7321 { 7322 struct btrfs_key key; 7323 struct btrfs_key found_key; 7324 struct btrfs_root *dev_root = fs_info->dev_root; 7325 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7326 struct extent_buffer *eb; 7327 int slot; 7328 int ret = 0; 7329 struct btrfs_device *device; 7330 struct btrfs_path *path = NULL; 7331 int i; 7332 7333 path = btrfs_alloc_path(); 7334 if (!path) { 7335 ret = -ENOMEM; 7336 goto out; 7337 } 7338 7339 mutex_lock(&fs_devices->device_list_mutex); 7340 list_for_each_entry(device, &fs_devices->devices, dev_list) { 7341 int item_size; 7342 struct btrfs_dev_stats_item *ptr; 7343 7344 key.objectid = BTRFS_DEV_STATS_OBJECTID; 7345 key.type = BTRFS_PERSISTENT_ITEM_KEY; 7346 key.offset = device->devid; 7347 ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0); 7348 if (ret) { 7349 __btrfs_reset_dev_stats(device); 7350 device->dev_stats_valid = 1; 7351 btrfs_release_path(path); 7352 continue; 7353 } 7354 slot = path->slots[0]; 7355 eb = path->nodes[0]; 7356 btrfs_item_key_to_cpu(eb, &found_key, slot); 7357 item_size = btrfs_item_size_nr(eb, slot); 7358 7359 ptr = btrfs_item_ptr(eb, slot, 7360 struct btrfs_dev_stats_item); 7361 7362 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) { 7363 if (item_size >= (1 + i) * sizeof(__le64)) 7364 btrfs_dev_stat_set(device, i, 7365 btrfs_dev_stats_value(eb, ptr, i)); 7366 else 7367 btrfs_dev_stat_reset(device, i); 7368 } 7369 7370 device->dev_stats_valid = 1; 7371 btrfs_dev_stat_print_on_load(device); 7372 btrfs_release_path(path); 7373 } 7374 mutex_unlock(&fs_devices->device_list_mutex); 7375 7376 out: 7377 btrfs_free_path(path); 7378 return ret < 0 ? 
ret : 0; 7379 } 7380 7381 static int update_dev_stat_item(struct btrfs_trans_handle *trans, 7382 struct btrfs_device *device) 7383 { 7384 struct btrfs_fs_info *fs_info = trans->fs_info; 7385 struct btrfs_root *dev_root = fs_info->dev_root; 7386 struct btrfs_path *path; 7387 struct btrfs_key key; 7388 struct extent_buffer *eb; 7389 struct btrfs_dev_stats_item *ptr; 7390 int ret; 7391 int i; 7392 7393 key.objectid = BTRFS_DEV_STATS_OBJECTID; 7394 key.type = BTRFS_PERSISTENT_ITEM_KEY; 7395 key.offset = device->devid; 7396 7397 path = btrfs_alloc_path(); 7398 if (!path) 7399 return -ENOMEM; 7400 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1); 7401 if (ret < 0) { 7402 btrfs_warn_in_rcu(fs_info, 7403 "error %d while searching for dev_stats item for device %s", 7404 ret, rcu_str_deref(device->name)); 7405 goto out; 7406 } 7407 7408 if (ret == 0 && 7409 btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) { 7410 /* need to delete old one and insert a new one */ 7411 ret = btrfs_del_item(trans, dev_root, path); 7412 if (ret != 0) { 7413 btrfs_warn_in_rcu(fs_info, 7414 "delete too small dev_stats item for device %s failed %d", 7415 rcu_str_deref(device->name), ret); 7416 goto out; 7417 } 7418 ret = 1; 7419 } 7420 7421 if (ret == 1) { 7422 /* need to insert a new item */ 7423 btrfs_release_path(path); 7424 ret = btrfs_insert_empty_item(trans, dev_root, path, 7425 &key, sizeof(*ptr)); 7426 if (ret < 0) { 7427 btrfs_warn_in_rcu(fs_info, 7428 "insert dev_stats item for device %s failed %d", 7429 rcu_str_deref(device->name), ret); 7430 goto out; 7431 } 7432 } 7433 7434 eb = path->nodes[0]; 7435 ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item); 7436 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7437 btrfs_set_dev_stats_value(eb, ptr, i, 7438 btrfs_dev_stat_read(device, i)); 7439 btrfs_mark_buffer_dirty(eb); 7440 7441 out: 7442 btrfs_free_path(path); 7443 return ret; 7444 } 7445 7446 /* 7447 * called from commit_transaction. Writes all changed device stats to disk. 7448 */ 7449 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans) 7450 { 7451 struct btrfs_fs_info *fs_info = trans->fs_info; 7452 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7453 struct btrfs_device *device; 7454 int stats_cnt; 7455 int ret = 0; 7456 7457 mutex_lock(&fs_devices->device_list_mutex); 7458 list_for_each_entry(device, &fs_devices->devices, dev_list) { 7459 stats_cnt = atomic_read(&device->dev_stats_ccnt); 7460 if (!device->dev_stats_valid || stats_cnt == 0) 7461 continue; 7462 7463 7464 /* 7465 * There is a LOAD-LOAD control dependency between the value of 7466 * dev_stats_ccnt and updating the on-disk values which requires 7467 * reading the in-memory counters. Such control dependencies 7468 * require explicit read memory barriers. 
7469 * 7470 * This memory barrier pairs with smp_mb__before_atomic in 7471 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full 7472 * barrier implied by atomic_xchg in 7473 * btrfs_dev_stats_read_and_reset 7474 */ 7475 smp_rmb(); 7476 7477 ret = update_dev_stat_item(trans, device); 7478 if (!ret) 7479 atomic_sub(stats_cnt, &device->dev_stats_ccnt); 7480 } 7481 mutex_unlock(&fs_devices->device_list_mutex); 7482 7483 return ret; 7484 } 7485 7486 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index) 7487 { 7488 btrfs_dev_stat_inc(dev, index); 7489 btrfs_dev_stat_print_on_error(dev); 7490 } 7491 7492 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev) 7493 { 7494 if (!dev->dev_stats_valid) 7495 return; 7496 btrfs_err_rl_in_rcu(dev->fs_info, 7497 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u", 7498 rcu_str_deref(dev->name), 7499 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), 7500 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), 7501 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), 7502 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS), 7503 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS)); 7504 } 7505 7506 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev) 7507 { 7508 int i; 7509 7510 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7511 if (btrfs_dev_stat_read(dev, i) != 0) 7512 break; 7513 if (i == BTRFS_DEV_STAT_VALUES_MAX) 7514 return; /* all values == 0, suppress message */ 7515 7516 btrfs_info_in_rcu(dev->fs_info, 7517 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u", 7518 rcu_str_deref(dev->name), 7519 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), 7520 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), 7521 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), 7522 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS), 7523 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS)); 7524 } 7525 7526 int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info, 7527 struct btrfs_ioctl_get_dev_stats *stats) 7528 { 7529 struct btrfs_device *dev; 7530 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7531 int i; 7532 7533 mutex_lock(&fs_devices->device_list_mutex); 7534 dev = btrfs_find_device(fs_info->fs_devices, stats->devid, NULL, NULL, 7535 true); 7536 mutex_unlock(&fs_devices->device_list_mutex); 7537 7538 if (!dev) { 7539 btrfs_warn(fs_info, "get dev_stats failed, device not found"); 7540 return -ENODEV; 7541 } else if (!dev->dev_stats_valid) { 7542 btrfs_warn(fs_info, "get dev_stats failed, not yet valid"); 7543 return -ENODEV; 7544 } else if (stats->flags & BTRFS_DEV_STATS_RESET) { 7545 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) { 7546 if (stats->nr_items > i) 7547 stats->values[i] = 7548 btrfs_dev_stat_read_and_reset(dev, i); 7549 else 7550 btrfs_dev_stat_reset(dev, i); 7551 } 7552 } else { 7553 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7554 if (stats->nr_items > i) 7555 stats->values[i] = btrfs_dev_stat_read(dev, i); 7556 } 7557 if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX) 7558 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX; 7559 return 0; 7560 } 7561 7562 void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path) 7563 { 7564 struct buffer_head *bh; 7565 struct btrfs_super_block *disk_super; 7566 int copy_num; 7567 7568 if (!bdev) 7569 return; 7570 7571 for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; 7572 copy_num++) { 7573 7574 if (btrfs_read_dev_one_super(bdev, copy_num, &bh)) 7575 continue; 7576 7577 disk_super = (struct
btrfs_super_block *)bh->b_data; 7578 7579 memset(&disk_super->magic, 0, sizeof(disk_super->magic)); 7580 set_buffer_dirty(bh); 7581 sync_dirty_buffer(bh); 7582 brelse(bh); 7583 } 7584 7585 /* Notify udev that device has changed */ 7586 btrfs_kobject_uevent(bdev, KOBJ_CHANGE); 7587 7588 /* Update ctime/mtime for device path for libblkid */ 7589 update_dev_time(device_path); 7590 } 7591 7592 /* 7593 * Update the size and bytes used for each device where it changed. This is 7594 * delayed since we would otherwise get errors while writing out the 7595 * superblocks. 7596 * 7597 * Must be invoked during transaction commit. 7598 */ 7599 void btrfs_commit_device_sizes(struct btrfs_transaction *trans) 7600 { 7601 struct btrfs_device *curr, *next; 7602 7603 ASSERT(trans->state == TRANS_STATE_COMMIT_DOING); 7604 7605 if (list_empty(&trans->dev_update_list)) 7606 return; 7607 7608 /* 7609 * We don't need the device_list_mutex here. This list is owned by the 7610 * transaction and the transaction must complete before the device is 7611 * released. 7612 */ 7613 mutex_lock(&trans->fs_info->chunk_mutex); 7614 list_for_each_entry_safe(curr, next, &trans->dev_update_list, 7615 post_commit_list) { 7616 list_del_init(&curr->post_commit_list); 7617 curr->commit_total_bytes = curr->disk_total_bytes; 7618 curr->commit_bytes_used = curr->bytes_used; 7619 } 7620 mutex_unlock(&trans->fs_info->chunk_mutex); 7621 } 7622 7623 void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info) 7624 { 7625 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7626 while (fs_devices) { 7627 fs_devices->fs_info = fs_info; 7628 fs_devices = fs_devices->seed; 7629 } 7630 } 7631 7632 void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info) 7633 { 7634 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7635 while (fs_devices) { 7636 fs_devices->fs_info = NULL; 7637 fs_devices = fs_devices->seed; 7638 } 7639 } 7640 7641 /* 7642 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10. 
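* For example, the factor is 2 for DUP and the RAID1/RAID10 profiles (two copies of every byte) and 1 for SINGLE and RAID0.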
7643 */ 7644 int btrfs_bg_type_to_factor(u64 flags) 7645 { 7646 const int index = btrfs_bg_flags_to_raid_index(flags); 7647 7648 return btrfs_raid_array[index].ncopies; 7649 } 7650 7651 7652 7653 static int verify_one_dev_extent(struct btrfs_fs_info *fs_info, 7654 u64 chunk_offset, u64 devid, 7655 u64 physical_offset, u64 physical_len) 7656 { 7657 struct extent_map_tree *em_tree = &fs_info->mapping_tree; 7658 struct extent_map *em; 7659 struct map_lookup *map; 7660 struct btrfs_device *dev; 7661 u64 stripe_len; 7662 bool found = false; 7663 int ret = 0; 7664 int i; 7665 7666 read_lock(&em_tree->lock); 7667 em = lookup_extent_mapping(em_tree, chunk_offset, 1); 7668 read_unlock(&em_tree->lock); 7669 7670 if (!em) { 7671 btrfs_err(fs_info, 7672 "dev extent physical offset %llu on devid %llu doesn't have corresponding chunk", 7673 physical_offset, devid); 7674 ret = -EUCLEAN; 7675 goto out; 7676 } 7677 7678 map = em->map_lookup; 7679 stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes); 7680 if (physical_len != stripe_len) { 7681 btrfs_err(fs_info, 7682 "dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu", 7683 physical_offset, devid, em->start, physical_len, 7684 stripe_len); 7685 ret = -EUCLEAN; 7686 goto out; 7687 } 7688 7689 for (i = 0; i < map->num_stripes; i++) { 7690 if (map->stripes[i].dev->devid == devid && 7691 map->stripes[i].physical == physical_offset) { 7692 found = true; 7693 if (map->verified_stripes >= map->num_stripes) { 7694 btrfs_err(fs_info, 7695 "too many dev extents for chunk %llu found", 7696 em->start); 7697 ret = -EUCLEAN; 7698 goto out; 7699 } 7700 map->verified_stripes++; 7701 break; 7702 } 7703 } 7704 if (!found) { 7705 btrfs_err(fs_info, 7706 "dev extent physical offset %llu devid %llu has no corresponding chunk", 7707 physical_offset, devid); 7708 ret = -EUCLEAN; 7709 } 7710 7711 /* Make sure no dev extent is beyond device boundary */ 7712 dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true); 7713 if (!dev) { 7714 btrfs_err(fs_info, "failed to find devid %llu", devid); 7715 ret = -EUCLEAN; 7716 goto out; 7717 } 7718 7719 /* It's possible this device is a dummy for seed device */ 7720 if (dev->disk_total_bytes == 0) { 7721 dev = btrfs_find_device(fs_info->fs_devices->seed, devid, NULL, 7722 NULL, false); 7723 if (!dev) { 7724 btrfs_err(fs_info, "failed to find seed devid %llu", 7725 devid); 7726 ret = -EUCLEAN; 7727 goto out; 7728 } 7729 } 7730 7731 if (physical_offset + physical_len > dev->disk_total_bytes) { 7732 btrfs_err(fs_info, 7733 "dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu", 7734 devid, physical_offset, physical_len, 7735 dev->disk_total_bytes); 7736 ret = -EUCLEAN; 7737 goto out; 7738 } 7739 out: 7740 free_extent_map(em); 7741 return ret; 7742 } 7743 7744 static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info) 7745 { 7746 struct extent_map_tree *em_tree = &fs_info->mapping_tree; 7747 struct extent_map *em; 7748 struct rb_node *node; 7749 int ret = 0; 7750 7751 read_lock(&em_tree->lock); 7752 for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) { 7753 em = rb_entry(node, struct extent_map, rb_node); 7754 if (em->map_lookup->num_stripes != 7755 em->map_lookup->verified_stripes) { 7756 btrfs_err(fs_info, 7757 "chunk %llu has missing dev extent, have %d expect %d", 7758 em->start, em->map_lookup->verified_stripes, 7759 em->map_lookup->num_stripes); 7760 ret = -EUCLEAN; 7761 goto out; 7762 } 7763 }
7764 out: 7765 read_unlock(&em_tree->lock); 7766 return ret; 7767 } 7768 7769 /* 7770 * Ensure that all dev extents are mapped to correct chunk, otherwise 7771 * later chunk allocation/free would cause unexpected behavior. 7772 * 7773 * NOTE: This will iterate through the whole device tree, which should be of 7774 * the same size level as the chunk tree. This slightly increases mount time. 7775 */ 7776 int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info) 7777 { 7778 struct btrfs_path *path; 7779 struct btrfs_root *root = fs_info->dev_root; 7780 struct btrfs_key key; 7781 u64 prev_devid = 0; 7782 u64 prev_dev_ext_end = 0; 7783 int ret = 0; 7784 7785 key.objectid = 1; 7786 key.type = BTRFS_DEV_EXTENT_KEY; 7787 key.offset = 0; 7788 7789 path = btrfs_alloc_path(); 7790 if (!path) 7791 return -ENOMEM; 7792 7793 path->reada = READA_FORWARD; 7794 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 7795 if (ret < 0) 7796 goto out; 7797 7798 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { 7799 ret = btrfs_next_item(root, path); 7800 if (ret < 0) 7801 goto out; 7802 /* No dev extents at all? Not good */ 7803 if (ret > 0) { 7804 ret = -EUCLEAN; 7805 goto out; 7806 } 7807 } 7808 while (1) { 7809 struct extent_buffer *leaf = path->nodes[0]; 7810 struct btrfs_dev_extent *dext; 7811 int slot = path->slots[0]; 7812 u64 chunk_offset; 7813 u64 physical_offset; 7814 u64 physical_len; 7815 u64 devid; 7816 7817 btrfs_item_key_to_cpu(leaf, &key, slot); 7818 if (key.type != BTRFS_DEV_EXTENT_KEY) 7819 break; 7820 devid = key.objectid; 7821 physical_offset = key.offset; 7822 7823 dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent); 7824 chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext); 7825 physical_len = btrfs_dev_extent_length(leaf, dext); 7826 7827 /* Check if this dev extent overlaps with the previous one */ 7828 if (devid == prev_devid && physical_offset < prev_dev_ext_end) { 7829 btrfs_err(fs_info, 7830 "dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu", 7831 devid, physical_offset, prev_dev_ext_end); 7832 ret = -EUCLEAN; 7833 goto out; 7834 } 7835 7836 ret = verify_one_dev_extent(fs_info, chunk_offset, devid, 7837 physical_offset, physical_len); 7838 if (ret < 0) 7839 goto out; 7840 prev_devid = devid; 7841 prev_dev_ext_end = physical_offset + physical_len; 7842 7843 ret = btrfs_next_item(root, path); 7844 if (ret < 0) 7845 goto out; 7846 if (ret > 0) { 7847 ret = 0; 7848 break; 7849 } 7850 } 7851 7852 /* Ensure all chunks have corresponding dev extents */ 7853 ret = verify_chunk_dev_extent_mapping(fs_info); 7854 out: 7855 btrfs_free_path(path); 7856 return ret; 7857 } 7858 7859 /* 7860 * Check whether the given block group or device is pinned by any inode being 7861 * used as a swapfile. 7862 */ 7863 bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr) 7864 { 7865 struct btrfs_swapfile_pin *sp; 7866 struct rb_node *node; 7867 7868 spin_lock(&fs_info->swapfile_pins_lock); 7869 node = fs_info->swapfile_pins.rb_node; 7870 while (node) { 7871 sp = rb_entry(node, struct btrfs_swapfile_pin, node); 7872 if (ptr < sp->ptr) 7873 node = node->rb_left; 7874 else if (ptr > sp->ptr) 7875 node = node->rb_right; 7876 else 7877 break; 7878 } 7879 spin_unlock(&fs_info->swapfile_pins_lock); 7880 return node != NULL; 7881 } 7882