/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "math.h"
#include "dev-replace.h"
#include "sysfs.h"

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes = 2,
		.dev_stripes = 1,
		.devs_max = 0,	/* 0 == as many as possible */
		.devs_min = 4,
		.tolerated_failures = 1,
		.devs_increment = 2,
		.ncopies = 2,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 2,
		.devs_min = 2,
		.tolerated_failures = 1,
		.devs_increment = 2,
		.ncopies = 2,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes = 1,
		.dev_stripes = 2,
		.devs_max = 1,
		.devs_min = 1,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 2,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 2,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 1,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 1,
		.devs_min = 1,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 1,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 2,
		.tolerated_failures = 1,
		.devs_increment = 1,
		.ncopies = 2,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 3,
		.tolerated_failures = 2,
		.devs_increment = 1,
		.ncopies = 3,
	},
};

const u64 btrfs_raid_group[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = BTRFS_BLOCK_GROUP_RAID10,
	[BTRFS_RAID_RAID1]  = BTRFS_BLOCK_GROUP_RAID1,
	[BTRFS_RAID_DUP]    = BTRFS_BLOCK_GROUP_DUP,
	[BTRFS_RAID_RAID0]  = BTRFS_BLOCK_GROUP_RAID0,
	[BTRFS_RAID_SINGLE] = 0,
	[BTRFS_RAID_RAID5]  = BTRFS_BLOCK_GROUP_RAID5,
	[BTRFS_RAID_RAID6]  = BTRFS_BLOCK_GROUP_RAID6,
};

/*
 * Table to convert BTRFS_RAID_* to the error code if minimum number of devices
 * condition is not met. Zero means there's no corresponding
 * BTRFS_ERROR_DEV_*_NOT_MET value.
 */
const int btrfs_raid_mindev_error[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	[BTRFS_RAID_RAID1]  = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	[BTRFS_RAID_DUP]    = 0,
	[BTRFS_RAID_RAID0]  = 0,
	[BTRFS_RAID_SINGLE] = 0,
	[BTRFS_RAID_RAID5]  = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	[BTRFS_RAID_RAID6]  = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
};

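/*
 * Illustrative sketch (not from the original source): the three tables above
 * are parallel arrays indexed by the BTRFS_RAID_* enum, so a single block
 * group profile bit maps to its attributes and error code through one index.
 * The helper below is hypothetical.
 */
#if 0
static int example_mindev_error(u64 profile_bit)
{
	int i;

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
		if (btrfs_raid_group[i] == profile_bit)
			return btrfs_raid_mindev_error[i];
	}
	/* SINGLE has no block group bit and no _NOT_MET error code */
	return 0;
}
#endif
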
static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head *btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

static struct btrfs_fs_devices *__alloc_fs_devices(void)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->resized_devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->list);

	return fs_devs;
}

/**
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:	a pointer to UUID for this FS. If NULL a new UUID is
 *		generated.
 *
 * Return: a pointer to a new &struct btrfs_fs_devices on success;
 * ERR_PTR() on error. Returned struct is not linked onto any lists and
 * can be destroyed with kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = __alloc_fs_devices();
	if (IS_ERR(fs_devs))
		return fs_devs;

	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
	else
		generate_random_uuid(fs_devs->fsid);

	return fs_devs;
}

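/*
 * Illustrative sketch (not from the original source): callers follow the
 * usual kernel ERR_PTR() convention, e.g.:
 *
 *	fs_devs = alloc_fs_devices(NULL);	(NULL fsid => random uuid)
 *	if (IS_ERR(fs_devs))
 *		return PTR_ERR(fs_devs);
 */
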
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		rcu_string_free(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}

static void btrfs_kobject_uevent(struct block_device *bdev,
				 enum kobject_action action)
{
	int ret;

	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
	if (ret)
		pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
			action,
			kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
			&disk_to_dev(bdev->bd_disk)->kobj);
}

void btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
}

static struct btrfs_device *__alloc_device(void)
{
	struct btrfs_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->resized_list);

	spin_lock_init(&dev->io_lock);

	spin_lock_init(&dev->reada_lock);
	atomic_set(&dev->reada_in_flight, 0);
	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);

	return dev;
}

static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct buffer_head **bh)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, 4096);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*bh = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*bh)) {
		ret = PTR_ERR(*bh);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	*bh = NULL;
	return ret;
}

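/*
 * Illustrative sketch (not from the original source): on success
 * btrfs_get_bdev_and_sb() hands out two references which the caller must
 * drop in this order; on failure both output pointers are already NULL:
 *
 *	ret = btrfs_get_bdev_and_sb(path, FMODE_READ, holder, 0, &bdev, &bh);
 *	if (ret)
 *		return ret;
 *	disk_super = (struct btrfs_super_block *)bh->b_data;
 *	...
 *	brelse(bh);
 *	blkdev_put(bdev, FMODE_READ);
 */
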
static void requeue_list(struct btrfs_pending_bios *pending_bios,
			 struct bio *head, struct bio *tail)
{

	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device.  We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, setup a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	while (pending) {

		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		     pending_bios != &device->pending_sync_bios &&
		     device->pending_sync_bios.head) ||
		    (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		     device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;

		/*
		 * atomic_dec_return implies a barrier for waitqueue_active
		 */
		if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->__bi_cnt) == 0);

		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we add more
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		btrfsic_submit_bio(cur);
		num_run++;
		batch_run++;

		cond_resched();

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop.  So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				cond_resched();
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_queue_work(fs_info->submit_workers,
					 &device->work);
			goto done;
		}
		/* unplug every 64 requests just for good measure */
		if (batch_run % 64 == 0) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}
	}
	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
}

static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}

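/*
 * Note: pending_bios_fn() is the callback behind device->work; the work item
 * itself is wired up by the schedule_bio code referenced above (outside this
 * excerpt). run_scheduled_bios() requeues the same work item through
 * btrfs_queue_work() when the bdi is congested, which is how several devices
 * share one worker thread.
 */
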
void btrfs_free_stale_device(struct btrfs_device *cur_dev)
{
	struct btrfs_fs_devices *fs_devs;
	struct btrfs_device *dev;

	if (!cur_dev->name)
		return;

	list_for_each_entry(fs_devs, &fs_uuids, list) {
		int del = 1;

		if (fs_devs->opened)
			continue;
		if (fs_devs->seeding)
			continue;

		list_for_each_entry(dev, &fs_devs->devices, dev_list) {

			if (dev == cur_dev)
				continue;
			if (!dev->name)
				continue;

			/*
			 * Todo: This won't be enough. What if the same device
			 * comes back (with new uuid and) with its mapper path?
			 * But for now, this does help as mostly an admin will
			 * either use the mapper or the non-mapper path
			 * throughout.
			 */
			rcu_read_lock();
			del = strcmp(rcu_str_deref(dev->name),
				     rcu_str_deref(cur_dev->name));
			rcu_read_unlock();
			if (!del)
				break;
		}

		if (!del) {
			/* delete the stale device */
			if (fs_devs->num_devices == 1) {
				btrfs_sysfs_remove_fsid(fs_devs);
				list_del(&fs_devs->list);
				free_fs_devices(fs_devs);
			} else {
				fs_devs->num_devices--;
				list_del(&dev->dev_list);
				rcu_string_free(dev->name);
				kfree(dev);
			}
			break;
		}
	}
}

/*
 * Add new device to list of registered devices
 *
 * Returns:
 * 1   - first time device is seen
 * 0   - device already known
 * < 0 - error
 */
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	struct rcu_string *name;
	int ret = 0;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = alloc_fs_devices(disk_super->fsid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);

		list_add(&fs_devices->list, &fs_uuids);

		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}

	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			/* we can safely leave the fs_devices entry around */
			return PTR_ERR(device);
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			kfree(device);
			return -ENOMEM;
		}
		rcu_assign_pointer(device->name, name);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;
		mutex_unlock(&fs_devices->device_list_mutex);

		ret = 1;
		device->fs_devices = fs_devices;
	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *	a. The same device disappeared and reappeared with
		 *	   different name. or
		 *	b. The missing-disk-which-was-replaced, has
		 *	   reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transaction when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted.  We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with the same uuid and devid. We keep the
			 * one with the larger generation number or the
			 * last-in if generations are equal.
			 */
			return -EEXIST;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened)
		device->generation = found_transid;

	/*
	 * if there is new btrfs on an already registered device,
	 * then remove the stale device entry.
	 */
	if (ret > 0)
		btrfs_free_stale_device(device);

	*fs_devices_ret = fs_devices;

	return ret;
}

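/*
 * Illustrative sketch (not from the original source): how a caller
 * distinguishes the three outcomes of device_list_add(), in the style of
 * btrfs_scan_one_device() below:
 *
 *	ret = device_list_add(path, disk_super, devid, &fs_devices);
 *	if (ret < 0)
 *		return ret;		(error)
 *	if (ret > 0)
 *		ret = 0;		(first time seen; log it)
 *	(ret == 0: device was already registered)
 */
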
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = alloc_fs_devices(orig->fsid);
	if (IS_ERR(fs_devices))
		return fs_devices;

	mutex_lock(&orig->device_list_mutex);
	fs_devices->total_devices = orig->total_devices;

	/* We have held the volume lock, it is safe to get the devices. */
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device))
			goto error;

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str,
						 GFP_KERNEL);
			if (!name) {
				kfree(device);
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	mutex_unlock(&orig->device_list_mutex);
	return fs_devices;
error:
	mutex_unlock(&orig->device_list_mutex);
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}

void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices, int step)
{
	struct btrfs_device *device, *next;
	struct btrfs_device *latest_dev = NULL;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata) {
			if (!device->is_tgtdev_for_dev_replace &&
			    (!latest_dev ||
			     device->generation > latest_dev->generation)) {
				latest_dev = device;
			}
			continue;
		}

		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
			/*
			 * In the first step, keep the device which has
			 * the correct fsid and the devid that is used
			 * for the dev_replace procedure.
			 * In the second step, the dev_replace state is
			 * read from the device tree and it is known
			 * whether the procedure is really active or
			 * not, which means whether this device is
			 * used or whether it should be removed.
			 */
			if (step == 0 || device->is_tgtdev_for_dev_replace) {
				continue;
			}
		}
		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			if (!device->is_tgtdev_for_dev_replace)
				fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		rcu_string_free(device->name);
		kfree(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	fs_devices->latest_bdev = latest_dev->bdev;

	mutex_unlock(&uuid_mutex);
}

static void __free_device(struct work_struct *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, rcu_work);
	rcu_string_free(device->name);
	kfree(device);
}

static void free_device(struct rcu_head *head)
{
	struct btrfs_device *device;

	device = container_of(head, struct btrfs_device, rcu);

	INIT_WORK(&device->rcu_work, __free_device);
	schedule_work(&device->rcu_work);
}

static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (device->bdev && device->writeable) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	if (device->bdev)
		blkdev_put(device->bdev, device->mode);
}

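/*
 * Note on the teardown helpers above: free_device() is the RCU callback, and
 * it only schedules __free_device() on a workqueue, so the final kfree() of
 * the device and its name happens in process context after the grace period.
 * btrfs_close_devices() pairs this with rcu_barrier() so umount does not
 * return before those callbacks have run.
 */
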
static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;
	struct btrfs_device *new_device;
	struct rcu_string *name;

	if (device->bdev)
		fs_devices->open_devices--;

	if (device->writeable &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->missing)
		fs_devices->missing_devices--;

	btrfs_close_bdev(device);

	new_device = btrfs_alloc_device(NULL, &device->devid,
					device->uuid);
	BUG_ON(IS_ERR(new_device)); /* -ENOMEM */

	/* Safe because we are under uuid_mutex */
	if (device->name) {
		name = rcu_string_strdup(device->name->str, GFP_NOFS);
		BUG_ON(!name); /* -ENOMEM */
		rcu_assign_pointer(new_device->name, name);
	}

	list_replace_rcu(&device->dev_list, &new_device->dev_list);
	new_device->fs_devices = device->fs_devices;

	call_rcu(&device->rcu, free_device);
}

static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
		btrfs_close_one_device(device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	/*
	 * Wait for rcu kworkers under __btrfs_close_devices
	 * to finish all blkdev_puts so device is really
	 * free when umount is done.
	 */
	rcu_barrier();
	return ret;
}

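/*
 * Illustrative sketch (not from the original source): fs_devices->opened is
 * a plain open count serialized by uuid_mutex, so every successful open must
 * be paired with one close:
 *
 *	ret = btrfs_open_devices(fs_devices, FMODE_READ, holder);
 *	if (ret)
 *		return ret;
 *	...
 *	btrfs_close_devices(fs_devices);
 */
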
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		/* Just open everything we can; ignore failures here */
		if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
					  &bdev, &bh))
			continue;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_dev ||
		    device->generation > latest_dev->generation)
			latest_dev = device;

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		q = bdev_get_queue(bdev);
		if (blk_queue_discard(q))
			device->can_discard = 1;

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable &&
		    device->devid != BTRFS_DEV_REPLACE_DEVID) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		brelse(bh);
		continue;

error_brelse:
		brelse(bh);
		blkdev_put(bdev, flags);
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EINVAL;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_dev->bdev;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}

void btrfs_release_disk_super(struct page *page)
{
	kunmap(page);
	put_page(page);
}

int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr,
		struct page **page, struct btrfs_super_block **disk_super)
{
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
		return 1;

	/* make sure our super fits in the page */
	if (sizeof(**disk_super) > PAGE_SIZE)
		return 1;

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(**disk_super) - 1) >> PAGE_SHIFT != index)
		return 1;

	/* pull in the page with our super */
	*page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
				    index, GFP_KERNEL);

	if (IS_ERR_OR_NULL(*page))
		return 1;

	p = kmap(*page);

	/* align our pointer to the offset of the super block */
	*disk_super = p + (bytenr & ~PAGE_MASK);

	if (btrfs_super_bytenr(*disk_super) != bytenr ||
	    btrfs_super_magic(*disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(*page);
		return 1;
	}

	if ((*disk_super)->label[0] &&
	    (*disk_super)->label[BTRFS_LABEL_SIZE - 1])
		(*disk_super)->label[BTRFS_LABEL_SIZE - 1] = '\0';

	return 0;
}

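/*
 * Worked example (not from the original source) for the checks in
 * btrfs_read_disk_super(): with 4KiB pages and the primary superblock at
 * bytenr = 65536, index = 65536 >> PAGE_SHIFT = 16 and the in-page offset is
 * bytenr & ~PAGE_MASK = 0, so the superblock (at most PAGE_SIZE bytes, per
 * the check above) cannot straddle a page and one kmap() covers it whole.
 */
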
/*
 * Look for a btrfs signature on a device. This may be called out of the mount path
 * and we are not allowed to call set_blocksize during the scan. The superblock
 * is read via pagecache
 */
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct page *page;
	int ret = -EINVAL;
	u64 devid;
	u64 transid;
	u64 total_devices;
	u64 bytenr;

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	bytenr = btrfs_sb_offset(0);
	flags |= FMODE_EXCL;
	mutex_lock(&uuid_mutex);

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super))
		goto error_bdev_put;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	total_devices = btrfs_super_num_devices(disk_super);

	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
	if (ret > 0) {
		if (disk_super->label[0]) {
			printk(KERN_INFO "BTRFS: device label %s ", disk_super->label);
		} else {
			printk(KERN_INFO "BTRFS: device fsid %pU ", disk_super->fsid);
		}

		printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
		ret = 0;
	}
	if (!ret && fs_devices_ret)
		(*fs_devices_ret)->total_devices = total_devices;

	btrfs_release_disk_super(page);

error_bdev_put:
	blkdev_put(bdev, flags);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}

/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int contains_pending_extent(struct btrfs_transaction *transaction,
				   struct btrfs_device *device,
				   u64 *start, u64 len)
{
	struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
	struct extent_map *em;
	struct list_head *search_list = &fs_info->pinned_chunks;
	int ret = 0;
	u64 physical_start = *start;

	if (transaction)
		search_list = &transaction->pending_chunks;
again:
	list_for_each_entry(em, search_list, list) {
		struct map_lookup *map;
		int i;

		map = em->map_lookup;
		for (i = 0; i < map->num_stripes; i++) {
			u64 end;

			if (map->stripes[i].dev != device)
				continue;
			if (map->stripes[i].physical >= physical_start + len ||
			    map->stripes[i].physical + em->orig_block_len <=
			    physical_start)
				continue;
			/*
			 * Make sure that while processing the pinned list we do
			 * not override our *start with a lower value, because
			 * we can have pinned chunks that fall within this
			 * device hole and that have lower physical addresses
			 * than the pending chunks we processed before. If we
			 * do not take this special care we can end up getting
			 * 2 pending chunks that start at the same physical
			 * device offsets because the end offset of a pinned
			 * chunk can be equal to the start offset of some
			 * pending chunk.
			 */
			end = map->stripes[i].physical + em->orig_block_len;
			if (end > *start) {
				*start = end;
				ret = 1;
			}
		}
	}
	if (search_list != &fs_info->pinned_chunks) {
		search_list = &fs_info->pinned_chunks;
		goto again;
	}

	return ret;
}

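/*
 * Note on contains_pending_extent() above: pending chunks exist only
 * relative to a transaction, but pinned chunks must be checked in every
 * case, so after scanning transaction->pending_chunks the function falls
 * through to fs_info->pinned_chunks via the 'again' label.
 */
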
/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find it. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent_start(struct btrfs_transaction *transaction,
			       struct btrfs_device *device, u64 num_bytes,
			       u64 search_start, u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;
	u64 min_search_start;

	/*
	 * We don't want to overwrite the superblock on the drive nor any area
	 * used by the boot loader (grub for example), so we make sure to start
	 * at an offset of at least 1MB.
	 */
	min_search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
	search_start = max(search_start, min_search_start);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			/*
			 * Have to check before we set max_hole_start, otherwise
			 * we could end up sending back this offset anyway.
			 */
			if (contains_pending_extent(transaction, device,
						    &search_start,
						    hole_size)) {
				if (key.offset >= search_start) {
					hole_size = key.offset - search_start;
				} else {
					WARN_ON_ONCE(1);
					hole_size = 0;
				}
			}

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;

		if (contains_pending_extent(transaction, device, &search_start,
					    hole_size)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

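/*
 * Worked example (not from the original source) for the hole walk above:
 * with dev extents at [16MiB, 24MiB) and [32MiB, 48MiB) and
 * search_start = 1MiB, the first hole seen is [1MiB, 16MiB). If num_bytes
 * fits there the search returns immediately; otherwise that hole is
 * remembered as the max hole and the walk continues with [24MiB, 32MiB)
 * and finally the tail [48MiB, search_end).
 */
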
int find_free_dev_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(trans->transaction, device,
					  num_bytes, 0, start, len);
}

static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start, u64 *dev_extent_len)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_handle_fs_error(root->fs_info, ret, "Slot search failed");
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_handle_fs_error(root->fs_info, ret,
				      "Failed to remove dev extent item");
	} else {
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
	}
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_tree, u64 chunk_objectid,
				  u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	WARN_ON(device->is_tgtdev_for_dev_replace);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    btrfs_dev_extent_chunk_tree_uuid(extent), BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;

	em_tree = &fs_info->mapping_tree.map_tree;
	read_lock(&em_tree->lock);
	n = rb_last(&em_tree->map);
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
	}
	read_unlock(&em_tree->lock);

	return ret;
}

static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
static int btrfs_add_device(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

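/*
 * Note on find_next_devid() above: searching for offset (u64)-1 and stepping
 * back with btrfs_previous_item() is the usual btree idiom for "find the
 * last item with this objectid/type"; ret == 0 from the search would mean a
 * DEV_ITEM with offset (u64)-1 really exists, which is treated as corruption
 * (see the BUG_ON).
 */
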
/*
 * Function to update ctime/mtime for a given device path.
 * Mainly used for ctime/mtime based probes like libblkid.
 */
static void update_dev_time(char *path_name)
{
	struct file *filp;

	filp = filp_open(path_name, O_RDWR, 0);
	if (IS_ERR(filp))
		return;
	file_update_time(filp);
	filp_close(filp, NULL);
}

static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	btrfs_commit_transaction(trans, root);
	return ret;
}

/*
 * Verify that @num_devices satisfies the RAID profile constraints in the whole
 * filesystem. It's up to the caller to adjust that number regarding eg. device
 * replace.
 */
static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
		u64 num_devices)
{
	u64 all_avail;
	unsigned seq;
	int i;

	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		all_avail = fs_info->avail_data_alloc_bits |
			    fs_info->avail_system_alloc_bits |
			    fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
		if (!(all_avail & btrfs_raid_group[i]))
			continue;

		if (num_devices < btrfs_raid_array[i].devs_min) {
			int ret = btrfs_raid_mindev_error[i];

			if (ret)
				return ret;
		}
	}

	return 0;
}

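/*
 * Worked example (not from the original source): on a two-device RAID1
 * filesystem, removing one device is checked with num_devices == 1, which
 * is below devs_min == 2 for RAID1, so btrfs_check_raid_min_devices()
 * returns BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET and the removal is refused.
 */
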
struct btrfs_device *btrfs_find_next_active_device(struct btrfs_fs_devices *fs_devs,
					struct btrfs_device *device)
{
	struct btrfs_device *next_device;

	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
		if (next_device != device &&
		    !next_device->missing && next_device->bdev)
			return next_device;
	}

	return NULL;
}

/*
 * Helper function to check if the given device is part of s_bdev / latest_bdev
 * and replace it with the provided or the next active device. In the context
 * where this function is called, there should always be another device (or
 * this_dev) which is active.
 */
void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info,
		struct btrfs_device *device, struct btrfs_device *this_dev)
{
	struct btrfs_device *next_device;

	if (this_dev)
		next_device = this_dev;
	else
		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
							    device);
	ASSERT(next_device);

	if (fs_info->sb->s_bdev &&
	    (fs_info->sb->s_bdev == device->bdev))
		fs_info->sb->s_bdev = next_device->bdev;

	if (fs_info->fs_devices->latest_bdev == device->bdev)
		fs_info->fs_devices->latest_bdev = next_device->bdev;
}

int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;
	u64 num_devices;
	int ret = 0;
	bool clear_super = false;
	char *dev_name = NULL;

	mutex_lock(&uuid_mutex);

	num_devices = root->fs_info->fs_devices->num_devices;
	btrfs_dev_replace_lock(&root->fs_info->dev_replace, 0);
	if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
		WARN_ON(num_devices < 1);
		num_devices--;
	}
	btrfs_dev_replace_unlock(&root->fs_info->dev_replace, 0);

	ret = btrfs_check_raid_min_devices(root->fs_info, num_devices - 1);
	if (ret)
		goto out;

	ret = btrfs_find_device_by_devspec(root, devid, device_path,
					   &device);
	if (ret)
		goto out;

	if (device->is_tgtdev_for_dev_replace) {
		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
		goto out;
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
		goto out;
	}

	if (device->writeable) {
		lock_chunks(root);
		list_del_init(&device->dev_alloc_list);
		device->fs_devices->rw_devices--;
		unlock_chunks(root);
		dev_name = kstrdup(device->name->str, GFP_KERNEL);
		if (!dev_name) {
			ret = -ENOMEM;
			goto error_undo;
		}
		clear_super = true;
	}

	mutex_unlock(&uuid_mutex);
	ret = btrfs_shrink_device(device, 0);
	mutex_lock(&uuid_mutex);
	if (ret)
		goto error_undo;

	/*
	 * TODO: the superblock still includes this device in its num_devices
	 * counter although write_all_supers() is not locked out. This
	 * could give a filesystem state which requires a degraded mount.
	 */
	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_undo;

	device->in_fs_metadata = 0;
	btrfs_scrub_cancel_dev(root->fs_info, device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers. Whoever is writing all supers, should
	 * lock the device list mutex before getting the number of
	 * devices in the super block (super_copy). Conversely,
	 * whoever updates the number of devices in the super block
	 * (super_copy) should hold the device list mutex.
	 */

	cur_devices = device->fs_devices;
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	device->fs_devices->num_devices--;
	device->fs_devices->total_devices--;

	if (device->missing)
		device->fs_devices->missing_devices--;

	btrfs_assign_next_active_device(root->fs_info, device, NULL);

	if (device->bdev) {
		device->fs_devices->open_devices--;
		/* remove sysfs entry */
		btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
	}

	btrfs_close_bdev(device);

	call_rcu(&device->rcu, free_device);

	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (cur_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == cur_devices) {
				fs_devices->seed = cur_devices->seed;
				break;
			}
			fs_devices = fs_devices->seed;
		}
		cur_devices->seed = NULL;
		__btrfs_close_devices(cur_devices);
		free_fs_devices(cur_devices);
	}

	root->fs_info->num_tolerated_disk_barrier_failures =
		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);

	/*
	 * at this point, the device is zero sized. We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (clear_super) {
		struct block_device *bdev;

		bdev = blkdev_get_by_path(dev_name, FMODE_READ | FMODE_EXCL,
					  root->fs_info->bdev_holder);
		if (!IS_ERR(bdev)) {
			btrfs_scratch_superblocks(bdev, dev_name);
			blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
		}
	}

out:
	kfree(dev_name);

	mutex_unlock(&uuid_mutex);
	return ret;

error_undo:
	if (device->writeable) {
		lock_chunks(root);
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		device->fs_devices->rw_devices++;
		unlock_chunks(root);
	}
	goto out;
}

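/*
 * Note on the accounting at the top of btrfs_rm_device() above: while a
 * replace is running fs_devices->num_devices includes the replace target,
 * so one device is subtracted before the check, and the check itself is
 * done with num_devices - 1 to model the state after this removal has
 * succeeded.
 */
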
void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
					struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices;

	WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));

	/*
	 * in case of fs with no seed, srcdev->fs_devices will point
	 * to fs_devices of fs_info. However when the dev being replaced is
	 * a seed dev it will point to the seed's local fs_devices. In short
	 * srcdev will have its correct fs_devices in both cases.
	 */
	fs_devices = srcdev->fs_devices;

	list_del_rcu(&srcdev->dev_list);
	list_del_rcu(&srcdev->dev_alloc_list);
	fs_devices->num_devices--;
	if (srcdev->missing)
		fs_devices->missing_devices--;

	if (srcdev->writeable)
		fs_devices->rw_devices--;

	if (srcdev->bdev)
		fs_devices->open_devices--;
}

void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
				      struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;

	if (srcdev->writeable) {
		/* zero out the old super if it is writable */
		btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
	}

	btrfs_close_bdev(srcdev);

	call_rcu(&srcdev->rcu, free_device);

	/*
	 * unless fs_devices is a seed fs, num_devices shouldn't go
	 * to zero
	 */
	BUG_ON(!fs_devices->num_devices && !fs_devices->seeding);

	/* if there are no devices left, delete the fs_devices */
	if (!fs_devices->num_devices) {
		struct btrfs_fs_devices *tmp_fs_devices;

		tmp_fs_devices = fs_info->fs_devices;
		while (tmp_fs_devices) {
			if (tmp_fs_devices->seed == fs_devices) {
				tmp_fs_devices->seed = fs_devices->seed;
				break;
			}
			tmp_fs_devices = tmp_fs_devices->seed;
		}
		fs_devices->seed = NULL;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
}

2090 */ 2091 btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str); 2092 2093 btrfs_close_bdev(tgtdev); 2094 call_rcu(&tgtdev->rcu, free_device); 2095 } 2096 2097 static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path, 2098 struct btrfs_device **device) 2099 { 2100 int ret = 0; 2101 struct btrfs_super_block *disk_super; 2102 u64 devid; 2103 u8 *dev_uuid; 2104 struct block_device *bdev; 2105 struct buffer_head *bh; 2106 2107 *device = NULL; 2108 ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ, 2109 root->fs_info->bdev_holder, 0, &bdev, &bh); 2110 if (ret) 2111 return ret; 2112 disk_super = (struct btrfs_super_block *)bh->b_data; 2113 devid = btrfs_stack_device_id(&disk_super->dev_item); 2114 dev_uuid = disk_super->dev_item.uuid; 2115 *device = btrfs_find_device(root->fs_info, devid, dev_uuid, 2116 disk_super->fsid); 2117 brelse(bh); 2118 if (!*device) 2119 ret = -ENOENT; 2120 blkdev_put(bdev, FMODE_READ); 2121 return ret; 2122 } 2123 2124 int btrfs_find_device_missing_or_by_path(struct btrfs_root *root, 2125 char *device_path, 2126 struct btrfs_device **device) 2127 { 2128 *device = NULL; 2129 if (strcmp(device_path, "missing") == 0) { 2130 struct list_head *devices; 2131 struct btrfs_device *tmp; 2132 2133 devices = &root->fs_info->fs_devices->devices; 2134 /* 2135 * It is safe to read the devices since the volume_mutex 2136 * is held by the caller. 2137 */ 2138 list_for_each_entry(tmp, devices, dev_list) { 2139 if (tmp->in_fs_metadata && !tmp->bdev) { 2140 *device = tmp; 2141 break; 2142 } 2143 } 2144 2145 if (!*device) 2146 return BTRFS_ERROR_DEV_MISSING_NOT_FOUND; 2147 2148 return 0; 2149 } else { 2150 return btrfs_find_device_by_path(root, device_path, device); 2151 } 2152 } 2153 2154 /* 2155 * Lookup a device given by device id, or the path if the id is 0. 2156 */ 2157 int btrfs_find_device_by_devspec(struct btrfs_root *root, u64 devid, 2158 char *devpath, 2159 struct btrfs_device **device) 2160 { 2161 int ret; 2162 2163 if (devid) { 2164 ret = 0; 2165 *device = btrfs_find_device(root->fs_info, devid, NULL, 2166 NULL); 2167 if (!*device) 2168 ret = -ENOENT; 2169 } else { 2170 if (!devpath || !devpath[0]) 2171 return -EINVAL; 2172 2173 ret = btrfs_find_device_missing_or_by_path(root, devpath, 2174 device); 2175 } 2176 return ret; 2177 } 2178 2179 /* 2180 * does all the dirty work required for changing file system's UUID. 
2181 */ 2182 static int btrfs_prepare_sprout(struct btrfs_root *root) 2183 { 2184 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; 2185 struct btrfs_fs_devices *old_devices; 2186 struct btrfs_fs_devices *seed_devices; 2187 struct btrfs_super_block *disk_super = root->fs_info->super_copy; 2188 struct btrfs_device *device; 2189 u64 super_flags; 2190 2191 BUG_ON(!mutex_is_locked(&uuid_mutex)); 2192 if (!fs_devices->seeding) 2193 return -EINVAL; 2194 2195 seed_devices = __alloc_fs_devices(); 2196 if (IS_ERR(seed_devices)) 2197 return PTR_ERR(seed_devices); 2198 2199 old_devices = clone_fs_devices(fs_devices); 2200 if (IS_ERR(old_devices)) { 2201 kfree(seed_devices); 2202 return PTR_ERR(old_devices); 2203 } 2204 2205 list_add(&old_devices->list, &fs_uuids); 2206 2207 memcpy(seed_devices, fs_devices, sizeof(*seed_devices)); 2208 seed_devices->opened = 1; 2209 INIT_LIST_HEAD(&seed_devices->devices); 2210 INIT_LIST_HEAD(&seed_devices->alloc_list); 2211 mutex_init(&seed_devices->device_list_mutex); 2212 2213 mutex_lock(&root->fs_info->fs_devices->device_list_mutex); 2214 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices, 2215 synchronize_rcu); 2216 list_for_each_entry(device, &seed_devices->devices, dev_list) 2217 device->fs_devices = seed_devices; 2218 2219 lock_chunks(root); 2220 list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list); 2221 unlock_chunks(root); 2222 2223 fs_devices->seeding = 0; 2224 fs_devices->num_devices = 0; 2225 fs_devices->open_devices = 0; 2226 fs_devices->missing_devices = 0; 2227 fs_devices->rotating = 0; 2228 fs_devices->seed = seed_devices; 2229 2230 generate_random_uuid(fs_devices->fsid); 2231 memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); 2232 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); 2233 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); 2234 2235 super_flags = btrfs_super_flags(disk_super) & 2236 ~BTRFS_SUPER_FLAG_SEEDING; 2237 btrfs_set_super_flags(disk_super, super_flags); 2238 2239 return 0; 2240 } 2241 2242 /* 2243 * Store the expected generation for seed devices in device items. 
2244 */ 2245 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans, 2246 struct btrfs_root *root) 2247 { 2248 struct btrfs_path *path; 2249 struct extent_buffer *leaf; 2250 struct btrfs_dev_item *dev_item; 2251 struct btrfs_device *device; 2252 struct btrfs_key key; 2253 u8 fs_uuid[BTRFS_UUID_SIZE]; 2254 u8 dev_uuid[BTRFS_UUID_SIZE]; 2255 u64 devid; 2256 int ret; 2257 2258 path = btrfs_alloc_path(); 2259 if (!path) 2260 return -ENOMEM; 2261 2262 root = root->fs_info->chunk_root; 2263 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2264 key.offset = 0; 2265 key.type = BTRFS_DEV_ITEM_KEY; 2266 2267 while (1) { 2268 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2269 if (ret < 0) 2270 goto error; 2271 2272 leaf = path->nodes[0]; 2273 next_slot: 2274 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 2275 ret = btrfs_next_leaf(root, path); 2276 if (ret > 0) 2277 break; 2278 if (ret < 0) 2279 goto error; 2280 leaf = path->nodes[0]; 2281 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2282 btrfs_release_path(path); 2283 continue; 2284 } 2285 2286 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2287 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID || 2288 key.type != BTRFS_DEV_ITEM_KEY) 2289 break; 2290 2291 dev_item = btrfs_item_ptr(leaf, path->slots[0], 2292 struct btrfs_dev_item); 2293 devid = btrfs_device_id(leaf, dev_item); 2294 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 2295 BTRFS_UUID_SIZE); 2296 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 2297 BTRFS_UUID_SIZE); 2298 device = btrfs_find_device(root->fs_info, devid, dev_uuid, 2299 fs_uuid); 2300 BUG_ON(!device); /* Logic error */ 2301 2302 if (device->fs_devices->seeding) { 2303 btrfs_set_device_generation(leaf, dev_item, 2304 device->generation); 2305 btrfs_mark_buffer_dirty(leaf); 2306 } 2307 2308 path->slots[0]++; 2309 goto next_slot; 2310 } 2311 ret = 0; 2312 error: 2313 btrfs_free_path(path); 2314 return ret; 2315 } 2316 2317 int btrfs_init_new_device(struct btrfs_root *root, char *device_path) 2318 { 2319 struct request_queue *q; 2320 struct btrfs_trans_handle *trans; 2321 struct btrfs_device *device; 2322 struct block_device *bdev; 2323 struct list_head *devices; 2324 struct super_block *sb = root->fs_info->sb; 2325 struct rcu_string *name; 2326 u64 tmp; 2327 int seeding_dev = 0; 2328 int ret = 0; 2329 2330 if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding) 2331 return -EROFS; 2332 2333 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, 2334 root->fs_info->bdev_holder); 2335 if (IS_ERR(bdev)) 2336 return PTR_ERR(bdev); 2337 2338 if (root->fs_info->fs_devices->seeding) { 2339 seeding_dev = 1; 2340 down_write(&sb->s_umount); 2341 mutex_lock(&uuid_mutex); 2342 } 2343 2344 filemap_write_and_wait(bdev->bd_inode->i_mapping); 2345 2346 devices = &root->fs_info->fs_devices->devices; 2347 2348 mutex_lock(&root->fs_info->fs_devices->device_list_mutex); 2349 list_for_each_entry(device, devices, dev_list) { 2350 if (device->bdev == bdev) { 2351 ret = -EEXIST; 2352 mutex_unlock( 2353 &root->fs_info->fs_devices->device_list_mutex); 2354 goto error; 2355 } 2356 } 2357 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); 2358 2359 device = btrfs_alloc_device(root->fs_info, NULL, NULL); 2360 if (IS_ERR(device)) { 2361 /* we can safely leave the fs_devices entry around */ 2362 ret = PTR_ERR(device); 2363 goto error; 2364 } 2365 2366 name = rcu_string_strdup(device_path, GFP_KERNEL); 2367 if (!name) { 2368 kfree(device); 2369 ret = -ENOMEM; 2370 goto error; 
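		/*
		 * Note: device->name was never assigned on this path, so the
		 * bare kfree() above is all the unwinding needed here; the
		 * block device itself is released at the error label.
		 */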
2371 } 2372 rcu_assign_pointer(device->name, name); 2373 2374 trans = btrfs_start_transaction(root, 0); 2375 if (IS_ERR(trans)) { 2376 rcu_string_free(device->name); 2377 kfree(device); 2378 ret = PTR_ERR(trans); 2379 goto error; 2380 } 2381 2382 q = bdev_get_queue(bdev); 2383 if (blk_queue_discard(q)) 2384 device->can_discard = 1; 2385 device->writeable = 1; 2386 device->generation = trans->transid; 2387 device->io_width = root->sectorsize; 2388 device->io_align = root->sectorsize; 2389 device->sector_size = root->sectorsize; 2390 device->total_bytes = i_size_read(bdev->bd_inode); 2391 device->disk_total_bytes = device->total_bytes; 2392 device->commit_total_bytes = device->total_bytes; 2393 device->dev_root = root->fs_info->dev_root; 2394 device->bdev = bdev; 2395 device->in_fs_metadata = 1; 2396 device->is_tgtdev_for_dev_replace = 0; 2397 device->mode = FMODE_EXCL; 2398 device->dev_stats_valid = 1; 2399 set_blocksize(device->bdev, 4096); 2400 2401 if (seeding_dev) { 2402 sb->s_flags &= ~MS_RDONLY; 2403 ret = btrfs_prepare_sprout(root); 2404 BUG_ON(ret); /* -ENOMEM */ 2405 } 2406 2407 device->fs_devices = root->fs_info->fs_devices; 2408 2409 mutex_lock(&root->fs_info->fs_devices->device_list_mutex); 2410 lock_chunks(root); 2411 list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices); 2412 list_add(&device->dev_alloc_list, 2413 &root->fs_info->fs_devices->alloc_list); 2414 root->fs_info->fs_devices->num_devices++; 2415 root->fs_info->fs_devices->open_devices++; 2416 root->fs_info->fs_devices->rw_devices++; 2417 root->fs_info->fs_devices->total_devices++; 2418 root->fs_info->fs_devices->total_rw_bytes += device->total_bytes; 2419 2420 spin_lock(&root->fs_info->free_chunk_lock); 2421 root->fs_info->free_chunk_space += device->total_bytes; 2422 spin_unlock(&root->fs_info->free_chunk_lock); 2423 2424 if (!blk_queue_nonrot(bdev_get_queue(bdev))) 2425 root->fs_info->fs_devices->rotating = 1; 2426 2427 tmp = btrfs_super_total_bytes(root->fs_info->super_copy); 2428 btrfs_set_super_total_bytes(root->fs_info->super_copy, 2429 tmp + device->total_bytes); 2430 2431 tmp = btrfs_super_num_devices(root->fs_info->super_copy); 2432 btrfs_set_super_num_devices(root->fs_info->super_copy, 2433 tmp + 1); 2434 2435 /* add sysfs device entry */ 2436 btrfs_sysfs_add_device_link(root->fs_info->fs_devices, device); 2437 2438 /* 2439 * we've got more storage, clear any full flags on the space 2440 * infos 2441 */ 2442 btrfs_clear_space_info_full(root->fs_info); 2443 2444 unlock_chunks(root); 2445 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); 2446 2447 if (seeding_dev) { 2448 lock_chunks(root); 2449 ret = init_first_rw_device(trans, root, device); 2450 unlock_chunks(root); 2451 if (ret) { 2452 btrfs_abort_transaction(trans, ret); 2453 goto error_trans; 2454 } 2455 } 2456 2457 ret = btrfs_add_device(trans, root, device); 2458 if (ret) { 2459 btrfs_abort_transaction(trans, ret); 2460 goto error_trans; 2461 } 2462 2463 if (seeding_dev) { 2464 char fsid_buf[BTRFS_UUID_UNPARSED_SIZE]; 2465 2466 ret = btrfs_finish_sprout(trans, root); 2467 if (ret) { 2468 btrfs_abort_transaction(trans, ret); 2469 goto error_trans; 2470 } 2471 2472 /* Sprouting would change fsid of the mounted root, 2473 * so rename the fsid on the sysfs 2474 */ 2475 snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU", 2476 root->fs_info->fsid); 2477 if (kobject_rename(&root->fs_info->fs_devices->fsid_kobj, 2478 fsid_buf)) 2479 btrfs_warn(root->fs_info, 2480 "sysfs: failed to create fsid for sprout"); 2481 } 2482 2483 
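	/*
	 * Informal note (not authoritative): the value recomputed below is
	 * bounded by the weakest redundancy profile in use, e.g. RAID1
	 * metadata tolerates one lost device while single/RAID0 data
	 * tolerates none, giving 0 overall. Merely adding a device does
	 * not lower it; a later profile conversion might change it.
	 */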
root->fs_info->num_tolerated_disk_barrier_failures = 2484 btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info); 2485 ret = btrfs_commit_transaction(trans, root); 2486 2487 if (seeding_dev) { 2488 mutex_unlock(&uuid_mutex); 2489 up_write(&sb->s_umount); 2490 2491 if (ret) /* transaction commit */ 2492 return ret; 2493 2494 ret = btrfs_relocate_sys_chunks(root); 2495 if (ret < 0) 2496 btrfs_handle_fs_error(root->fs_info, ret, 2497 "Failed to relocate sys chunks after " 2498 "device initialization. This can be fixed " 2499 "using the \"btrfs balance\" command."); 2500 trans = btrfs_attach_transaction(root); 2501 if (IS_ERR(trans)) { 2502 if (PTR_ERR(trans) == -ENOENT) 2503 return 0; 2504 return PTR_ERR(trans); 2505 } 2506 ret = btrfs_commit_transaction(trans, root); 2507 } 2508 2509 /* Update ctime/mtime for libblkid */ 2510 update_dev_time(device_path); 2511 return ret; 2512 2513 error_trans: 2514 btrfs_end_transaction(trans, root); 2515 rcu_string_free(device->name); 2516 btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device); 2517 kfree(device); 2518 error: 2519 blkdev_put(bdev, FMODE_EXCL); 2520 if (seeding_dev) { 2521 mutex_unlock(&uuid_mutex); 2522 up_write(&sb->s_umount); 2523 } 2524 return ret; 2525 } 2526 2527 int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path, 2528 struct btrfs_device *srcdev, 2529 struct btrfs_device **device_out) 2530 { 2531 struct request_queue *q; 2532 struct btrfs_device *device; 2533 struct block_device *bdev; 2534 struct btrfs_fs_info *fs_info = root->fs_info; 2535 struct list_head *devices; 2536 struct rcu_string *name; 2537 u64 devid = BTRFS_DEV_REPLACE_DEVID; 2538 int ret = 0; 2539 2540 *device_out = NULL; 2541 if (fs_info->fs_devices->seeding) { 2542 btrfs_err(fs_info, "the filesystem is a seed filesystem!"); 2543 return -EINVAL; 2544 } 2545 2546 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, 2547 fs_info->bdev_holder); 2548 if (IS_ERR(bdev)) { 2549 btrfs_err(fs_info, "target device %s is invalid!", device_path); 2550 return PTR_ERR(bdev); 2551 } 2552 2553 filemap_write_and_wait(bdev->bd_inode->i_mapping); 2554 2555 devices = &fs_info->fs_devices->devices; 2556 list_for_each_entry(device, devices, dev_list) { 2557 if (device->bdev == bdev) { 2558 btrfs_err(fs_info, "target device is in the filesystem!"); 2559 ret = -EEXIST; 2560 goto error; 2561 } 2562 } 2563 2564 2565 if (i_size_read(bdev->bd_inode) < 2566 btrfs_device_get_total_bytes(srcdev)) { 2567 btrfs_err(fs_info, "target device is smaller than source device!"); 2568 ret = -EINVAL; 2569 goto error; 2570 } 2571 2572 2573 device = btrfs_alloc_device(NULL, &devid, NULL); 2574 if (IS_ERR(device)) { 2575 ret = PTR_ERR(device); 2576 goto error; 2577 } 2578 2579 name = rcu_string_strdup(device_path, GFP_NOFS); 2580 if (!name) { 2581 kfree(device); 2582 ret = -ENOMEM; 2583 goto error; 2584 } 2585 rcu_assign_pointer(device->name, name); 2586 2587 q = bdev_get_queue(bdev); 2588 if (blk_queue_discard(q)) 2589 device->can_discard = 1; 2590 mutex_lock(&root->fs_info->fs_devices->device_list_mutex); 2591 device->writeable = 1; 2592 device->generation = 0; 2593 device->io_width = root->sectorsize; 2594 device->io_align = root->sectorsize; 2595 device->sector_size = root->sectorsize; 2596 device->total_bytes = btrfs_device_get_total_bytes(srcdev); 2597 device->disk_total_bytes = btrfs_device_get_disk_total_bytes(srcdev); 2598 device->bytes_used = btrfs_device_get_bytes_used(srcdev); 2599 ASSERT(list_empty(&srcdev->resized_list)); 2600 
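	/*
	 * The target mirrors the source device's geometry so the replace
	 * can finish transparently: total_bytes, disk_total_bytes and
	 * bytes_used were copied from srcdev above, and the committed
	 * values below must match what the last transaction recorded.
	 */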
device->commit_total_bytes = srcdev->commit_total_bytes; 2601 device->commit_bytes_used = device->bytes_used; 2602 device->dev_root = fs_info->dev_root; 2603 device->bdev = bdev; 2604 device->in_fs_metadata = 1; 2605 device->is_tgtdev_for_dev_replace = 1; 2606 device->mode = FMODE_EXCL; 2607 device->dev_stats_valid = 1; 2608 set_blocksize(device->bdev, 4096); 2609 device->fs_devices = fs_info->fs_devices; 2610 list_add(&device->dev_list, &fs_info->fs_devices->devices); 2611 fs_info->fs_devices->num_devices++; 2612 fs_info->fs_devices->open_devices++; 2613 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); 2614 2615 *device_out = device; 2616 return ret; 2617 2618 error: 2619 blkdev_put(bdev, FMODE_EXCL); 2620 return ret; 2621 } 2622 2623 void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info, 2624 struct btrfs_device *tgtdev) 2625 { 2626 WARN_ON(fs_info->fs_devices->rw_devices == 0); 2627 tgtdev->io_width = fs_info->dev_root->sectorsize; 2628 tgtdev->io_align = fs_info->dev_root->sectorsize; 2629 tgtdev->sector_size = fs_info->dev_root->sectorsize; 2630 tgtdev->dev_root = fs_info->dev_root; 2631 tgtdev->in_fs_metadata = 1; 2632 } 2633 2634 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans, 2635 struct btrfs_device *device) 2636 { 2637 int ret; 2638 struct btrfs_path *path; 2639 struct btrfs_root *root; 2640 struct btrfs_dev_item *dev_item; 2641 struct extent_buffer *leaf; 2642 struct btrfs_key key; 2643 2644 root = device->dev_root->fs_info->chunk_root; 2645 2646 path = btrfs_alloc_path(); 2647 if (!path) 2648 return -ENOMEM; 2649 2650 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2651 key.type = BTRFS_DEV_ITEM_KEY; 2652 key.offset = device->devid; 2653 2654 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2655 if (ret < 0) 2656 goto out; 2657 2658 if (ret > 0) { 2659 ret = -ENOENT; 2660 goto out; 2661 } 2662 2663 leaf = path->nodes[0]; 2664 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 2665 2666 btrfs_set_device_id(leaf, dev_item, device->devid); 2667 btrfs_set_device_type(leaf, dev_item, device->type); 2668 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 2669 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 2670 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 2671 btrfs_set_device_total_bytes(leaf, dev_item, 2672 btrfs_device_get_disk_total_bytes(device)); 2673 btrfs_set_device_bytes_used(leaf, dev_item, 2674 btrfs_device_get_bytes_used(device)); 2675 btrfs_mark_buffer_dirty(leaf); 2676 2677 out: 2678 btrfs_free_path(path); 2679 return ret; 2680 } 2681 2682 int btrfs_grow_device(struct btrfs_trans_handle *trans, 2683 struct btrfs_device *device, u64 new_size) 2684 { 2685 struct btrfs_super_block *super_copy = 2686 device->dev_root->fs_info->super_copy; 2687 struct btrfs_fs_devices *fs_devices; 2688 u64 old_total; 2689 u64 diff; 2690 2691 if (!device->writeable) 2692 return -EACCES; 2693 2694 lock_chunks(device->dev_root); 2695 old_total = btrfs_super_total_bytes(super_copy); 2696 diff = new_size - device->total_bytes; 2697 2698 if (new_size <= device->total_bytes || 2699 device->is_tgtdev_for_dev_replace) { 2700 unlock_chunks(device->dev_root); 2701 return -EINVAL; 2702 } 2703 2704 fs_devices = device->dev_root->fs_info->fs_devices; 2705 2706 btrfs_set_super_total_bytes(super_copy, old_total + diff); 2707 device->fs_devices->total_rw_bytes += diff; 2708 2709 btrfs_device_set_total_bytes(device, new_size); 2710 btrfs_device_set_disk_total_bytes(device, new_size); 
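	/*
	 * Illustrative numbers (not taken from real state): growing a
	 * device from 100GiB to 150GiB gives diff = 50GiB; the superblock
	 * total and total_rw_bytes grew by diff above, and both in-memory
	 * sizes now read new_size until btrfs_update_device() persists it.
	 */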
2711 btrfs_clear_space_info_full(device->dev_root->fs_info); 2712 if (list_empty(&device->resized_list)) 2713 list_add_tail(&device->resized_list, 2714 &fs_devices->resized_devices); 2715 unlock_chunks(device->dev_root); 2716 2717 return btrfs_update_device(trans, device); 2718 } 2719 2720 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, 2721 struct btrfs_root *root, u64 chunk_objectid, 2722 u64 chunk_offset) 2723 { 2724 int ret; 2725 struct btrfs_path *path; 2726 struct btrfs_key key; 2727 2728 root = root->fs_info->chunk_root; 2729 path = btrfs_alloc_path(); 2730 if (!path) 2731 return -ENOMEM; 2732 2733 key.objectid = chunk_objectid; 2734 key.offset = chunk_offset; 2735 key.type = BTRFS_CHUNK_ITEM_KEY; 2736 2737 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 2738 if (ret < 0) 2739 goto out; 2740 else if (ret > 0) { /* Logic error or corruption */ 2741 btrfs_handle_fs_error(root->fs_info, -ENOENT, 2742 "Failed lookup while freeing chunk."); 2743 ret = -ENOENT; 2744 goto out; 2745 } 2746 2747 ret = btrfs_del_item(trans, root, path); 2748 if (ret < 0) 2749 btrfs_handle_fs_error(root->fs_info, ret, 2750 "Failed to delete chunk item."); 2751 out: 2752 btrfs_free_path(path); 2753 return ret; 2754 } 2755 2756 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64 2757 chunk_offset) 2758 { 2759 struct btrfs_super_block *super_copy = root->fs_info->super_copy; 2760 struct btrfs_disk_key *disk_key; 2761 struct btrfs_chunk *chunk; 2762 u8 *ptr; 2763 int ret = 0; 2764 u32 num_stripes; 2765 u32 array_size; 2766 u32 len = 0; 2767 u32 cur; 2768 struct btrfs_key key; 2769 2770 lock_chunks(root); 2771 array_size = btrfs_super_sys_array_size(super_copy); 2772 2773 ptr = super_copy->sys_chunk_array; 2774 cur = 0; 2775 2776 while (cur < array_size) { 2777 disk_key = (struct btrfs_disk_key *)ptr; 2778 btrfs_disk_key_to_cpu(&key, disk_key); 2779 2780 len = sizeof(*disk_key); 2781 2782 if (key.type == BTRFS_CHUNK_ITEM_KEY) { 2783 chunk = (struct btrfs_chunk *)(ptr + len); 2784 num_stripes = btrfs_stack_chunk_num_stripes(chunk); 2785 len += btrfs_chunk_item_size(num_stripes); 2786 } else { 2787 ret = -EIO; 2788 break; 2789 } 2790 if (key.objectid == chunk_objectid && 2791 key.offset == chunk_offset) { 2792 memmove(ptr, ptr + len, array_size - (cur + len)); 2793 array_size -= len; 2794 btrfs_set_super_sys_array_size(super_copy, array_size); 2795 } else { 2796 ptr += len; 2797 cur += len; 2798 } 2799 } 2800 unlock_chunks(root); 2801 return ret; 2802 } 2803 2804 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, 2805 struct btrfs_root *root, u64 chunk_offset) 2806 { 2807 struct extent_map_tree *em_tree; 2808 struct extent_map *em; 2809 struct btrfs_root *extent_root = root->fs_info->extent_root; 2810 struct map_lookup *map; 2811 u64 dev_extent_len = 0; 2812 u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2813 int i, ret = 0; 2814 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; 2815 2816 /* Just in case */ 2817 root = root->fs_info->chunk_root; 2818 em_tree = &root->fs_info->mapping_tree.map_tree; 2819 2820 read_lock(&em_tree->lock); 2821 em = lookup_extent_mapping(em_tree, chunk_offset, 1); 2822 read_unlock(&em_tree->lock); 2823 2824 if (!em || em->start > chunk_offset || 2825 em->start + em->len < chunk_offset) { 2826 /* 2827 * This is a logic error, but we don't want to just rely on the 2828 * user having built with ASSERT enabled, so if ASSERT doesn't 2829 * do anything we still error out. 
2830 */ 2831 ASSERT(0); 2832 if (em) 2833 free_extent_map(em); 2834 return -EINVAL; 2835 } 2836 map = em->map_lookup; 2837 lock_chunks(root->fs_info->chunk_root); 2838 check_system_chunk(trans, extent_root, map->type); 2839 unlock_chunks(root->fs_info->chunk_root); 2840 2841 /* 2842 * Take the device list mutex to prevent races with the final phase of 2843 * a device replace operation that replaces the device object associated 2844 * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()). 2845 */ 2846 mutex_lock(&fs_devices->device_list_mutex); 2847 for (i = 0; i < map->num_stripes; i++) { 2848 struct btrfs_device *device = map->stripes[i].dev; 2849 ret = btrfs_free_dev_extent(trans, device, 2850 map->stripes[i].physical, 2851 &dev_extent_len); 2852 if (ret) { 2853 mutex_unlock(&fs_devices->device_list_mutex); 2854 btrfs_abort_transaction(trans, ret); 2855 goto out; 2856 } 2857 2858 if (device->bytes_used > 0) { 2859 lock_chunks(root); 2860 btrfs_device_set_bytes_used(device, 2861 device->bytes_used - dev_extent_len); 2862 spin_lock(&root->fs_info->free_chunk_lock); 2863 root->fs_info->free_chunk_space += dev_extent_len; 2864 spin_unlock(&root->fs_info->free_chunk_lock); 2865 btrfs_clear_space_info_full(root->fs_info); 2866 unlock_chunks(root); 2867 } 2868 2869 if (map->stripes[i].dev) { 2870 ret = btrfs_update_device(trans, map->stripes[i].dev); 2871 if (ret) { 2872 mutex_unlock(&fs_devices->device_list_mutex); 2873 btrfs_abort_transaction(trans, ret); 2874 goto out; 2875 } 2876 } 2877 } 2878 mutex_unlock(&fs_devices->device_list_mutex); 2879 2880 ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset); 2881 if (ret) { 2882 btrfs_abort_transaction(trans, ret); 2883 goto out; 2884 } 2885 2886 trace_btrfs_chunk_free(root, map, chunk_offset, em->len); 2887 2888 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 2889 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset); 2890 if (ret) { 2891 btrfs_abort_transaction(trans, ret); 2892 goto out; 2893 } 2894 } 2895 2896 ret = btrfs_remove_block_group(trans, extent_root, chunk_offset, em); 2897 if (ret) { 2898 btrfs_abort_transaction(trans, ret); 2899 goto out; 2900 } 2901 2902 out: 2903 /* once for us */ 2904 free_extent_map(em); 2905 return ret; 2906 } 2907 2908 static int btrfs_relocate_chunk(struct btrfs_root *root, u64 chunk_offset) 2909 { 2910 struct btrfs_root *extent_root; 2911 struct btrfs_trans_handle *trans; 2912 int ret; 2913 2914 root = root->fs_info->chunk_root; 2915 extent_root = root->fs_info->extent_root; 2916 2917 /* 2918 * Prevent races with automatic removal of unused block groups. 2919 * After we relocate and before we remove the chunk with offset 2920 * chunk_offset, automatic removal of the block group can kick in, 2921 * resulting in a failure when calling btrfs_remove_chunk() below. 2922 * 2923 * Make sure to acquire this mutex before doing a tree search (dev 2924 * or chunk trees) to find chunks. Otherwise the cleaner kthread might 2925 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after 2926 * we release the path used to search the chunk/dev tree and before 2927 * the current task acquires this mutex and calls us. 
2928 */ 2929 ASSERT(mutex_is_locked(&root->fs_info->delete_unused_bgs_mutex)); 2930 2931 ret = btrfs_can_relocate(extent_root, chunk_offset); 2932 if (ret) 2933 return -ENOSPC; 2934 2935 /* step one, relocate all the extents inside this chunk */ 2936 btrfs_scrub_pause(root); 2937 ret = btrfs_relocate_block_group(extent_root, chunk_offset); 2938 btrfs_scrub_continue(root); 2939 if (ret) 2940 return ret; 2941 2942 trans = btrfs_start_trans_remove_block_group(root->fs_info, 2943 chunk_offset); 2944 if (IS_ERR(trans)) { 2945 ret = PTR_ERR(trans); 2946 btrfs_handle_fs_error(root->fs_info, ret, NULL); 2947 return ret; 2948 } 2949 2950 /* 2951 * step two, delete the device extents and the 2952 * chunk tree entries 2953 */ 2954 ret = btrfs_remove_chunk(trans, root, chunk_offset); 2955 btrfs_end_transaction(trans, extent_root); 2956 return ret; 2957 } 2958 2959 static int btrfs_relocate_sys_chunks(struct btrfs_root *root) 2960 { 2961 struct btrfs_root *chunk_root = root->fs_info->chunk_root; 2962 struct btrfs_path *path; 2963 struct extent_buffer *leaf; 2964 struct btrfs_chunk *chunk; 2965 struct btrfs_key key; 2966 struct btrfs_key found_key; 2967 u64 chunk_type; 2968 bool retried = false; 2969 int failed = 0; 2970 int ret; 2971 2972 path = btrfs_alloc_path(); 2973 if (!path) 2974 return -ENOMEM; 2975 2976 again: 2977 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2978 key.offset = (u64)-1; 2979 key.type = BTRFS_CHUNK_ITEM_KEY; 2980 2981 while (1) { 2982 mutex_lock(&root->fs_info->delete_unused_bgs_mutex); 2983 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 2984 if (ret < 0) { 2985 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex); 2986 goto error; 2987 } 2988 BUG_ON(ret == 0); /* Corruption */ 2989 2990 ret = btrfs_previous_item(chunk_root, path, key.objectid, 2991 key.type); 2992 if (ret) 2993 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex); 2994 if (ret < 0) 2995 goto error; 2996 if (ret > 0) 2997 break; 2998 2999 leaf = path->nodes[0]; 3000 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3001 3002 chunk = btrfs_item_ptr(leaf, path->slots[0], 3003 struct btrfs_chunk); 3004 chunk_type = btrfs_chunk_type(leaf, chunk); 3005 btrfs_release_path(path); 3006 3007 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { 3008 ret = btrfs_relocate_chunk(chunk_root, 3009 found_key.offset); 3010 if (ret == -ENOSPC) 3011 failed++; 3012 else 3013 BUG_ON(ret); 3014 } 3015 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex); 3016 3017 if (found_key.offset == 0) 3018 break; 3019 key.offset = found_key.offset - 1; 3020 } 3021 ret = 0; 3022 if (failed && !retried) { 3023 failed = 0; 3024 retried = true; 3025 goto again; 3026 } else if (WARN_ON(failed && retried)) { 3027 ret = -ENOSPC; 3028 } 3029 error: 3030 btrfs_free_path(path); 3031 return ret; 3032 } 3033 3034 static int insert_balance_item(struct btrfs_root *root, 3035 struct btrfs_balance_control *bctl) 3036 { 3037 struct btrfs_trans_handle *trans; 3038 struct btrfs_balance_item *item; 3039 struct btrfs_disk_balance_args disk_bargs; 3040 struct btrfs_path *path; 3041 struct extent_buffer *leaf; 3042 struct btrfs_key key; 3043 int ret, err; 3044 3045 path = btrfs_alloc_path(); 3046 if (!path) 3047 return -ENOMEM; 3048 3049 trans = btrfs_start_transaction(root, 0); 3050 if (IS_ERR(trans)) { 3051 btrfs_free_path(path); 3052 return PTR_ERR(trans); 3053 } 3054 3055 key.objectid = BTRFS_BALANCE_OBJECTID; 3056 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3057 key.offset = 0; 3058 3059 ret = btrfs_insert_empty_item(trans, root, path, &key, 3060 
				      sizeof(*item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));

	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
	btrfs_set_balance_data(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
	btrfs_set_balance_meta(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
	btrfs_set_balance_sys(leaf, item, &disk_bargs);

	btrfs_set_balance_flags(leaf, item, bctl->flags);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans, root);
	if (err && !ret)
		ret = err;
	return ret;
}

static int del_balance_item(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans, root);
	if (err && !ret)
		ret = err;
	return ret;
}

/*
 * This is a heuristic used to reduce the number of chunks balanced on
 * resume after balance was interrupted.
 */
static void update_balance_args(struct btrfs_balance_control *bctl)
{
	/*
	 * Turn on soft mode for chunk types that were being converted.
	 */
	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;

	/*
	 * Turn on the usage filter if it is not already in use. The idea
	 * is that chunks we have already balanced should be reasonably
	 * full. Don't do it for chunks that are being converted - that
	 * would keep us from relocating unconverted (albeit full) chunks.
3147 */ 3148 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) && 3149 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3150 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3151 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE; 3152 bctl->data.usage = 90; 3153 } 3154 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) && 3155 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3156 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3157 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE; 3158 bctl->sys.usage = 90; 3159 } 3160 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) && 3161 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3162 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3163 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE; 3164 bctl->meta.usage = 90; 3165 } 3166 } 3167 3168 /* 3169 * Should be called with both balance and volume mutexes held to 3170 * serialize other volume operations (add_dev/rm_dev/resize) with 3171 * restriper. Same goes for unset_balance_control. 3172 */ 3173 static void set_balance_control(struct btrfs_balance_control *bctl) 3174 { 3175 struct btrfs_fs_info *fs_info = bctl->fs_info; 3176 3177 BUG_ON(fs_info->balance_ctl); 3178 3179 spin_lock(&fs_info->balance_lock); 3180 fs_info->balance_ctl = bctl; 3181 spin_unlock(&fs_info->balance_lock); 3182 } 3183 3184 static void unset_balance_control(struct btrfs_fs_info *fs_info) 3185 { 3186 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3187 3188 BUG_ON(!fs_info->balance_ctl); 3189 3190 spin_lock(&fs_info->balance_lock); 3191 fs_info->balance_ctl = NULL; 3192 spin_unlock(&fs_info->balance_lock); 3193 3194 kfree(bctl); 3195 } 3196 3197 /* 3198 * Balance filters. Return 1 if chunk should be filtered out 3199 * (should not be balanced). 3200 */ 3201 static int chunk_profiles_filter(u64 chunk_type, 3202 struct btrfs_balance_args *bargs) 3203 { 3204 chunk_type = chunk_to_extended(chunk_type) & 3205 BTRFS_EXTENDED_PROFILE_MASK; 3206 3207 if (bargs->profiles & chunk_type) 3208 return 0; 3209 3210 return 1; 3211 } 3212 3213 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, 3214 struct btrfs_balance_args *bargs) 3215 { 3216 struct btrfs_block_group_cache *cache; 3217 u64 chunk_used; 3218 u64 user_thresh_min; 3219 u64 user_thresh_max; 3220 int ret = 1; 3221 3222 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3223 chunk_used = btrfs_block_group_used(&cache->item); 3224 3225 if (bargs->usage_min == 0) 3226 user_thresh_min = 0; 3227 else 3228 user_thresh_min = div_factor_fine(cache->key.offset, 3229 bargs->usage_min); 3230 3231 if (bargs->usage_max == 0) 3232 user_thresh_max = 1; 3233 else if (bargs->usage_max > 100) 3234 user_thresh_max = cache->key.offset; 3235 else 3236 user_thresh_max = div_factor_fine(cache->key.offset, 3237 bargs->usage_max); 3238 3239 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max) 3240 ret = 0; 3241 3242 btrfs_put_block_group(cache); 3243 return ret; 3244 } 3245 3246 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, 3247 u64 chunk_offset, struct btrfs_balance_args *bargs) 3248 { 3249 struct btrfs_block_group_cache *cache; 3250 u64 chunk_used, user_thresh; 3251 int ret = 1; 3252 3253 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3254 chunk_used = btrfs_block_group_used(&cache->item); 3255 3256 if (bargs->usage_min == 0) 3257 user_thresh = 1; 3258 else if (bargs->usage > 100) 3259 user_thresh = cache->key.offset; 3260 else 3261 user_thresh = div_factor_fine(cache->key.offset, 3262 bargs->usage); 
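	/*
	 * Worked example with made-up numbers: for a 1GiB chunk and
	 * bargs->usage == 90, div_factor_fine() yields a threshold of
	 * roughly 0.9GiB; a chunk using less than that is kept for
	 * balancing (return 0), fuller chunks are filtered out (return 1).
	 */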
3263 3264 if (chunk_used < user_thresh) 3265 ret = 0; 3266 3267 btrfs_put_block_group(cache); 3268 return ret; 3269 } 3270 3271 static int chunk_devid_filter(struct extent_buffer *leaf, 3272 struct btrfs_chunk *chunk, 3273 struct btrfs_balance_args *bargs) 3274 { 3275 struct btrfs_stripe *stripe; 3276 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3277 int i; 3278 3279 for (i = 0; i < num_stripes; i++) { 3280 stripe = btrfs_stripe_nr(chunk, i); 3281 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid) 3282 return 0; 3283 } 3284 3285 return 1; 3286 } 3287 3288 /* [pstart, pend) */ 3289 static int chunk_drange_filter(struct extent_buffer *leaf, 3290 struct btrfs_chunk *chunk, 3291 u64 chunk_offset, 3292 struct btrfs_balance_args *bargs) 3293 { 3294 struct btrfs_stripe *stripe; 3295 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3296 u64 stripe_offset; 3297 u64 stripe_length; 3298 int factor; 3299 int i; 3300 3301 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID)) 3302 return 0; 3303 3304 if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP | 3305 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) { 3306 factor = num_stripes / 2; 3307 } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) { 3308 factor = num_stripes - 1; 3309 } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) { 3310 factor = num_stripes - 2; 3311 } else { 3312 factor = num_stripes; 3313 } 3314 3315 for (i = 0; i < num_stripes; i++) { 3316 stripe = btrfs_stripe_nr(chunk, i); 3317 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid) 3318 continue; 3319 3320 stripe_offset = btrfs_stripe_offset(leaf, stripe); 3321 stripe_length = btrfs_chunk_length(leaf, chunk); 3322 stripe_length = div_u64(stripe_length, factor); 3323 3324 if (stripe_offset < bargs->pend && 3325 stripe_offset + stripe_length > bargs->pstart) 3326 return 0; 3327 } 3328 3329 return 1; 3330 } 3331 3332 /* [vstart, vend) */ 3333 static int chunk_vrange_filter(struct extent_buffer *leaf, 3334 struct btrfs_chunk *chunk, 3335 u64 chunk_offset, 3336 struct btrfs_balance_args *bargs) 3337 { 3338 if (chunk_offset < bargs->vend && 3339 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart) 3340 /* at least part of the chunk is inside this vrange */ 3341 return 0; 3342 3343 return 1; 3344 } 3345 3346 static int chunk_stripes_range_filter(struct extent_buffer *leaf, 3347 struct btrfs_chunk *chunk, 3348 struct btrfs_balance_args *bargs) 3349 { 3350 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3351 3352 if (bargs->stripes_min <= num_stripes 3353 && num_stripes <= bargs->stripes_max) 3354 return 0; 3355 3356 return 1; 3357 } 3358 3359 static int chunk_soft_convert_filter(u64 chunk_type, 3360 struct btrfs_balance_args *bargs) 3361 { 3362 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 3363 return 0; 3364 3365 chunk_type = chunk_to_extended(chunk_type) & 3366 BTRFS_EXTENDED_PROFILE_MASK; 3367 3368 if (bargs->target == chunk_type) 3369 return 1; 3370 3371 return 0; 3372 } 3373 3374 static int should_balance_chunk(struct btrfs_root *root, 3375 struct extent_buffer *leaf, 3376 struct btrfs_chunk *chunk, u64 chunk_offset) 3377 { 3378 struct btrfs_balance_control *bctl = root->fs_info->balance_ctl; 3379 struct btrfs_balance_args *bargs = NULL; 3380 u64 chunk_type = btrfs_chunk_type(leaf, chunk); 3381 3382 /* type filter */ 3383 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) & 3384 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) { 3385 return 0; 3386 } 3387 3388 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3389 bargs 
= &bctl->data; 3390 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3391 bargs = &bctl->sys; 3392 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3393 bargs = &bctl->meta; 3394 3395 /* profiles filter */ 3396 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) && 3397 chunk_profiles_filter(chunk_type, bargs)) { 3398 return 0; 3399 } 3400 3401 /* usage filter */ 3402 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) && 3403 chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) { 3404 return 0; 3405 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3406 chunk_usage_range_filter(bctl->fs_info, chunk_offset, bargs)) { 3407 return 0; 3408 } 3409 3410 /* devid filter */ 3411 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) && 3412 chunk_devid_filter(leaf, chunk, bargs)) { 3413 return 0; 3414 } 3415 3416 /* drange filter, makes sense only with devid filter */ 3417 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) && 3418 chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) { 3419 return 0; 3420 } 3421 3422 /* vrange filter */ 3423 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) && 3424 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) { 3425 return 0; 3426 } 3427 3428 /* stripes filter */ 3429 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) && 3430 chunk_stripes_range_filter(leaf, chunk, bargs)) { 3431 return 0; 3432 } 3433 3434 /* soft profile changing mode */ 3435 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) && 3436 chunk_soft_convert_filter(chunk_type, bargs)) { 3437 return 0; 3438 } 3439 3440 /* 3441 * limited by count, must be the last filter 3442 */ 3443 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) { 3444 if (bargs->limit == 0) 3445 return 0; 3446 else 3447 bargs->limit--; 3448 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) { 3449 /* 3450 * Same logic as the 'limit' filter; the minimum cannot be 3451 * determined here because we do not have the global information 3452 * about the count of all chunks that satisfy the filters. 
 */
		if (bargs->limit_max == 0)
			return 0;
		else
			bargs->limit_max--;
	}

	return 1;
}

static int __btrfs_balance(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct list_head *devices;
	struct btrfs_device *device;
	u64 old_size;
	u64 size_to_free;
	u64 chunk_type;
	struct btrfs_chunk *chunk;
	struct btrfs_path *path = NULL;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	int slot;
	int ret;
	int enospc_errors = 0;
	bool counting = true;
	/*
	 * The single value limit and the min/max limits use the same bytes
	 * in the balance args, so save the single values here; the counting
	 * pass below decrements them.
	 */
	u64 limit_data = bctl->data.limit;
	u64 limit_meta = bctl->meta.limit;
	u64 limit_sys = bctl->sys.limit;
	u32 count_data = 0;
	u32 count_meta = 0;
	u32 count_sys = 0;
	int chunk_reserved = 0;
	u64 bytes_used = 0;

	/* step one, make some room on all the devices */
	devices = &fs_info->fs_devices->devices;
	list_for_each_entry(device, devices, dev_list) {
		old_size = btrfs_device_get_total_bytes(device);
		size_to_free = div_factor(old_size, 1);
		size_to_free = min_t(u64, size_to_free, SZ_1M);
		if (!device->writeable ||
		    btrfs_device_get_total_bytes(device) -
		    btrfs_device_get_bytes_used(device) > size_to_free ||
		    device->is_tgtdev_for_dev_replace)
			continue;

		ret = btrfs_shrink_device(device, old_size - size_to_free);
		if (ret == -ENOSPC)
			break;
		if (ret) {
			/* btrfs_shrink_device never returns ret > 0 */
			WARN_ON(ret > 0);
			goto error;
		}

		trans = btrfs_start_transaction(dev_root, 0);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			btrfs_info_in_rcu(fs_info,
		 "resize: unable to start transaction after shrinking device %s (error %d), old size %llu, new size %llu",
					  rcu_str_deref(device->name), ret,
					  old_size, old_size - size_to_free);
			goto error;
		}

		ret = btrfs_grow_device(trans, device, old_size);
		if (ret) {
			btrfs_end_transaction(trans, dev_root);
			/* btrfs_grow_device never returns ret > 0 */
			WARN_ON(ret > 0);
			btrfs_info_in_rcu(fs_info,
		 "resize: unable to grow device after shrinking device %s (error %d), old size %llu, new size %llu",
					  rcu_str_deref(device->name), ret,
					  old_size, old_size - size_to_free);
			goto error;
		}

		btrfs_end_transaction(trans, dev_root);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}

	/* zero out stat counters */
	spin_lock(&fs_info->balance_lock);
	memset(&bctl->stat, 0, sizeof(bctl->stat));
	spin_unlock(&fs_info->balance_lock);
again:
	if (!counting) {
		/*
		 * The single value limit and the min/max limits use the same
		 * bytes in the balance args; restore the single values that
		 * the counting pass decremented.
		 */
		bctl->data.limit = limit_data;
		bctl->meta.limit = limit_meta;
		bctl->sys.limit = limit_sys;
	}
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
		    atomic_read(&fs_info->balance_cancel_req)) {
			ret = -ECANCELED;
			goto error;
		}

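		/*
		 * Taken before each tree search so the cleaner thread cannot
		 * delete a now-unused block group between the lookup and
		 * btrfs_relocate_chunk(); see the comment in
		 * btrfs_relocate_chunk() for the full reasoning.
		 */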
3571 mutex_lock(&fs_info->delete_unused_bgs_mutex); 3572 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3573 if (ret < 0) { 3574 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3575 goto error; 3576 } 3577 3578 /* 3579 * this shouldn't happen, it means the last relocate 3580 * failed 3581 */ 3582 if (ret == 0) 3583 BUG(); /* FIXME break ? */ 3584 3585 ret = btrfs_previous_item(chunk_root, path, 0, 3586 BTRFS_CHUNK_ITEM_KEY); 3587 if (ret) { 3588 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3589 ret = 0; 3590 break; 3591 } 3592 3593 leaf = path->nodes[0]; 3594 slot = path->slots[0]; 3595 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3596 3597 if (found_key.objectid != key.objectid) { 3598 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3599 break; 3600 } 3601 3602 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 3603 chunk_type = btrfs_chunk_type(leaf, chunk); 3604 3605 if (!counting) { 3606 spin_lock(&fs_info->balance_lock); 3607 bctl->stat.considered++; 3608 spin_unlock(&fs_info->balance_lock); 3609 } 3610 3611 ret = should_balance_chunk(chunk_root, leaf, chunk, 3612 found_key.offset); 3613 3614 btrfs_release_path(path); 3615 if (!ret) { 3616 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3617 goto loop; 3618 } 3619 3620 if (counting) { 3621 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3622 spin_lock(&fs_info->balance_lock); 3623 bctl->stat.expected++; 3624 spin_unlock(&fs_info->balance_lock); 3625 3626 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3627 count_data++; 3628 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3629 count_sys++; 3630 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3631 count_meta++; 3632 3633 goto loop; 3634 } 3635 3636 /* 3637 * Apply limit_min filter, no need to check if the LIMITS 3638 * filter is used, limit_min is 0 by default 3639 */ 3640 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) && 3641 count_data < bctl->data.limit_min) 3642 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) && 3643 count_meta < bctl->meta.limit_min) 3644 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) && 3645 count_sys < bctl->sys.limit_min)) { 3646 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3647 goto loop; 3648 } 3649 3650 ASSERT(fs_info->data_sinfo); 3651 spin_lock(&fs_info->data_sinfo->lock); 3652 bytes_used = fs_info->data_sinfo->bytes_used; 3653 spin_unlock(&fs_info->data_sinfo->lock); 3654 3655 if ((chunk_type & BTRFS_BLOCK_GROUP_DATA) && 3656 !chunk_reserved && !bytes_used) { 3657 trans = btrfs_start_transaction(chunk_root, 0); 3658 if (IS_ERR(trans)) { 3659 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3660 ret = PTR_ERR(trans); 3661 goto error; 3662 } 3663 3664 ret = btrfs_force_chunk_alloc(trans, chunk_root, 3665 BTRFS_BLOCK_GROUP_DATA); 3666 btrfs_end_transaction(trans, chunk_root); 3667 if (ret < 0) { 3668 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3669 goto error; 3670 } 3671 chunk_reserved = 1; 3672 } 3673 3674 ret = btrfs_relocate_chunk(chunk_root, 3675 found_key.offset); 3676 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3677 if (ret && ret != -ENOSPC) 3678 goto error; 3679 if (ret == -ENOSPC) { 3680 enospc_errors++; 3681 } else { 3682 spin_lock(&fs_info->balance_lock); 3683 bctl->stat.completed++; 3684 spin_unlock(&fs_info->balance_lock); 3685 } 3686 loop: 3687 if (found_key.offset == 0) 3688 break; 3689 key.offset = found_key.offset - 1; 3690 } 3691 3692 if (counting) { 3693 btrfs_release_path(path); 3694 counting = false; 3695 goto again; 3696 } 3697 error: 3698 btrfs_free_path(path); 3699 if (enospc_errors) { 3700 
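		/*
		 * Relocation has to copy extents elsewhere before the old
		 * chunk is freed, so repeated -ENOSPC here usually means the
		 * filesystem was too full to shuffle the remaining chunks.
		 */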
btrfs_info(fs_info, "%d enospc errors during balance", 3701 enospc_errors); 3702 if (!ret) 3703 ret = -ENOSPC; 3704 } 3705 3706 return ret; 3707 } 3708 3709 /** 3710 * alloc_profile_is_valid - see if a given profile is valid and reduced 3711 * @flags: profile to validate 3712 * @extended: if true @flags is treated as an extended profile 3713 */ 3714 static int alloc_profile_is_valid(u64 flags, int extended) 3715 { 3716 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK : 3717 BTRFS_BLOCK_GROUP_PROFILE_MASK); 3718 3719 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK; 3720 3721 /* 1) check that all other bits are zeroed */ 3722 if (flags & ~mask) 3723 return 0; 3724 3725 /* 2) see if profile is reduced */ 3726 if (flags == 0) 3727 return !extended; /* "0" is valid for usual profiles */ 3728 3729 /* true if exactly one bit set */ 3730 return (flags & (flags - 1)) == 0; 3731 } 3732 3733 static inline int balance_need_close(struct btrfs_fs_info *fs_info) 3734 { 3735 /* cancel requested || normal exit path */ 3736 return atomic_read(&fs_info->balance_cancel_req) || 3737 (atomic_read(&fs_info->balance_pause_req) == 0 && 3738 atomic_read(&fs_info->balance_cancel_req) == 0); 3739 } 3740 3741 static void __cancel_balance(struct btrfs_fs_info *fs_info) 3742 { 3743 int ret; 3744 3745 unset_balance_control(fs_info); 3746 ret = del_balance_item(fs_info->tree_root); 3747 if (ret) 3748 btrfs_handle_fs_error(fs_info, ret, NULL); 3749 3750 atomic_set(&fs_info->mutually_exclusive_operation_running, 0); 3751 } 3752 3753 /* Non-zero return value signifies invalidity */ 3754 static inline int validate_convert_profile(struct btrfs_balance_args *bctl_arg, 3755 u64 allowed) 3756 { 3757 return ((bctl_arg->flags & BTRFS_BALANCE_ARGS_CONVERT) && 3758 (!alloc_profile_is_valid(bctl_arg->target, 1) || 3759 (bctl_arg->target & ~allowed))); 3760 } 3761 3762 /* 3763 * Should be called with both balance and volume mutexes held 3764 */ 3765 int btrfs_balance(struct btrfs_balance_control *bctl, 3766 struct btrfs_ioctl_balance_args *bargs) 3767 { 3768 struct btrfs_fs_info *fs_info = bctl->fs_info; 3769 u64 allowed; 3770 int mixed = 0; 3771 int ret; 3772 u64 num_devices; 3773 unsigned seq; 3774 3775 if (btrfs_fs_closing(fs_info) || 3776 atomic_read(&fs_info->balance_pause_req) || 3777 atomic_read(&fs_info->balance_cancel_req)) { 3778 ret = -EINVAL; 3779 goto out; 3780 } 3781 3782 allowed = btrfs_super_incompat_flags(fs_info->super_copy); 3783 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) 3784 mixed = 1; 3785 3786 /* 3787 * In case of mixed groups both data and meta should be picked, 3788 * and identical options should be given for both of them. 
3789 */ 3790 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA; 3791 if (mixed && (bctl->flags & allowed)) { 3792 if (!(bctl->flags & BTRFS_BALANCE_DATA) || 3793 !(bctl->flags & BTRFS_BALANCE_METADATA) || 3794 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) { 3795 btrfs_err(fs_info, "with mixed groups data and " 3796 "metadata balance options must be the same"); 3797 ret = -EINVAL; 3798 goto out; 3799 } 3800 } 3801 3802 num_devices = fs_info->fs_devices->num_devices; 3803 btrfs_dev_replace_lock(&fs_info->dev_replace, 0); 3804 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) { 3805 BUG_ON(num_devices < 1); 3806 num_devices--; 3807 } 3808 btrfs_dev_replace_unlock(&fs_info->dev_replace, 0); 3809 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE | BTRFS_BLOCK_GROUP_DUP; 3810 if (num_devices > 1) 3811 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1); 3812 if (num_devices > 2) 3813 allowed |= BTRFS_BLOCK_GROUP_RAID5; 3814 if (num_devices > 3) 3815 allowed |= (BTRFS_BLOCK_GROUP_RAID10 | 3816 BTRFS_BLOCK_GROUP_RAID6); 3817 if (validate_convert_profile(&bctl->data, allowed)) { 3818 btrfs_err(fs_info, "unable to start balance with target " 3819 "data profile %llu", 3820 bctl->data.target); 3821 ret = -EINVAL; 3822 goto out; 3823 } 3824 if (validate_convert_profile(&bctl->meta, allowed)) { 3825 btrfs_err(fs_info, 3826 "unable to start balance with target metadata profile %llu", 3827 bctl->meta.target); 3828 ret = -EINVAL; 3829 goto out; 3830 } 3831 if (validate_convert_profile(&bctl->sys, allowed)) { 3832 btrfs_err(fs_info, 3833 "unable to start balance with target system profile %llu", 3834 bctl->sys.target); 3835 ret = -EINVAL; 3836 goto out; 3837 } 3838 3839 /* allow to reduce meta or sys integrity only if force set */ 3840 allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 | 3841 BTRFS_BLOCK_GROUP_RAID10 | 3842 BTRFS_BLOCK_GROUP_RAID5 | 3843 BTRFS_BLOCK_GROUP_RAID6; 3844 do { 3845 seq = read_seqbegin(&fs_info->profiles_lock); 3846 3847 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) && 3848 (fs_info->avail_system_alloc_bits & allowed) && 3849 !(bctl->sys.target & allowed)) || 3850 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && 3851 (fs_info->avail_metadata_alloc_bits & allowed) && 3852 !(bctl->meta.target & allowed))) { 3853 if (bctl->flags & BTRFS_BALANCE_FORCE) { 3854 btrfs_info(fs_info, "force reducing metadata integrity"); 3855 } else { 3856 btrfs_err(fs_info, "balance will reduce metadata " 3857 "integrity, use force if you want this"); 3858 ret = -EINVAL; 3859 goto out; 3860 } 3861 } 3862 } while (read_seqretry(&fs_info->profiles_lock, seq)); 3863 3864 if (btrfs_get_num_tolerated_disk_barrier_failures(bctl->meta.target) < 3865 btrfs_get_num_tolerated_disk_barrier_failures(bctl->data.target)) { 3866 btrfs_warn(fs_info, 3867 "metadata profile 0x%llx has lower redundancy than data profile 0x%llx", 3868 bctl->meta.target, bctl->data.target); 3869 } 3870 3871 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) { 3872 fs_info->num_tolerated_disk_barrier_failures = min( 3873 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info), 3874 btrfs_get_num_tolerated_disk_barrier_failures( 3875 bctl->sys.target)); 3876 } 3877 3878 ret = insert_balance_item(fs_info->tree_root, bctl); 3879 if (ret && ret != -EEXIST) 3880 goto out; 3881 3882 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) { 3883 BUG_ON(ret == -EEXIST); 3884 set_balance_control(bctl); 3885 } else { 3886 BUG_ON(ret != -EEXIST); 3887 spin_lock(&fs_info->balance_lock); 3888 update_balance_args(bctl); 3889 
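		/*
		 * Resume path: update_balance_args() has just enabled the
		 * soft/usage filters so that chunks balanced before the
		 * interruption are not needlessly relocated again.
		 */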
spin_unlock(&fs_info->balance_lock); 3890 } 3891 3892 atomic_inc(&fs_info->balance_running); 3893 mutex_unlock(&fs_info->balance_mutex); 3894 3895 ret = __btrfs_balance(fs_info); 3896 3897 mutex_lock(&fs_info->balance_mutex); 3898 atomic_dec(&fs_info->balance_running); 3899 3900 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) { 3901 fs_info->num_tolerated_disk_barrier_failures = 3902 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info); 3903 } 3904 3905 if (bargs) { 3906 memset(bargs, 0, sizeof(*bargs)); 3907 update_ioctl_balance_args(fs_info, 0, bargs); 3908 } 3909 3910 if ((ret && ret != -ECANCELED && ret != -ENOSPC) || 3911 balance_need_close(fs_info)) { 3912 __cancel_balance(fs_info); 3913 } 3914 3915 wake_up(&fs_info->balance_wait_q); 3916 3917 return ret; 3918 out: 3919 if (bctl->flags & BTRFS_BALANCE_RESUME) 3920 __cancel_balance(fs_info); 3921 else { 3922 kfree(bctl); 3923 atomic_set(&fs_info->mutually_exclusive_operation_running, 0); 3924 } 3925 return ret; 3926 } 3927 3928 static int balance_kthread(void *data) 3929 { 3930 struct btrfs_fs_info *fs_info = data; 3931 int ret = 0; 3932 3933 mutex_lock(&fs_info->volume_mutex); 3934 mutex_lock(&fs_info->balance_mutex); 3935 3936 if (fs_info->balance_ctl) { 3937 btrfs_info(fs_info, "continuing balance"); 3938 ret = btrfs_balance(fs_info->balance_ctl, NULL); 3939 } 3940 3941 mutex_unlock(&fs_info->balance_mutex); 3942 mutex_unlock(&fs_info->volume_mutex); 3943 3944 return ret; 3945 } 3946 3947 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info) 3948 { 3949 struct task_struct *tsk; 3950 3951 spin_lock(&fs_info->balance_lock); 3952 if (!fs_info->balance_ctl) { 3953 spin_unlock(&fs_info->balance_lock); 3954 return 0; 3955 } 3956 spin_unlock(&fs_info->balance_lock); 3957 3958 if (btrfs_test_opt(fs_info, SKIP_BALANCE)) { 3959 btrfs_info(fs_info, "force skipping balance"); 3960 return 0; 3961 } 3962 3963 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); 3964 return PTR_ERR_OR_ZERO(tsk); 3965 } 3966 3967 int btrfs_recover_balance(struct btrfs_fs_info *fs_info) 3968 { 3969 struct btrfs_balance_control *bctl; 3970 struct btrfs_balance_item *item; 3971 struct btrfs_disk_balance_args disk_bargs; 3972 struct btrfs_path *path; 3973 struct extent_buffer *leaf; 3974 struct btrfs_key key; 3975 int ret; 3976 3977 path = btrfs_alloc_path(); 3978 if (!path) 3979 return -ENOMEM; 3980 3981 key.objectid = BTRFS_BALANCE_OBJECTID; 3982 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3983 key.offset = 0; 3984 3985 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 3986 if (ret < 0) 3987 goto out; 3988 if (ret > 0) { /* ret = -ENOENT; */ 3989 ret = 0; 3990 goto out; 3991 } 3992 3993 bctl = kzalloc(sizeof(*bctl), GFP_NOFS); 3994 if (!bctl) { 3995 ret = -ENOMEM; 3996 goto out; 3997 } 3998 3999 leaf = path->nodes[0]; 4000 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 4001 4002 bctl->fs_info = fs_info; 4003 bctl->flags = btrfs_balance_flags(leaf, item); 4004 bctl->flags |= BTRFS_BALANCE_RESUME; 4005 4006 btrfs_balance_data(leaf, item, &disk_bargs); 4007 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs); 4008 btrfs_balance_meta(leaf, item, &disk_bargs); 4009 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs); 4010 btrfs_balance_sys(leaf, item, &disk_bargs); 4011 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs); 4012 4013 WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)); 4014 4015 mutex_lock(&fs_info->volume_mutex); 4016 mutex_lock(&fs_info->balance_mutex); 4017 4018 
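	/*
	 * Both volume_mutex and balance_mutex are held here, as
	 * set_balance_control() requires, so the resumed balance cannot
	 * race with device add/remove/resize.
	 */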
set_balance_control(bctl); 4019 4020 mutex_unlock(&fs_info->balance_mutex); 4021 mutex_unlock(&fs_info->volume_mutex); 4022 out: 4023 btrfs_free_path(path); 4024 return ret; 4025 } 4026 4027 int btrfs_pause_balance(struct btrfs_fs_info *fs_info) 4028 { 4029 int ret = 0; 4030 4031 mutex_lock(&fs_info->balance_mutex); 4032 if (!fs_info->balance_ctl) { 4033 mutex_unlock(&fs_info->balance_mutex); 4034 return -ENOTCONN; 4035 } 4036 4037 if (atomic_read(&fs_info->balance_running)) { 4038 atomic_inc(&fs_info->balance_pause_req); 4039 mutex_unlock(&fs_info->balance_mutex); 4040 4041 wait_event(fs_info->balance_wait_q, 4042 atomic_read(&fs_info->balance_running) == 0); 4043 4044 mutex_lock(&fs_info->balance_mutex); 4045 /* we are good with balance_ctl ripped off from under us */ 4046 BUG_ON(atomic_read(&fs_info->balance_running)); 4047 atomic_dec(&fs_info->balance_pause_req); 4048 } else { 4049 ret = -ENOTCONN; 4050 } 4051 4052 mutex_unlock(&fs_info->balance_mutex); 4053 return ret; 4054 } 4055 4056 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) 4057 { 4058 if (fs_info->sb->s_flags & MS_RDONLY) 4059 return -EROFS; 4060 4061 mutex_lock(&fs_info->balance_mutex); 4062 if (!fs_info->balance_ctl) { 4063 mutex_unlock(&fs_info->balance_mutex); 4064 return -ENOTCONN; 4065 } 4066 4067 atomic_inc(&fs_info->balance_cancel_req); 4068 /* 4069 * if we are running just wait and return, balance item is 4070 * deleted in btrfs_balance in this case 4071 */ 4072 if (atomic_read(&fs_info->balance_running)) { 4073 mutex_unlock(&fs_info->balance_mutex); 4074 wait_event(fs_info->balance_wait_q, 4075 atomic_read(&fs_info->balance_running) == 0); 4076 mutex_lock(&fs_info->balance_mutex); 4077 } else { 4078 /* __cancel_balance needs volume_mutex */ 4079 mutex_unlock(&fs_info->balance_mutex); 4080 mutex_lock(&fs_info->volume_mutex); 4081 mutex_lock(&fs_info->balance_mutex); 4082 4083 if (fs_info->balance_ctl) 4084 __cancel_balance(fs_info); 4085 4086 mutex_unlock(&fs_info->volume_mutex); 4087 } 4088 4089 BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running)); 4090 atomic_dec(&fs_info->balance_cancel_req); 4091 mutex_unlock(&fs_info->balance_mutex); 4092 return 0; 4093 } 4094 4095 static int btrfs_uuid_scan_kthread(void *data) 4096 { 4097 struct btrfs_fs_info *fs_info = data; 4098 struct btrfs_root *root = fs_info->tree_root; 4099 struct btrfs_key key; 4100 struct btrfs_key max_key; 4101 struct btrfs_path *path = NULL; 4102 int ret = 0; 4103 struct extent_buffer *eb; 4104 int slot; 4105 struct btrfs_root_item root_item; 4106 u32 item_size; 4107 struct btrfs_trans_handle *trans = NULL; 4108 4109 path = btrfs_alloc_path(); 4110 if (!path) { 4111 ret = -ENOMEM; 4112 goto out; 4113 } 4114 4115 key.objectid = 0; 4116 key.type = BTRFS_ROOT_ITEM_KEY; 4117 key.offset = 0; 4118 4119 max_key.objectid = (u64)-1; 4120 max_key.type = BTRFS_ROOT_ITEM_KEY; 4121 max_key.offset = (u64)-1; 4122 4123 while (1) { 4124 ret = btrfs_search_forward(root, &key, path, 0); 4125 if (ret) { 4126 if (ret > 0) 4127 ret = 0; 4128 break; 4129 } 4130 4131 if (key.type != BTRFS_ROOT_ITEM_KEY || 4132 (key.objectid < BTRFS_FIRST_FREE_OBJECTID && 4133 key.objectid != BTRFS_FS_TREE_OBJECTID) || 4134 key.objectid > BTRFS_LAST_FREE_OBJECTID) 4135 goto skip; 4136 4137 eb = path->nodes[0]; 4138 slot = path->slots[0]; 4139 item_size = btrfs_item_size_nr(eb, slot); 4140 if (item_size < sizeof(root_item)) 4141 goto skip; 4142 4143 read_extent_buffer(eb, &root_item, 4144 btrfs_item_ptr_offset(eb, slot), 4145 (int)sizeof(root_item)); 4146 if 
(btrfs_root_refs(&root_item) == 0) 4147 goto skip; 4148 4149 if (!btrfs_is_empty_uuid(root_item.uuid) || 4150 !btrfs_is_empty_uuid(root_item.received_uuid)) { 4151 if (trans) 4152 goto update_tree; 4153 4154 btrfs_release_path(path); 4155 /* 4156 * 1 - subvol uuid item 4157 * 1 - received_subvol uuid item 4158 */ 4159 trans = btrfs_start_transaction(fs_info->uuid_root, 2); 4160 if (IS_ERR(trans)) { 4161 ret = PTR_ERR(trans); 4162 break; 4163 } 4164 continue; 4165 } else { 4166 goto skip; 4167 } 4168 update_tree: 4169 if (!btrfs_is_empty_uuid(root_item.uuid)) { 4170 ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root, 4171 root_item.uuid, 4172 BTRFS_UUID_KEY_SUBVOL, 4173 key.objectid); 4174 if (ret < 0) { 4175 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4176 ret); 4177 break; 4178 } 4179 } 4180 4181 if (!btrfs_is_empty_uuid(root_item.received_uuid)) { 4182 ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root, 4183 root_item.received_uuid, 4184 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4185 key.objectid); 4186 if (ret < 0) { 4187 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4188 ret); 4189 break; 4190 } 4191 } 4192 4193 skip: 4194 if (trans) { 4195 ret = btrfs_end_transaction(trans, fs_info->uuid_root); 4196 trans = NULL; 4197 if (ret) 4198 break; 4199 } 4200 4201 btrfs_release_path(path); 4202 if (key.offset < (u64)-1) { 4203 key.offset++; 4204 } else if (key.type < BTRFS_ROOT_ITEM_KEY) { 4205 key.offset = 0; 4206 key.type = BTRFS_ROOT_ITEM_KEY; 4207 } else if (key.objectid < (u64)-1) { 4208 key.offset = 0; 4209 key.type = BTRFS_ROOT_ITEM_KEY; 4210 key.objectid++; 4211 } else { 4212 break; 4213 } 4214 cond_resched(); 4215 } 4216 4217 out: 4218 btrfs_free_path(path); 4219 if (trans && !IS_ERR(trans)) 4220 btrfs_end_transaction(trans, fs_info->uuid_root); 4221 if (ret) 4222 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret); 4223 else 4224 fs_info->update_uuid_tree_gen = 1; 4225 up(&fs_info->uuid_tree_rescan_sem); 4226 return 0; 4227 } 4228 4229 /* 4230 * Callback for btrfs_uuid_tree_iterate(). 4231 * returns: 4232 * 0 check succeeded, the entry is not outdated. 4233 * < 0 if an error occurred. 4234 * > 0 if the check failed, which means the caller shall remove the entry. 4235 */ 4236 static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info, 4237 u8 *uuid, u8 type, u64 subid) 4238 { 4239 struct btrfs_key key; 4240 int ret = 0; 4241 struct btrfs_root *subvol_root; 4242 4243 if (type != BTRFS_UUID_KEY_SUBVOL && 4244 type != BTRFS_UUID_KEY_RECEIVED_SUBVOL) 4245 goto out; 4246 4247 key.objectid = subid; 4248 key.type = BTRFS_ROOT_ITEM_KEY; 4249 key.offset = (u64)-1; 4250 subvol_root = btrfs_read_fs_root_no_name(fs_info, &key); 4251 if (IS_ERR(subvol_root)) { 4252 ret = PTR_ERR(subvol_root); 4253 if (ret == -ENOENT) 4254 ret = 1; 4255 goto out; 4256 } 4257 4258 switch (type) { 4259 case BTRFS_UUID_KEY_SUBVOL: 4260 if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE)) 4261 ret = 1; 4262 break; 4263 case BTRFS_UUID_KEY_RECEIVED_SUBVOL: 4264 if (memcmp(uuid, subvol_root->root_item.received_uuid, 4265 BTRFS_UUID_SIZE)) 4266 ret = 1; 4267 break; 4268 } 4269 4270 out: 4271 return ret; 4272 } 4273 4274 static int btrfs_uuid_rescan_kthread(void *data) 4275 { 4276 struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data; 4277 int ret; 4278 4279 /* 4280 * 1st step is to iterate through the existing UUID tree and 4281 * to delete all entries that contain outdated data. 4282 * 2nd step is to add all missing entries to the UUID tree. 
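 * An entry is considered outdated when btrfs_check_uuid_tree_entry()
 * (the iteration callback above) reports that the referenced subvolume
 * no longer exists or no longer carries that uuid/received_uuid.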
4283 */ 4284 ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry); 4285 if (ret < 0) { 4286 btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret); 4287 up(&fs_info->uuid_tree_rescan_sem); 4288 return ret; 4289 } 4290 return btrfs_uuid_scan_kthread(data); 4291 } 4292 4293 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info) 4294 { 4295 struct btrfs_trans_handle *trans; 4296 struct btrfs_root *tree_root = fs_info->tree_root; 4297 struct btrfs_root *uuid_root; 4298 struct task_struct *task; 4299 int ret; 4300 4301 /* 4302 * 1 - root node 4303 * 1 - root item 4304 */ 4305 trans = btrfs_start_transaction(tree_root, 2); 4306 if (IS_ERR(trans)) 4307 return PTR_ERR(trans); 4308 4309 uuid_root = btrfs_create_tree(trans, fs_info, 4310 BTRFS_UUID_TREE_OBJECTID); 4311 if (IS_ERR(uuid_root)) { 4312 ret = PTR_ERR(uuid_root); 4313 btrfs_abort_transaction(trans, ret); 4314 btrfs_end_transaction(trans, tree_root); 4315 return ret; 4316 } 4317 4318 fs_info->uuid_root = uuid_root; 4319 4320 ret = btrfs_commit_transaction(trans, tree_root); 4321 if (ret) 4322 return ret; 4323 4324 down(&fs_info->uuid_tree_rescan_sem); 4325 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid"); 4326 if (IS_ERR(task)) { 4327 /* fs_info->update_uuid_tree_gen remains 0 in all error cases */ 4328 btrfs_warn(fs_info, "failed to start uuid_scan task"); 4329 up(&fs_info->uuid_tree_rescan_sem); 4330 return PTR_ERR(task); 4331 } 4332 4333 return 0; 4334 } 4335 4336 int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info) 4337 { 4338 struct task_struct *task; 4339 4340 down(&fs_info->uuid_tree_rescan_sem); 4341 task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid"); 4342 if (IS_ERR(task)) { 4343 /* fs_info->update_uuid_tree_gen remains 0 in all error cases */ 4344 btrfs_warn(fs_info, "failed to start uuid_rescan task"); 4345 up(&fs_info->uuid_tree_rescan_sem); 4346 return PTR_ERR(task); 4347 } 4348 4349 return 0; 4350 } 4351 4352 /* 4353 * shrinking a device means finding all of the device extents past 4354 * the new size, and then following the back refs to the chunks. 
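 * Each such chunk is relocated with btrfs_relocate_chunk(), walking the
 * device tree backwards from the highest key.offset and retrying once
 * if relocation runs out of space.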
4355 * The chunk relocation code actually frees the device extent 4356 */ 4357 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) 4358 { 4359 struct btrfs_trans_handle *trans; 4360 struct btrfs_root *root = device->dev_root; 4361 struct btrfs_dev_extent *dev_extent = NULL; 4362 struct btrfs_path *path; 4363 u64 length; 4364 u64 chunk_offset; 4365 int ret; 4366 int slot; 4367 int failed = 0; 4368 bool retried = false; 4369 bool checked_pending_chunks = false; 4370 struct extent_buffer *l; 4371 struct btrfs_key key; 4372 struct btrfs_super_block *super_copy = root->fs_info->super_copy; 4373 u64 old_total = btrfs_super_total_bytes(super_copy); 4374 u64 old_size = btrfs_device_get_total_bytes(device); 4375 u64 diff = old_size - new_size; 4376 4377 if (device->is_tgtdev_for_dev_replace) 4378 return -EINVAL; 4379 4380 path = btrfs_alloc_path(); 4381 if (!path) 4382 return -ENOMEM; 4383 4384 path->reada = READA_FORWARD; 4385 4386 lock_chunks(root); 4387 4388 btrfs_device_set_total_bytes(device, new_size); 4389 if (device->writeable) { 4390 device->fs_devices->total_rw_bytes -= diff; 4391 spin_lock(&root->fs_info->free_chunk_lock); 4392 root->fs_info->free_chunk_space -= diff; 4393 spin_unlock(&root->fs_info->free_chunk_lock); 4394 } 4395 unlock_chunks(root); 4396 4397 again: 4398 key.objectid = device->devid; 4399 key.offset = (u64)-1; 4400 key.type = BTRFS_DEV_EXTENT_KEY; 4401 4402 do { 4403 mutex_lock(&root->fs_info->delete_unused_bgs_mutex); 4404 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4405 if (ret < 0) { 4406 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex); 4407 goto done; 4408 } 4409 4410 ret = btrfs_previous_item(root, path, 0, key.type); 4411 if (ret) 4412 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex); 4413 if (ret < 0) 4414 goto done; 4415 if (ret) { 4416 ret = 0; 4417 btrfs_release_path(path); 4418 break; 4419 } 4420 4421 l = path->nodes[0]; 4422 slot = path->slots[0]; 4423 btrfs_item_key_to_cpu(l, &key, path->slots[0]); 4424 4425 if (key.objectid != device->devid) { 4426 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex); 4427 btrfs_release_path(path); 4428 break; 4429 } 4430 4431 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 4432 length = btrfs_dev_extent_length(l, dev_extent); 4433 4434 if (key.offset + length <= new_size) { 4435 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex); 4436 btrfs_release_path(path); 4437 break; 4438 } 4439 4440 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); 4441 btrfs_release_path(path); 4442 4443 ret = btrfs_relocate_chunk(root, chunk_offset); 4444 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex); 4445 if (ret && ret != -ENOSPC) 4446 goto done; 4447 if (ret == -ENOSPC) 4448 failed++; 4449 } while (key.offset-- > 0); 4450 4451 if (failed && !retried) { 4452 failed = 0; 4453 retried = true; 4454 goto again; 4455 } else if (failed && retried) { 4456 ret = -ENOSPC; 4457 goto done; 4458 } 4459 4460 /* Shrinking succeeded, else we would be at "done". */ 4461 trans = btrfs_start_transaction(root, 0); 4462 if (IS_ERR(trans)) { 4463 ret = PTR_ERR(trans); 4464 goto done; 4465 } 4466 4467 lock_chunks(root); 4468 4469 /* 4470 * We checked in the above loop all device extents that were already in 4471 * the device tree. 
However before we have updated the device's 4472 * total_bytes to the new size, we might have had chunk allocations that 4473 * have not completed yet (new block groups attached to transaction 4474 * handles), and therefore their device extents were not yet in the 4475 * device tree and we missed them in the loop above. So if we have any 4476 * pending chunk using a device extent that overlaps the device range 4477 * that we cannot use anymore, commit the current transaction and 4478 * repeat the search on the device tree - this way we guarantee we will 4479 * not have chunks using device extents that end beyond 'new_size'. 4480 */ 4481 if (!checked_pending_chunks) { 4482 u64 start = new_size; 4483 u64 len = old_size - new_size; 4484 4485 if (contains_pending_extent(trans->transaction, device, 4486 &start, len)) { 4487 unlock_chunks(root); 4488 checked_pending_chunks = true; 4489 failed = 0; 4490 retried = false; 4491 ret = btrfs_commit_transaction(trans, root); 4492 if (ret) 4493 goto done; 4494 goto again; 4495 } 4496 } 4497 4498 btrfs_device_set_disk_total_bytes(device, new_size); 4499 if (list_empty(&device->resized_list)) 4500 list_add_tail(&device->resized_list, 4501 &root->fs_info->fs_devices->resized_devices); 4502 4503 WARN_ON(diff > old_total); 4504 btrfs_set_super_total_bytes(super_copy, old_total - diff); 4505 unlock_chunks(root); 4506 4507 /* Now btrfs_update_device() will change the on-disk size. */ 4508 ret = btrfs_update_device(trans, device); 4509 btrfs_end_transaction(trans, root); 4510 done: 4511 btrfs_free_path(path); 4512 if (ret) { 4513 lock_chunks(root); 4514 btrfs_device_set_total_bytes(device, old_size); 4515 if (device->writeable) 4516 device->fs_devices->total_rw_bytes += diff; 4517 spin_lock(&root->fs_info->free_chunk_lock); 4518 root->fs_info->free_chunk_space += diff; 4519 spin_unlock(&root->fs_info->free_chunk_lock); 4520 unlock_chunks(root); 4521 } 4522 return ret; 4523 } 4524 4525 static int btrfs_add_system_chunk(struct btrfs_root *root, 4526 struct btrfs_key *key, 4527 struct btrfs_chunk *chunk, int item_size) 4528 { 4529 struct btrfs_super_block *super_copy = root->fs_info->super_copy; 4530 struct btrfs_disk_key disk_key; 4531 u32 array_size; 4532 u8 *ptr; 4533 4534 lock_chunks(root); 4535 array_size = btrfs_super_sys_array_size(super_copy); 4536 if (array_size + item_size + sizeof(disk_key) 4537 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) { 4538 unlock_chunks(root); 4539 return -EFBIG; 4540 } 4541 4542 ptr = super_copy->sys_chunk_array + array_size; 4543 btrfs_cpu_key_to_disk(&disk_key, key); 4544 memcpy(ptr, &disk_key, sizeof(disk_key)); 4545 ptr += sizeof(disk_key); 4546 memcpy(ptr, chunk, item_size); 4547 item_size += sizeof(disk_key); 4548 btrfs_set_super_sys_array_size(super_copy, array_size + item_size); 4549 unlock_chunks(root); 4550 4551 return 0; 4552 } 4553 4554 /* 4555 * sort the devices in descending order by max_avail, total_avail 4556 */ 4557 static int btrfs_cmp_device_info(const void *a, const void *b) 4558 { 4559 const struct btrfs_device_info *di_a = a; 4560 const struct btrfs_device_info *di_b = b; 4561 4562 if (di_a->max_avail > di_b->max_avail) 4563 return -1; 4564 if (di_a->max_avail < di_b->max_avail) 4565 return 1; 4566 if (di_a->total_avail > di_b->total_avail) 4567 return -1; 4568 if (di_a->total_avail < di_b->total_avail) 4569 return 1; 4570 return 0; 4571 } 4572 4573 static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target) 4574 { 4575 /* TODO allow them to set a preferred stripe size */ 4576 return SZ_64K; 4577 } 4578 
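/*
 * Illustrative only: given devices A (max_avail 10G, total_avail 20G),
 * B (max_avail 10G, total_avail 30G) and C (max_avail 12G, total_avail
 * 15G), btrfs_cmp_device_info() above orders them C, B, A -- the
 * biggest contiguous hole wins, with total_avail breaking ties.
 */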
4579 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type) 4580 { 4581 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK)) 4582 return; 4583 4584 btrfs_set_fs_incompat(info, RAID56); 4585 } 4586 4587 #define BTRFS_MAX_DEVS(r) ((BTRFS_MAX_ITEM_SIZE(r) \ 4588 - sizeof(struct btrfs_chunk)) \ 4589 / sizeof(struct btrfs_stripe) + 1) 4590 4591 #define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE \ 4592 - 2 * sizeof(struct btrfs_disk_key) \ 4593 - 2 * sizeof(struct btrfs_chunk)) \ 4594 / sizeof(struct btrfs_stripe) + 1) 4595 4596 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, 4597 struct btrfs_root *extent_root, u64 start, 4598 u64 type) 4599 { 4600 struct btrfs_fs_info *info = extent_root->fs_info; 4601 struct btrfs_fs_devices *fs_devices = info->fs_devices; 4602 struct list_head *cur; 4603 struct map_lookup *map = NULL; 4604 struct extent_map_tree *em_tree; 4605 struct extent_map *em; 4606 struct btrfs_device_info *devices_info = NULL; 4607 u64 total_avail; 4608 int num_stripes; /* total number of stripes to allocate */ 4609 int data_stripes; /* number of stripes that count for 4610 block group size */ 4611 int sub_stripes; /* sub_stripes info for map */ 4612 int dev_stripes; /* stripes per dev */ 4613 int devs_max; /* max devs to use */ 4614 int devs_min; /* min devs needed */ 4615 int devs_increment; /* ndevs has to be a multiple of this */ 4616 int ncopies; /* how many copies the data has */ 4617 int ret; 4618 u64 max_stripe_size; 4619 u64 max_chunk_size; 4620 u64 stripe_size; 4621 u64 num_bytes; 4622 u64 raid_stripe_len = BTRFS_STRIPE_LEN; 4623 int ndevs; 4624 int i; 4625 int j; 4626 int index; 4627 4628 BUG_ON(!alloc_profile_is_valid(type, 0)); 4629 4630 if (list_empty(&fs_devices->alloc_list)) 4631 return -ENOSPC; 4632 4633 index = __get_raid_index(type); 4634 4635 sub_stripes = btrfs_raid_array[index].sub_stripes; 4636 dev_stripes = btrfs_raid_array[index].dev_stripes; 4637 devs_max = btrfs_raid_array[index].devs_max; 4638 devs_min = btrfs_raid_array[index].devs_min; 4639 devs_increment = btrfs_raid_array[index].devs_increment; 4640 ncopies = btrfs_raid_array[index].ncopies; 4641 4642 if (type & BTRFS_BLOCK_GROUP_DATA) { 4643 max_stripe_size = SZ_1G; 4644 max_chunk_size = 10 * max_stripe_size; 4645 if (!devs_max) 4646 devs_max = BTRFS_MAX_DEVS(info->chunk_root); 4647 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 4648 /* for larger filesystems, use larger metadata chunks */ 4649 if (fs_devices->total_rw_bytes > 50ULL * SZ_1G) 4650 max_stripe_size = SZ_1G; 4651 else 4652 max_stripe_size = SZ_256M; 4653 max_chunk_size = max_stripe_size; 4654 if (!devs_max) 4655 devs_max = BTRFS_MAX_DEVS(info->chunk_root); 4656 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 4657 max_stripe_size = SZ_32M; 4658 max_chunk_size = 2 * max_stripe_size; 4659 if (!devs_max) 4660 devs_max = BTRFS_MAX_DEVS_SYS_CHUNK; 4661 } else { 4662 btrfs_err(info, "invalid chunk type 0x%llx requested", 4663 type); 4664 BUG_ON(1); 4665 } 4666 4667 /* we don't want a chunk larger than 10% of writeable space */ 4668 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1), 4669 max_chunk_size); 4670 4671 devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info), 4672 GFP_NOFS); 4673 if (!devices_info) 4674 return -ENOMEM; 4675 4676 cur = fs_devices->alloc_list.next; 4677 4678 /* 4679 * in the first pass through the devices list, we gather information 4680 * about the available holes on each device. 
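 * A hole only counts if it can hold at least one stripe per device
 * (BTRFS_STRIPE_LEN * dev_stripes); read-only devices, devices missing
 * from the FS metadata and dev-replace targets are skipped.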
4681 */ 4682 ndevs = 0; 4683 while (cur != &fs_devices->alloc_list) { 4684 struct btrfs_device *device; 4685 u64 max_avail; 4686 u64 dev_offset; 4687 4688 device = list_entry(cur, struct btrfs_device, dev_alloc_list); 4689 4690 cur = cur->next; 4691 4692 if (!device->writeable) { 4693 WARN(1, KERN_ERR 4694 "BTRFS: read-only device in alloc_list\n"); 4695 continue; 4696 } 4697 4698 if (!device->in_fs_metadata || 4699 device->is_tgtdev_for_dev_replace) 4700 continue; 4701 4702 if (device->total_bytes > device->bytes_used) 4703 total_avail = device->total_bytes - device->bytes_used; 4704 else 4705 total_avail = 0; 4706 4707 /* If there is no space on this device, skip it. */ 4708 if (total_avail == 0) 4709 continue; 4710 4711 ret = find_free_dev_extent(trans, device, 4712 max_stripe_size * dev_stripes, 4713 &dev_offset, &max_avail); 4714 if (ret && ret != -ENOSPC) 4715 goto error; 4716 4717 if (ret == 0) 4718 max_avail = max_stripe_size * dev_stripes; 4719 4720 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes) 4721 continue; 4722 4723 if (ndevs == fs_devices->rw_devices) { 4724 WARN(1, "%s: found more than %llu devices\n", 4725 __func__, fs_devices->rw_devices); 4726 break; 4727 } 4728 devices_info[ndevs].dev_offset = dev_offset; 4729 devices_info[ndevs].max_avail = max_avail; 4730 devices_info[ndevs].total_avail = total_avail; 4731 devices_info[ndevs].dev = device; 4732 ++ndevs; 4733 } 4734 4735 /* 4736 * now sort the devices by hole size / available space 4737 */ 4738 sort(devices_info, ndevs, sizeof(struct btrfs_device_info), 4739 btrfs_cmp_device_info, NULL); 4740 4741 /* round down to number of usable stripes */ 4742 ndevs -= ndevs % devs_increment; 4743 4744 if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) { 4745 ret = -ENOSPC; 4746 goto error; 4747 } 4748 4749 if (devs_max && ndevs > devs_max) 4750 ndevs = devs_max; 4751 /* 4752 * the primary goal is to maximize the number of stripes, so use as many 4753 * devices as possible, even if the stripes are not maximum sized. 
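 * Since devices_info[] is sorted by max_avail in descending order, the
 * last entry chosen (devices_info[ndevs - 1]) has the smallest hole,
 * so a stripe of that size is guaranteed to fit on every chosen device.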
4754 */ 4755 stripe_size = devices_info[ndevs-1].max_avail; 4756 num_stripes = ndevs * dev_stripes; 4757 4758 /* 4759 * this will have to be fixed for RAID1 and RAID10 over 4760 * more drives 4761 */ 4762 data_stripes = num_stripes / ncopies; 4763 4764 if (type & BTRFS_BLOCK_GROUP_RAID5) { 4765 raid_stripe_len = find_raid56_stripe_len(ndevs - 1, 4766 extent_root->stripesize); 4767 data_stripes = num_stripes - 1; 4768 } 4769 if (type & BTRFS_BLOCK_GROUP_RAID6) { 4770 raid_stripe_len = find_raid56_stripe_len(ndevs - 2, 4771 extent_root->stripesize); 4772 data_stripes = num_stripes - 2; 4773 } 4774 4775 /* 4776 * Use the number of data stripes to figure out how big this chunk 4777 * is really going to be in terms of logical address space, 4778 * and compare that answer with the max chunk size 4779 */ 4780 if (stripe_size * data_stripes > max_chunk_size) { 4781 u64 mask = (1ULL << 24) - 1; 4782 4783 stripe_size = div_u64(max_chunk_size, data_stripes); 4784 4785 /* bump the answer up to a 16MB boundary */ 4786 stripe_size = (stripe_size + mask) & ~mask; 4787 4788 /* but don't go higher than the limits we found 4789 * while searching for free extents 4790 */ 4791 if (stripe_size > devices_info[ndevs-1].max_avail) 4792 stripe_size = devices_info[ndevs-1].max_avail; 4793 } 4794 4795 stripe_size = div_u64(stripe_size, dev_stripes); 4796 4797 /* align to BTRFS_STRIPE_LEN */ 4798 stripe_size = div_u64(stripe_size, raid_stripe_len); 4799 stripe_size *= raid_stripe_len; 4800 4801 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); 4802 if (!map) { 4803 ret = -ENOMEM; 4804 goto error; 4805 } 4806 map->num_stripes = num_stripes; 4807 4808 for (i = 0; i < ndevs; ++i) { 4809 for (j = 0; j < dev_stripes; ++j) { 4810 int s = i * dev_stripes + j; 4811 map->stripes[s].dev = devices_info[i].dev; 4812 map->stripes[s].physical = devices_info[i].dev_offset + 4813 j * stripe_size; 4814 } 4815 } 4816 map->sector_size = extent_root->sectorsize; 4817 map->stripe_len = raid_stripe_len; 4818 map->io_align = raid_stripe_len; 4819 map->io_width = raid_stripe_len; 4820 map->type = type; 4821 map->sub_stripes = sub_stripes; 4822 4823 num_bytes = stripe_size * data_stripes; 4824 4825 trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes); 4826 4827 em = alloc_extent_map(); 4828 if (!em) { 4829 kfree(map); 4830 ret = -ENOMEM; 4831 goto error; 4832 } 4833 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 4834 em->map_lookup = map; 4835 em->start = start; 4836 em->len = num_bytes; 4837 em->block_start = 0; 4838 em->block_len = em->len; 4839 em->orig_block_len = stripe_size; 4840 4841 em_tree = &extent_root->fs_info->mapping_tree.map_tree; 4842 write_lock(&em_tree->lock); 4843 ret = add_extent_mapping(em_tree, em, 0); 4844 if (!ret) { 4845 list_add_tail(&em->list, &trans->transaction->pending_chunks); 4846 atomic_inc(&em->refs); 4847 } 4848 write_unlock(&em_tree->lock); 4849 if (ret) { 4850 free_extent_map(em); 4851 goto error; 4852 } 4853 4854 ret = btrfs_make_block_group(trans, extent_root, 0, type, 4855 BTRFS_FIRST_CHUNK_TREE_OBJECTID, 4856 start, num_bytes); 4857 if (ret) 4858 goto error_del_extent; 4859 4860 for (i = 0; i < map->num_stripes; i++) { 4861 num_bytes = map->stripes[i].dev->bytes_used + stripe_size; 4862 btrfs_device_set_bytes_used(map->stripes[i].dev, num_bytes); 4863 } 4864 4865 spin_lock(&extent_root->fs_info->free_chunk_lock); 4866 extent_root->fs_info->free_chunk_space -= (stripe_size * 4867 map->num_stripes); 4868 spin_unlock(&extent_root->fs_info->free_chunk_lock); 4869 4870 free_extent_map(em); 
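/*
 * Note that the chunk item itself is not written here: the extent map
 * sits on the transaction's pending_chunks list and the corresponding
 * chunk tree items are inserted later via btrfs_finish_chunk_alloc().
 */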
4871 check_raid56_incompat_flag(extent_root->fs_info, type); 4872 4873 kfree(devices_info); 4874 return 0; 4875 4876 error_del_extent: 4877 write_lock(&em_tree->lock); 4878 remove_extent_mapping(em_tree, em); 4879 write_unlock(&em_tree->lock); 4880 4881 /* One for our allocation */ 4882 free_extent_map(em); 4883 /* One for the tree reference */ 4884 free_extent_map(em); 4885 /* One for the pending_chunks list reference */ 4886 free_extent_map(em); 4887 error: 4888 kfree(devices_info); 4889 return ret; 4890 } 4891 4892 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans, 4893 struct btrfs_root *extent_root, 4894 u64 chunk_offset, u64 chunk_size) 4895 { 4896 struct btrfs_key key; 4897 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root; 4898 struct btrfs_device *device; 4899 struct btrfs_chunk *chunk; 4900 struct btrfs_stripe *stripe; 4901 struct extent_map_tree *em_tree; 4902 struct extent_map *em; 4903 struct map_lookup *map; 4904 size_t item_size; 4905 u64 dev_offset; 4906 u64 stripe_size; 4907 int i = 0; 4908 int ret = 0; 4909 4910 em_tree = &extent_root->fs_info->mapping_tree.map_tree; 4911 read_lock(&em_tree->lock); 4912 em = lookup_extent_mapping(em_tree, chunk_offset, chunk_size); 4913 read_unlock(&em_tree->lock); 4914 4915 if (!em) { 4916 btrfs_crit(extent_root->fs_info, "unable to find logical " 4917 "%Lu len %Lu", chunk_offset, chunk_size); 4918 return -EINVAL; 4919 } 4920 4921 if (em->start != chunk_offset || em->len != chunk_size) { 4922 btrfs_crit(extent_root->fs_info, "found a bad mapping, wanted" 4923 " %Lu-%Lu, found %Lu-%Lu", chunk_offset, 4924 chunk_size, em->start, em->len); 4925 free_extent_map(em); 4926 return -EINVAL; 4927 } 4928 4929 map = em->map_lookup; 4930 item_size = btrfs_chunk_item_size(map->num_stripes); 4931 stripe_size = em->orig_block_len; 4932 4933 chunk = kzalloc(item_size, GFP_NOFS); 4934 if (!chunk) { 4935 ret = -ENOMEM; 4936 goto out; 4937 } 4938 4939 /* 4940 * Take the device list mutex to prevent races with the final phase of 4941 * a device replace operation that replaces the device object associated 4942 * with the map's stripes, because the device object's id can change 4943 * at any time during that final phase of the device replace operation 4944 * (dev-replace.c:btrfs_dev_replace_finishing()). 
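 * Holding the mutex across both loops below keeps the devids and UUIDs
 * copied into the on-disk stripes consistent with the devices that
 * were just updated.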
4945 */ 4946 mutex_lock(&chunk_root->fs_info->fs_devices->device_list_mutex); 4947 for (i = 0; i < map->num_stripes; i++) { 4948 device = map->stripes[i].dev; 4949 dev_offset = map->stripes[i].physical; 4950 4951 ret = btrfs_update_device(trans, device); 4952 if (ret) 4953 break; 4954 ret = btrfs_alloc_dev_extent(trans, device, 4955 chunk_root->root_key.objectid, 4956 BTRFS_FIRST_CHUNK_TREE_OBJECTID, 4957 chunk_offset, dev_offset, 4958 stripe_size); 4959 if (ret) 4960 break; 4961 } 4962 if (ret) { 4963 mutex_unlock(&chunk_root->fs_info->fs_devices->device_list_mutex); 4964 goto out; 4965 } 4966 4967 stripe = &chunk->stripe; 4968 for (i = 0; i < map->num_stripes; i++) { 4969 device = map->stripes[i].dev; 4970 dev_offset = map->stripes[i].physical; 4971 4972 btrfs_set_stack_stripe_devid(stripe, device->devid); 4973 btrfs_set_stack_stripe_offset(stripe, dev_offset); 4974 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE); 4975 stripe++; 4976 } 4977 mutex_unlock(&chunk_root->fs_info->fs_devices->device_list_mutex); 4978 4979 btrfs_set_stack_chunk_length(chunk, chunk_size); 4980 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid); 4981 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len); 4982 btrfs_set_stack_chunk_type(chunk, map->type); 4983 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes); 4984 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len); 4985 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len); 4986 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize); 4987 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes); 4988 4989 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 4990 key.type = BTRFS_CHUNK_ITEM_KEY; 4991 key.offset = chunk_offset; 4992 4993 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); 4994 if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 4995 /* 4996 * TODO: Cleanup of inserted chunk root in case of 4997 * failure. 4998 */ 4999 ret = btrfs_add_system_chunk(chunk_root, &key, chunk, 5000 item_size); 5001 } 5002 5003 out: 5004 kfree(chunk); 5005 free_extent_map(em); 5006 return ret; 5007 } 5008 5009 /* 5010 * Chunk allocation falls into two parts. The first part does the work 5011 * that makes the newly allocated chunk usable, but does not do any 5012 * operation that modifies the chunk tree. The second part does the work 5013 * that requires modifying the chunk tree. This division is important for 5014 * the bootstrap process of adding storage to a seed btrfs. 
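 * An illustrative pairing of the two parts (see btrfs_alloc_chunk()
 * below and the callers of btrfs_finish_chunk_alloc()):
 *
 *	chunk_offset = find_next_chunk(fs_info);
 *	__btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
 *		- first part: in-memory map, block group, pending chunk
 *	btrfs_finish_chunk_alloc(trans, extent_root, chunk_offset, size);
 *		- second part: chunk item and, for system chunks, the
 *		  super block's sys_chunk_array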
5015 */ 5016 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, 5017 struct btrfs_root *extent_root, u64 type) 5018 { 5019 u64 chunk_offset; 5020 5021 ASSERT(mutex_is_locked(&extent_root->fs_info->chunk_mutex)); 5022 chunk_offset = find_next_chunk(extent_root->fs_info); 5023 return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type); 5024 } 5025 5026 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans, 5027 struct btrfs_root *root, 5028 struct btrfs_device *device) 5029 { 5030 u64 chunk_offset; 5031 u64 sys_chunk_offset; 5032 u64 alloc_profile; 5033 struct btrfs_fs_info *fs_info = root->fs_info; 5034 struct btrfs_root *extent_root = fs_info->extent_root; 5035 int ret; 5036 5037 chunk_offset = find_next_chunk(fs_info); 5038 alloc_profile = btrfs_get_alloc_profile(extent_root, 0); 5039 ret = __btrfs_alloc_chunk(trans, extent_root, chunk_offset, 5040 alloc_profile); 5041 if (ret) 5042 return ret; 5043 5044 sys_chunk_offset = find_next_chunk(root->fs_info); 5045 alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0); 5046 ret = __btrfs_alloc_chunk(trans, extent_root, sys_chunk_offset, 5047 alloc_profile); 5048 return ret; 5049 } 5050 5051 static inline int btrfs_chunk_max_errors(struct map_lookup *map) 5052 { 5053 int max_errors; 5054 5055 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 | 5056 BTRFS_BLOCK_GROUP_RAID10 | 5057 BTRFS_BLOCK_GROUP_RAID5 | 5058 BTRFS_BLOCK_GROUP_DUP)) { 5059 max_errors = 1; 5060 } else if (map->type & BTRFS_BLOCK_GROUP_RAID6) { 5061 max_errors = 2; 5062 } else { 5063 max_errors = 0; 5064 } 5065 5066 return max_errors; 5067 } 5068 5069 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset) 5070 { 5071 struct extent_map *em; 5072 struct map_lookup *map; 5073 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree; 5074 int readonly = 0; 5075 int miss_ndevs = 0; 5076 int i; 5077 5078 read_lock(&map_tree->map_tree.lock); 5079 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1); 5080 read_unlock(&map_tree->map_tree.lock); 5081 if (!em) 5082 return 1; 5083 5084 map = em->map_lookup; 5085 for (i = 0; i < map->num_stripes; i++) { 5086 if (map->stripes[i].dev->missing) { 5087 miss_ndevs++; 5088 continue; 5089 } 5090 5091 if (!map->stripes[i].dev->writeable) { 5092 readonly = 1; 5093 goto end; 5094 } 5095 } 5096 5097 /* 5098 * If the number of missing devices is larger than max errors, 5099 * we can not write the data into that chunk successfully, so 5100 * set it readonly. 
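 * For example, a RAID6 chunk tolerates two missing devices
 * (btrfs_chunk_max_errors() above returns 2); with a third device
 * missing, writes could no longer reach enough stripes.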
5101 */ 5102 if (miss_ndevs > btrfs_chunk_max_errors(map)) 5103 readonly = 1; 5104 end: 5105 free_extent_map(em); 5106 return readonly; 5107 } 5108 5109 void btrfs_mapping_init(struct btrfs_mapping_tree *tree) 5110 { 5111 extent_map_tree_init(&tree->map_tree); 5112 } 5113 5114 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree) 5115 { 5116 struct extent_map *em; 5117 5118 while (1) { 5119 write_lock(&tree->map_tree.lock); 5120 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1); 5121 if (em) 5122 remove_extent_mapping(&tree->map_tree, em); 5123 write_unlock(&tree->map_tree.lock); 5124 if (!em) 5125 break; 5126 /* once for us */ 5127 free_extent_map(em); 5128 /* once for the tree */ 5129 free_extent_map(em); 5130 } 5131 } 5132 5133 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5134 { 5135 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree; 5136 struct extent_map *em; 5137 struct map_lookup *map; 5138 struct extent_map_tree *em_tree = &map_tree->map_tree; 5139 int ret; 5140 5141 read_lock(&em_tree->lock); 5142 em = lookup_extent_mapping(em_tree, logical, len); 5143 read_unlock(&em_tree->lock); 5144 5145 /* 5146 * We could return errors for these cases, but that could get ugly and 5147 * we'd probably do the same thing which is just not do anything else 5148 * and exit, so return 1 so the callers don't try to use other copies. 5149 */ 5150 if (!em) { 5151 btrfs_crit(fs_info, "No mapping for %Lu-%Lu", logical, 5152 logical+len); 5153 return 1; 5154 } 5155 5156 if (em->start > logical || em->start + em->len < logical) { 5157 btrfs_crit(fs_info, "Invalid mapping for %Lu-%Lu, got " 5158 "%Lu-%Lu", logical, logical+len, em->start, 5159 em->start + em->len); 5160 free_extent_map(em); 5161 return 1; 5162 } 5163 5164 map = em->map_lookup; 5165 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1)) 5166 ret = map->num_stripes; 5167 else if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5168 ret = map->sub_stripes; 5169 else if (map->type & BTRFS_BLOCK_GROUP_RAID5) 5170 ret = 2; 5171 else if (map->type & BTRFS_BLOCK_GROUP_RAID6) 5172 ret = 3; 5173 else 5174 ret = 1; 5175 free_extent_map(em); 5176 5177 btrfs_dev_replace_lock(&fs_info->dev_replace, 0); 5178 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) 5179 ret++; 5180 btrfs_dev_replace_unlock(&fs_info->dev_replace, 0); 5181 5182 return ret; 5183 } 5184 5185 unsigned long btrfs_full_stripe_len(struct btrfs_root *root, 5186 struct btrfs_mapping_tree *map_tree, 5187 u64 logical) 5188 { 5189 struct extent_map *em; 5190 struct map_lookup *map; 5191 struct extent_map_tree *em_tree = &map_tree->map_tree; 5192 unsigned long len = root->sectorsize; 5193 5194 read_lock(&em_tree->lock); 5195 em = lookup_extent_mapping(em_tree, logical, len); 5196 read_unlock(&em_tree->lock); 5197 BUG_ON(!em); 5198 5199 BUG_ON(em->start > logical || em->start + em->len < logical); 5200 map = em->map_lookup; 5201 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5202 len = map->stripe_len * nr_data_stripes(map); 5203 free_extent_map(em); 5204 return len; 5205 } 5206 5207 int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree, 5208 u64 logical, u64 len, int mirror_num) 5209 { 5210 struct extent_map *em; 5211 struct map_lookup *map; 5212 struct extent_map_tree *em_tree = &map_tree->map_tree; 5213 int ret = 0; 5214 5215 read_lock(&em_tree->lock); 5216 em = lookup_extent_mapping(em_tree, logical, len); 5217 read_unlock(&em_tree->lock); 5218 BUG_ON(!em); 5219 5220 BUG_ON(em->start > logical || em->start + em->len < 
logical); 5221 map = em->map_lookup; 5222 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5223 ret = 1; 5224 free_extent_map(em); 5225 return ret; 5226 } 5227 5228 static int find_live_mirror(struct btrfs_fs_info *fs_info, 5229 struct map_lookup *map, int first, int num, 5230 int optimal, int dev_replace_is_ongoing) 5231 { 5232 int i; 5233 int tolerance; 5234 struct btrfs_device *srcdev; 5235 5236 if (dev_replace_is_ongoing && 5237 fs_info->dev_replace.cont_reading_from_srcdev_mode == 5238 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID) 5239 srcdev = fs_info->dev_replace.srcdev; 5240 else 5241 srcdev = NULL; 5242 5243 /* 5244 * try to avoid the drive that is the source drive for a 5245 * dev-replace procedure, only choose it if no other non-missing 5246 * mirror is available 5247 */ 5248 for (tolerance = 0; tolerance < 2; tolerance++) { 5249 if (map->stripes[optimal].dev->bdev && 5250 (tolerance || map->stripes[optimal].dev != srcdev)) 5251 return optimal; 5252 for (i = first; i < first + num; i++) { 5253 if (map->stripes[i].dev->bdev && 5254 (tolerance || map->stripes[i].dev != srcdev)) 5255 return i; 5256 } 5257 } 5258 5259 /* we couldn't find one that doesn't fail. Just return something 5260 * and the io error handling code will clean up eventually 5261 */ 5262 return optimal; 5263 } 5264 5265 static inline int parity_smaller(u64 a, u64 b) 5266 { 5267 return a > b; 5268 } 5269 5270 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */ 5271 static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes) 5272 { 5273 struct btrfs_bio_stripe s; 5274 int i; 5275 u64 l; 5276 int again = 1; 5277 5278 while (again) { 5279 again = 0; 5280 for (i = 0; i < num_stripes - 1; i++) { 5281 if (parity_smaller(bbio->raid_map[i], 5282 bbio->raid_map[i+1])) { 5283 s = bbio->stripes[i]; 5284 l = bbio->raid_map[i]; 5285 bbio->stripes[i] = bbio->stripes[i+1]; 5286 bbio->raid_map[i] = bbio->raid_map[i+1]; 5287 bbio->stripes[i+1] = s; 5288 bbio->raid_map[i+1] = l; 5289 5290 again = 1; 5291 } 5292 } 5293 } 5294 } 5295 5296 static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes) 5297 { 5298 struct btrfs_bio *bbio = kzalloc( 5299 /* the size of the btrfs_bio */ 5300 sizeof(struct btrfs_bio) + 5301 /* plus the variable array for the stripes */ 5302 sizeof(struct btrfs_bio_stripe) * (total_stripes) + 5303 /* plus the variable array for the tgt dev */ 5304 sizeof(int) * (real_stripes) + 5305 /* 5306 * plus the raid_map, which includes both the tgt dev 5307 * and the stripes 5308 */ 5309 sizeof(u64) * (total_stripes), 5310 GFP_NOFS|__GFP_NOFAIL); 5311 5312 atomic_set(&bbio->error, 0); 5313 atomic_set(&bbio->refs, 1); 5314 5315 return bbio; 5316 } 5317 5318 void btrfs_get_bbio(struct btrfs_bio *bbio) 5319 { 5320 WARN_ON(!atomic_read(&bbio->refs)); 5321 atomic_inc(&bbio->refs); 5322 } 5323 5324 void btrfs_put_bbio(struct btrfs_bio *bbio) 5325 { 5326 if (!bbio) 5327 return; 5328 if (atomic_dec_and_test(&bbio->refs)) 5329 kfree(bbio); 5330 } 5331 5332 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op, 5333 u64 logical, u64 *length, 5334 struct btrfs_bio **bbio_ret, 5335 int mirror_num, int need_raid_map) 5336 { 5337 struct extent_map *em; 5338 struct map_lookup *map; 5339 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree; 5340 struct extent_map_tree *em_tree = &map_tree->map_tree; 5341 u64 offset; 5342 u64 stripe_offset; 5343 u64 stripe_end_offset; 5344 u64 stripe_nr; 5345 u64 stripe_nr_orig; 5346 u64 stripe_nr_end; 5347 u64 
stripe_len; 5348 u32 stripe_index; 5349 int i; 5350 int ret = 0; 5351 int num_stripes; 5352 int max_errors = 0; 5353 int tgtdev_indexes = 0; 5354 struct btrfs_bio *bbio = NULL; 5355 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; 5356 int dev_replace_is_ongoing = 0; 5357 int num_alloc_stripes; 5358 int patch_the_first_stripe_for_dev_replace = 0; 5359 u64 physical_to_patch_in_first_stripe = 0; 5360 u64 raid56_full_stripe_start = (u64)-1; 5361 5362 read_lock(&em_tree->lock); 5363 em = lookup_extent_mapping(em_tree, logical, *length); 5364 read_unlock(&em_tree->lock); 5365 5366 if (!em) { 5367 btrfs_crit(fs_info, "unable to find logical %llu len %llu", 5368 logical, *length); 5369 return -EINVAL; 5370 } 5371 5372 if (em->start > logical || em->start + em->len < logical) { 5373 btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, " 5374 "found %Lu-%Lu", logical, em->start, 5375 em->start + em->len); 5376 free_extent_map(em); 5377 return -EINVAL; 5378 } 5379 5380 map = em->map_lookup; 5381 offset = logical - em->start; 5382 5383 stripe_len = map->stripe_len; 5384 stripe_nr = offset; 5385 /* 5386 * stripe_nr counts the total number of stripes we have to stride 5387 * to get to this block 5388 */ 5389 stripe_nr = div64_u64(stripe_nr, stripe_len); 5390 5391 stripe_offset = stripe_nr * stripe_len; 5392 if (offset < stripe_offset) { 5393 btrfs_crit(fs_info, "stripe math has gone wrong, " 5394 "stripe_offset=%llu, offset=%llu, start=%llu, " 5395 "logical=%llu, stripe_len=%llu", 5396 stripe_offset, offset, em->start, logical, 5397 stripe_len); 5398 free_extent_map(em); 5399 return -EINVAL; 5400 } 5401 5402 /* stripe_offset is the offset of this block in its stripe*/ 5403 stripe_offset = offset - stripe_offset; 5404 5405 /* if we're here for raid56, we need to know the stripe aligned start */ 5406 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 5407 unsigned long full_stripe_len = stripe_len * nr_data_stripes(map); 5408 raid56_full_stripe_start = offset; 5409 5410 /* allow a write of a full stripe, but make sure we don't 5411 * allow straddling of stripes 5412 */ 5413 raid56_full_stripe_start = div64_u64(raid56_full_stripe_start, 5414 full_stripe_len); 5415 raid56_full_stripe_start *= full_stripe_len; 5416 } 5417 5418 if (op == REQ_OP_DISCARD) { 5419 /* we don't discard raid56 yet */ 5420 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 5421 ret = -EOPNOTSUPP; 5422 goto out; 5423 } 5424 *length = min_t(u64, em->len - offset, *length); 5425 } else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { 5426 u64 max_len; 5427 /* For writes to RAID[56], allow a full stripeset across all disks. 5428 For other RAID types and for RAID[56] reads, just allow a single 5429 stripe (on a single disk). 
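   For example, with a 64K stripe_len and three data stripes, a single
   RAID[56] write bio may span up to 192K of logical address space,
   while reads stay within one 64K stripe.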
*/ 5430 if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && 5431 (op == REQ_OP_WRITE)) { 5432 max_len = stripe_len * nr_data_stripes(map) - 5433 (offset - raid56_full_stripe_start); 5434 } else { 5435 /* we limit the length of each bio to what fits in a stripe */ 5436 max_len = stripe_len - stripe_offset; 5437 } 5438 *length = min_t(u64, em->len - offset, max_len); 5439 } else { 5440 *length = em->len - offset; 5441 } 5442 5443 /* This is for when we're called from btrfs_merge_bio_hook() and all 5444 it cares about is the length */ 5445 if (!bbio_ret) 5446 goto out; 5447 5448 btrfs_dev_replace_lock(dev_replace, 0); 5449 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace); 5450 if (!dev_replace_is_ongoing) 5451 btrfs_dev_replace_unlock(dev_replace, 0); 5452 else 5453 btrfs_dev_replace_set_lock_blocking(dev_replace); 5454 5455 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 && 5456 op != REQ_OP_WRITE && op != REQ_OP_DISCARD && 5457 op != REQ_GET_READ_MIRRORS && dev_replace->tgtdev != NULL) { 5458 /* 5459 * in dev-replace case, for repair case (that's the only 5460 * case where the mirror is selected explicitly when 5461 * calling btrfs_map_block), blocks left of the left cursor 5462 * can also be read from the target drive. 5463 * For REQ_GET_READ_MIRRORS, the target drive is added as 5464 * the last one to the array of stripes. For READ, it also 5465 * needs to be supported using the same mirror number. 5466 * If the requested block is not left of the left cursor, 5467 * EIO is returned. This can happen because btrfs_num_copies() 5468 * returns one more in the dev-replace case. 5469 */ 5470 u64 tmp_length = *length; 5471 struct btrfs_bio *tmp_bbio = NULL; 5472 int tmp_num_stripes; 5473 u64 srcdev_devid = dev_replace->srcdev->devid; 5474 int index_srcdev = 0; 5475 int found = 0; 5476 u64 physical_of_found = 0; 5477 5478 ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, 5479 logical, &tmp_length, &tmp_bbio, 0, 0); 5480 if (ret) { 5481 WARN_ON(tmp_bbio != NULL); 5482 goto out; 5483 } 5484 5485 tmp_num_stripes = tmp_bbio->num_stripes; 5486 if (mirror_num > tmp_num_stripes) { 5487 /* 5488 * REQ_GET_READ_MIRRORS does not contain this 5489 * mirror, that means that the requested area 5490 * is not left of the left cursor 5491 */ 5492 ret = -EIO; 5493 btrfs_put_bbio(tmp_bbio); 5494 goto out; 5495 } 5496 5497 /* 5498 * process the rest of the function using the mirror_num 5499 * of the source drive. Therefore look it up first. 5500 * At the end, patch the device pointer to the one of the 5501 * target drive. 
5502 */ 5503 for (i = 0; i < tmp_num_stripes; i++) { 5504 if (tmp_bbio->stripes[i].dev->devid != srcdev_devid) 5505 continue; 5506 5507 /* 5508 * In case of DUP, in order to keep it simple, only add 5509 * the mirror with the lowest physical address 5510 */ 5511 if (found && 5512 physical_of_found <= tmp_bbio->stripes[i].physical) 5513 continue; 5514 5515 index_srcdev = i; 5516 found = 1; 5517 physical_of_found = tmp_bbio->stripes[i].physical; 5518 } 5519 5520 btrfs_put_bbio(tmp_bbio); 5521 5522 if (!found) { 5523 WARN_ON(1); 5524 ret = -EIO; 5525 goto out; 5526 } 5527 5528 mirror_num = index_srcdev + 1; 5529 patch_the_first_stripe_for_dev_replace = 1; 5530 physical_to_patch_in_first_stripe = physical_of_found; 5531 } else if (mirror_num > map->num_stripes) { 5532 mirror_num = 0; 5533 } 5534 5535 num_stripes = 1; 5536 stripe_index = 0; 5537 stripe_nr_orig = stripe_nr; 5538 stripe_nr_end = ALIGN(offset + *length, map->stripe_len); 5539 stripe_nr_end = div_u64(stripe_nr_end, map->stripe_len); 5540 stripe_end_offset = stripe_nr_end * map->stripe_len - 5541 (offset + *length); 5542 5543 if (map->type & BTRFS_BLOCK_GROUP_RAID0) { 5544 if (op == REQ_OP_DISCARD) 5545 num_stripes = min_t(u64, map->num_stripes, 5546 stripe_nr_end - stripe_nr_orig); 5547 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 5548 &stripe_index); 5549 if (op != REQ_OP_WRITE && op != REQ_OP_DISCARD && 5550 op != REQ_GET_READ_MIRRORS) 5551 mirror_num = 1; 5552 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) { 5553 if (op == REQ_OP_WRITE || op == REQ_OP_DISCARD || 5554 op == REQ_GET_READ_MIRRORS) 5555 num_stripes = map->num_stripes; 5556 else if (mirror_num) 5557 stripe_index = mirror_num - 1; 5558 else { 5559 stripe_index = find_live_mirror(fs_info, map, 0, 5560 map->num_stripes, 5561 current->pid % map->num_stripes, 5562 dev_replace_is_ongoing); 5563 mirror_num = stripe_index + 1; 5564 } 5565 5566 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { 5567 if (op == REQ_OP_WRITE || op == REQ_OP_DISCARD || 5568 op == REQ_GET_READ_MIRRORS) { 5569 num_stripes = map->num_stripes; 5570 } else if (mirror_num) { 5571 stripe_index = mirror_num - 1; 5572 } else { 5573 mirror_num = 1; 5574 } 5575 5576 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { 5577 u32 factor = map->num_stripes / map->sub_stripes; 5578 5579 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 5580 stripe_index *= map->sub_stripes; 5581 5582 if (op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS) 5583 num_stripes = map->sub_stripes; 5584 else if (op == REQ_OP_DISCARD) 5585 num_stripes = min_t(u64, map->sub_stripes * 5586 (stripe_nr_end - stripe_nr_orig), 5587 map->num_stripes); 5588 else if (mirror_num) 5589 stripe_index += mirror_num - 1; 5590 else { 5591 int old_stripe_index = stripe_index; 5592 stripe_index = find_live_mirror(fs_info, map, 5593 stripe_index, 5594 map->sub_stripes, stripe_index + 5595 current->pid % map->sub_stripes, 5596 dev_replace_is_ongoing); 5597 mirror_num = stripe_index - old_stripe_index + 1; 5598 } 5599 5600 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 5601 if (need_raid_map && 5602 (op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS || 5603 mirror_num > 1)) { 5604 /* push stripe_nr back to the start of the full stripe */ 5605 stripe_nr = div_u64(raid56_full_stripe_start, 5606 stripe_len * nr_data_stripes(map)); 5607 5608 /* RAID[56] write or recovery. 
Return all stripes */ 5609 num_stripes = map->num_stripes; 5610 max_errors = nr_parity_stripes(map); 5611 5612 *length = map->stripe_len; 5613 stripe_index = 0; 5614 stripe_offset = 0; 5615 } else { 5616 /* 5617 * Mirror #0 or #1 means the original data block. 5618 * Mirror #2 is RAID5 parity block. 5619 * Mirror #3 is RAID6 Q block. 5620 */ 5621 stripe_nr = div_u64_rem(stripe_nr, 5622 nr_data_stripes(map), &stripe_index); 5623 if (mirror_num > 1) 5624 stripe_index = nr_data_stripes(map) + 5625 mirror_num - 2; 5626 5627 /* We distribute the parity blocks across stripes */ 5628 div_u64_rem(stripe_nr + stripe_index, map->num_stripes, 5629 &stripe_index); 5630 if ((op != REQ_OP_WRITE && op != REQ_OP_DISCARD && 5631 op != REQ_GET_READ_MIRRORS) && mirror_num <= 1) 5632 mirror_num = 1; 5633 } 5634 } else { 5635 /* 5636 * after this, stripe_nr is the number of stripes on this 5637 * device we have to walk to find the data, and stripe_index is 5638 * the number of our device in the stripe array 5639 */ 5640 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 5641 &stripe_index); 5642 mirror_num = stripe_index + 1; 5643 } 5644 if (stripe_index >= map->num_stripes) { 5645 btrfs_crit(fs_info, "stripe index math went horribly wrong, " 5646 "got stripe_index=%u, num_stripes=%u", 5647 stripe_index, map->num_stripes); 5648 ret = -EINVAL; 5649 goto out; 5650 } 5651 5652 num_alloc_stripes = num_stripes; 5653 if (dev_replace_is_ongoing) { 5654 if (op == REQ_OP_WRITE || op == REQ_OP_DISCARD) 5655 num_alloc_stripes <<= 1; 5656 if (op == REQ_GET_READ_MIRRORS) 5657 num_alloc_stripes++; 5658 tgtdev_indexes = num_stripes; 5659 } 5660 5661 bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes); 5662 if (!bbio) { 5663 ret = -ENOMEM; 5664 goto out; 5665 } 5666 if (dev_replace_is_ongoing) 5667 bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes); 5668 5669 /* build raid_map */ 5670 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && 5671 need_raid_map && 5672 ((op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS) || 5673 mirror_num > 1)) { 5674 u64 tmp; 5675 unsigned rot; 5676 5677 bbio->raid_map = (u64 *)((void *)bbio->stripes + 5678 sizeof(struct btrfs_bio_stripe) * 5679 num_alloc_stripes + 5680 sizeof(int) * tgtdev_indexes); 5681 5682 /* Work out the disk rotation on this stripe-set */ 5683 div_u64_rem(stripe_nr, num_stripes, &rot); 5684 5685 /* Fill in the logical address of each stripe */ 5686 tmp = stripe_nr * nr_data_stripes(map); 5687 for (i = 0; i < nr_data_stripes(map); i++) 5688 bbio->raid_map[(i+rot) % num_stripes] = 5689 em->start + (tmp + i) * map->stripe_len; 5690 5691 bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE; 5692 if (map->type & BTRFS_BLOCK_GROUP_RAID6) 5693 bbio->raid_map[(i+rot+1) % num_stripes] = 5694 RAID6_Q_STRIPE; 5695 } 5696 5697 if (op == REQ_OP_DISCARD) { 5698 u32 factor = 0; 5699 u32 sub_stripes = 0; 5700 u64 stripes_per_dev = 0; 5701 u32 remaining_stripes = 0; 5702 u32 last_stripe = 0; 5703 5704 if (map->type & 5705 (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) { 5706 if (map->type & BTRFS_BLOCK_GROUP_RAID0) 5707 sub_stripes = 1; 5708 else 5709 sub_stripes = map->sub_stripes; 5710 5711 factor = map->num_stripes / sub_stripes; 5712 stripes_per_dev = div_u64_rem(stripe_nr_end - 5713 stripe_nr_orig, 5714 factor, 5715 &remaining_stripes); 5716 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe); 5717 last_stripe *= sub_stripes; 5718 } 5719 5720 for (i = 0; i < num_stripes; i++) { 5721 bbio->stripes[i].physical = 5722 map->stripes[stripe_index].physical + 5723 
stripe_offset + stripe_nr * map->stripe_len; 5724 bbio->stripes[i].dev = map->stripes[stripe_index].dev; 5725 5726 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 5727 BTRFS_BLOCK_GROUP_RAID10)) { 5728 bbio->stripes[i].length = stripes_per_dev * 5729 map->stripe_len; 5730 5731 if (i / sub_stripes < remaining_stripes) 5732 bbio->stripes[i].length += 5733 map->stripe_len; 5734 5735 /* 5736 * Special for the first stripe and 5737 * the last stripe: 5738 * 5739 * |-------|...|-------| 5740 * |----------| 5741 * off end_off 5742 */ 5743 if (i < sub_stripes) 5744 bbio->stripes[i].length -= 5745 stripe_offset; 5746 5747 if (stripe_index >= last_stripe && 5748 stripe_index <= (last_stripe + 5749 sub_stripes - 1)) 5750 bbio->stripes[i].length -= 5751 stripe_end_offset; 5752 5753 if (i == sub_stripes - 1) 5754 stripe_offset = 0; 5755 } else 5756 bbio->stripes[i].length = *length; 5757 5758 stripe_index++; 5759 if (stripe_index == map->num_stripes) { 5760 /* This could only happen for RAID0/10 */ 5761 stripe_index = 0; 5762 stripe_nr++; 5763 } 5764 } 5765 } else { 5766 for (i = 0; i < num_stripes; i++) { 5767 bbio->stripes[i].physical = 5768 map->stripes[stripe_index].physical + 5769 stripe_offset + 5770 stripe_nr * map->stripe_len; 5771 bbio->stripes[i].dev = 5772 map->stripes[stripe_index].dev; 5773 stripe_index++; 5774 } 5775 } 5776 5777 if (op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS) 5778 max_errors = btrfs_chunk_max_errors(map); 5779 5780 if (bbio->raid_map) 5781 sort_parity_stripes(bbio, num_stripes); 5782 5783 tgtdev_indexes = 0; 5784 if (dev_replace_is_ongoing && 5785 (op == REQ_OP_WRITE || op == REQ_OP_DISCARD) && 5786 dev_replace->tgtdev != NULL) { 5787 int index_where_to_add; 5788 u64 srcdev_devid = dev_replace->srcdev->devid; 5789 5790 /* 5791 * duplicate the write operations while the dev replace 5792 * procedure is running. Since the copying of the old disk 5793 * to the new disk takes place at run time while the 5794 * filesystem is mounted writable, the regular write 5795 * operations to the old disk have to be duplicated to go 5796 * to the new disk as well. 5797 * Note that device->missing is handled by the caller, and 5798 * that the write to the old disk is already set up in the 5799 * stripes array. 5800 */ 5801 index_where_to_add = num_stripes; 5802 for (i = 0; i < num_stripes; i++) { 5803 if (bbio->stripes[i].dev->devid == srcdev_devid) { 5804 /* write to new disk, too */ 5805 struct btrfs_bio_stripe *new = 5806 bbio->stripes + index_where_to_add; 5807 struct btrfs_bio_stripe *old = 5808 bbio->stripes + i; 5809 5810 new->physical = old->physical; 5811 new->length = old->length; 5812 new->dev = dev_replace->tgtdev; 5813 bbio->tgtdev_map[i] = index_where_to_add; 5814 index_where_to_add++; 5815 max_errors++; 5816 tgtdev_indexes++; 5817 } 5818 } 5819 num_stripes = index_where_to_add; 5820 } else if (dev_replace_is_ongoing && (op == REQ_GET_READ_MIRRORS) && 5821 dev_replace->tgtdev != NULL) { 5822 u64 srcdev_devid = dev_replace->srcdev->devid; 5823 int index_srcdev = 0; 5824 int found = 0; 5825 u64 physical_of_found = 0; 5826 5827 /* 5828 * During the dev-replace procedure, the target drive can 5829 * also be used to read data in case it is needed to repair 5830 * a corrupt block elsewhere. This is possible if the 5831 * requested area is left of the left cursor. In this area, 5832 * the target drive is a full copy of the source drive. 
5833 */ 5834 for (i = 0; i < num_stripes; i++) { 5835 if (bbio->stripes[i].dev->devid == srcdev_devid) { 5836 /* 5837 * In case of DUP, in order to keep it 5838 * simple, only add the mirror with the 5839 * lowest physical address 5840 */ 5841 if (found && 5842 physical_of_found <= 5843 bbio->stripes[i].physical) 5844 continue; 5845 index_srcdev = i; 5846 found = 1; 5847 physical_of_found = bbio->stripes[i].physical; 5848 } 5849 } 5850 if (found) { 5851 struct btrfs_bio_stripe *tgtdev_stripe = 5852 bbio->stripes + num_stripes; 5853 5854 tgtdev_stripe->physical = physical_of_found; 5855 tgtdev_stripe->length = 5856 bbio->stripes[index_srcdev].length; 5857 tgtdev_stripe->dev = dev_replace->tgtdev; 5858 bbio->tgtdev_map[index_srcdev] = num_stripes; 5859 5860 tgtdev_indexes++; 5861 num_stripes++; 5862 } 5863 } 5864 5865 *bbio_ret = bbio; 5866 bbio->map_type = map->type; 5867 bbio->num_stripes = num_stripes; 5868 bbio->max_errors = max_errors; 5869 bbio->mirror_num = mirror_num; 5870 bbio->num_tgtdevs = tgtdev_indexes; 5871 5872 /* 5873 * this is the case that REQ_READ && dev_replace_is_ongoing && 5874 * mirror_num == num_stripes + 1 && dev_replace target drive is 5875 * available as a mirror 5876 */ 5877 if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) { 5878 WARN_ON(num_stripes > 1); 5879 bbio->stripes[0].dev = dev_replace->tgtdev; 5880 bbio->stripes[0].physical = physical_to_patch_in_first_stripe; 5881 bbio->mirror_num = map->num_stripes + 1; 5882 } 5883 out: 5884 if (dev_replace_is_ongoing) { 5885 btrfs_dev_replace_clear_lock_blocking(dev_replace); 5886 btrfs_dev_replace_unlock(dev_replace, 0); 5887 } 5888 free_extent_map(em); 5889 return ret; 5890 } 5891 5892 int btrfs_map_block(struct btrfs_fs_info *fs_info, int op, 5893 u64 logical, u64 *length, 5894 struct btrfs_bio **bbio_ret, int mirror_num) 5895 { 5896 return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 5897 mirror_num, 0); 5898 } 5899 5900 /* For Scrub/replace */ 5901 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int op, 5902 u64 logical, u64 *length, 5903 struct btrfs_bio **bbio_ret, int mirror_num, 5904 int need_raid_map) 5905 { 5906 return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 5907 mirror_num, need_raid_map); 5908 } 5909 5910 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, 5911 u64 chunk_start, u64 physical, u64 devid, 5912 u64 **logical, int *naddrs, int *stripe_len) 5913 { 5914 struct extent_map_tree *em_tree = &map_tree->map_tree; 5915 struct extent_map *em; 5916 struct map_lookup *map; 5917 u64 *buf; 5918 u64 bytenr; 5919 u64 length; 5920 u64 stripe_nr; 5921 u64 rmap_len; 5922 int i, j, nr = 0; 5923 5924 read_lock(&em_tree->lock); 5925 em = lookup_extent_mapping(em_tree, chunk_start, 1); 5926 read_unlock(&em_tree->lock); 5927 5928 if (!em) { 5929 printk(KERN_ERR "BTRFS: couldn't find em for chunk %Lu\n", 5930 chunk_start); 5931 return -EIO; 5932 } 5933 5934 if (em->start != chunk_start) { 5935 printk(KERN_ERR "BTRFS: bad chunk start, em=%Lu, wanted=%Lu\n", 5936 em->start, chunk_start); 5937 free_extent_map(em); 5938 return -EIO; 5939 } 5940 map = em->map_lookup; 5941 5942 length = em->len; 5943 rmap_len = map->stripe_len; 5944 5945 if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5946 length = div_u64(length, map->num_stripes / map->sub_stripes); 5947 else if (map->type & BTRFS_BLOCK_GROUP_RAID0) 5948 length = div_u64(length, map->num_stripes); 5949 else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 5950 length = div_u64(length, nr_data_stripes(map)); 5951 rmap_len = 
map->stripe_len * nr_data_stripes(map); 5952 } 5953 5954 buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS); 5955 if (!buf) { free_extent_map(em); return -ENOMEM; } 5956 5957 for (i = 0; i < map->num_stripes; i++) { 5958 if (devid && map->stripes[i].dev->devid != devid) 5959 continue; 5960 if (map->stripes[i].physical > physical || 5961 map->stripes[i].physical + length <= physical) 5962 continue; 5963 5964 stripe_nr = physical - map->stripes[i].physical; 5965 stripe_nr = div_u64(stripe_nr, map->stripe_len); 5966 5967 if (map->type & BTRFS_BLOCK_GROUP_RAID10) { 5968 stripe_nr = stripe_nr * map->num_stripes + i; 5969 stripe_nr = div_u64(stripe_nr, map->sub_stripes); 5970 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) { 5971 stripe_nr = stripe_nr * map->num_stripes + i; 5972 } /* else if RAID[56], multiply by nr_data_stripes(). 5973 * Alternatively, just use rmap_len below instead of 5974 * map->stripe_len */ 5975 5976 bytenr = chunk_start + stripe_nr * rmap_len; 5977 WARN_ON(nr >= map->num_stripes); 5978 for (j = 0; j < nr; j++) { 5979 if (buf[j] == bytenr) 5980 break; 5981 } 5982 if (j == nr) { 5983 WARN_ON(nr >= map->num_stripes); 5984 buf[nr++] = bytenr; 5985 } 5986 } 5987 5988 *logical = buf; 5989 *naddrs = nr; 5990 *stripe_len = rmap_len; 5991 5992 free_extent_map(em); 5993 return 0; 5994 } 5995 5996 static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio) 5997 { 5998 bio->bi_private = bbio->private; 5999 bio->bi_end_io = bbio->end_io; 6000 bio_endio(bio); 6001 6002 btrfs_put_bbio(bbio); 6003 } 6004 6005 static void btrfs_end_bio(struct bio *bio) 6006 { 6007 struct btrfs_bio *bbio = bio->bi_private; 6008 int is_orig_bio = 0; 6009 6010 if (bio->bi_error) { 6011 atomic_inc(&bbio->error); 6012 if (bio->bi_error == -EIO || bio->bi_error == -EREMOTEIO) { 6013 unsigned int stripe_index = 6014 btrfs_io_bio(bio)->stripe_index; 6015 struct btrfs_device *dev; 6016 6017 BUG_ON(stripe_index >= bbio->num_stripes); 6018 dev = bbio->stripes[stripe_index].dev; 6019 if (dev->bdev) { 6020 if (bio_op(bio) == REQ_OP_WRITE) 6021 btrfs_dev_stat_inc(dev, 6022 BTRFS_DEV_STAT_WRITE_ERRS); 6023 else 6024 btrfs_dev_stat_inc(dev, 6025 BTRFS_DEV_STAT_READ_ERRS); 6026 if ((bio->bi_opf & WRITE_FLUSH) == WRITE_FLUSH) 6027 btrfs_dev_stat_inc(dev, 6028 BTRFS_DEV_STAT_FLUSH_ERRS); 6029 btrfs_dev_stat_print_on_error(dev); 6030 } 6031 } 6032 } 6033 6034 if (bio == bbio->orig_bio) 6035 is_orig_bio = 1; 6036 6037 btrfs_bio_counter_dec(bbio->fs_info); 6038 6039 if (atomic_dec_and_test(&bbio->stripes_pending)) { 6040 if (!is_orig_bio) { 6041 bio_put(bio); 6042 bio = bbio->orig_bio; 6043 } 6044 6045 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; 6046 /* Only send an error to the higher layers if it is 6047 * beyond the tolerance of the btrfs bio. 6048 */ 6049 if (atomic_read(&bbio->error) > bbio->max_errors) { 6050 bio->bi_error = -EIO; 6051 } else { 6052 /* 6053 * This bio is actually up to date; we didn't 6054 * go over the max number of errors. 6055 */ 6056 bio->bi_error = 0; 6057 } 6058 6059 btrfs_end_bbio(bbio, bio); 6060 } else if (!is_orig_bio) { 6061 bio_put(bio); 6062 } 6063 } 6064 6065 /* 6066 * See run_scheduled_bios() for a description of why bios are collected for 6067 * async submit. 6068 * 6069 * This will add one bio to the pending list for a device and make sure 6070 * the work struct is scheduled.
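 * * For example (an illustrative sketch of the flow below, not extra code): a REQ_SYNC write bio is appended to device->pending_sync_bios, any other write to device->pending_bios, and the device's work item is queued on fs_info->submit_workers unless a worker is already draining the lists (device->running_pending).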
6071 */ 6072 static noinline void btrfs_schedule_bio(struct btrfs_root *root, 6073 struct btrfs_device *device, 6074 struct bio *bio) 6075 { 6076 int should_queue = 1; 6077 struct btrfs_pending_bios *pending_bios; 6078 6079 if (device->missing || !device->bdev) { 6080 bio_io_error(bio); 6081 return; 6082 } 6083 6084 /* don't bother with additional async steps for reads, right now */ 6085 if (bio_op(bio) == REQ_OP_READ) { 6086 bio_get(bio); 6087 btrfsic_submit_bio(bio); 6088 bio_put(bio); 6089 return; 6090 } 6091 6092 /* 6093 * nr_async_bios allows us to reliably return congestion to the 6094 * higher layers. Otherwise, the async bio makes it appear we have 6095 * made progress against dirty pages when we've really just put it 6096 * on a queue for later 6097 */ 6098 atomic_inc(&root->fs_info->nr_async_bios); 6099 WARN_ON(bio->bi_next); 6100 bio->bi_next = NULL; 6101 6102 spin_lock(&device->io_lock); 6103 if (bio->bi_opf & REQ_SYNC) 6104 pending_bios = &device->pending_sync_bios; 6105 else 6106 pending_bios = &device->pending_bios; 6107 6108 if (pending_bios->tail) 6109 pending_bios->tail->bi_next = bio; 6110 6111 pending_bios->tail = bio; 6112 if (!pending_bios->head) 6113 pending_bios->head = bio; 6114 if (device->running_pending) 6115 should_queue = 0; 6116 6117 spin_unlock(&device->io_lock); 6118 6119 if (should_queue) 6120 btrfs_queue_work(root->fs_info->submit_workers, 6121 &device->work); 6122 } 6123 6124 static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio, 6125 struct bio *bio, u64 physical, int dev_nr, 6126 int async) 6127 { 6128 struct btrfs_device *dev = bbio->stripes[dev_nr].dev; 6129 6130 bio->bi_private = bbio; 6131 btrfs_io_bio(bio)->stripe_index = dev_nr; 6132 bio->bi_end_io = btrfs_end_bio; 6133 bio->bi_iter.bi_sector = physical >> 9; 6134 #ifdef DEBUG 6135 { 6136 struct rcu_string *name; 6137 6138 rcu_read_lock(); 6139 name = rcu_dereference(dev->name); 6140 pr_debug("btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu " 6141 "(%s id %llu), size=%u\n", bio_op(bio), bio->bi_opf, 6142 (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev, 6143 name->str, dev->devid, bio->bi_iter.bi_size); 6144 rcu_read_unlock(); 6145 } 6146 #endif 6147 bio->bi_bdev = dev->bdev; 6148 6149 btrfs_bio_counter_inc_noblocked(root->fs_info); 6150 6151 if (async) 6152 btrfs_schedule_bio(root, dev, bio); 6153 else 6154 btrfsic_submit_bio(bio); 6155 } 6156 6157 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical) 6158 { 6159 atomic_inc(&bbio->error); 6160 if (atomic_dec_and_test(&bbio->stripes_pending)) { 6161 /* Should be the original bio. 
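 * bbio_error() is only ever called with the original bio (clones are handed to submit_stripe_bio() instead), so when the last pending stripe reference is dropped this must be it.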
*/ 6162 WARN_ON(bio != bbio->orig_bio); 6163 6164 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; 6165 bio->bi_iter.bi_sector = logical >> 9; 6166 bio->bi_error = -EIO; 6167 btrfs_end_bbio(bbio, bio); 6168 } 6169 } 6170 6171 int btrfs_map_bio(struct btrfs_root *root, struct bio *bio, 6172 int mirror_num, int async_submit) 6173 { 6174 struct btrfs_device *dev; 6175 struct bio *first_bio = bio; 6176 u64 logical = (u64)bio->bi_iter.bi_sector << 9; 6177 u64 length = 0; 6178 u64 map_length; 6179 int ret; 6180 int dev_nr; 6181 int total_devs; 6182 struct btrfs_bio *bbio = NULL; 6183 6184 length = bio->bi_iter.bi_size; 6185 map_length = length; 6186 6187 btrfs_bio_counter_inc_blocked(root->fs_info); 6188 ret = __btrfs_map_block(root->fs_info, bio_op(bio), logical, 6189 &map_length, &bbio, mirror_num, 1); 6190 if (ret) { 6191 btrfs_bio_counter_dec(root->fs_info); 6192 return ret; 6193 } 6194 6195 total_devs = bbio->num_stripes; 6196 bbio->orig_bio = first_bio; 6197 bbio->private = first_bio->bi_private; 6198 bbio->end_io = first_bio->bi_end_io; 6199 bbio->fs_info = root->fs_info; 6200 atomic_set(&bbio->stripes_pending, bbio->num_stripes); 6201 6202 if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) && 6203 ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) { 6204 /* In this case, map_length has been set to the length of 6205 a single stripe; not the whole write */ 6206 if (bio_op(bio) == REQ_OP_WRITE) { 6207 ret = raid56_parity_write(root, bio, bbio, map_length); 6208 } else { 6209 ret = raid56_parity_recover(root, bio, bbio, map_length, 6210 mirror_num, 1); 6211 } 6212 6213 btrfs_bio_counter_dec(root->fs_info); 6214 return ret; 6215 } 6216 6217 if (map_length < length) { 6218 btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu", 6219 logical, length, map_length); 6220 BUG(); 6221 } 6222 6223 for (dev_nr = 0; dev_nr < total_devs; dev_nr++) { 6224 dev = bbio->stripes[dev_nr].dev; 6225 if (!dev || !dev->bdev || 6226 (bio_op(bio) == REQ_OP_WRITE && !dev->writeable)) { 6227 bbio_error(bbio, first_bio, logical); 6228 continue; 6229 } 6230 6231 if (dev_nr < total_devs - 1) { 6232 bio = btrfs_bio_clone(first_bio, GFP_NOFS); 6233 BUG_ON(!bio); /* -ENOMEM */ 6234 } else 6235 bio = first_bio; 6236 6237 submit_stripe_bio(root, bbio, bio, 6238 bbio->stripes[dev_nr].physical, dev_nr, 6239 async_submit); 6240 } 6241 btrfs_bio_counter_dec(root->fs_info); 6242 return 0; 6243 } 6244 6245 struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid, 6246 u8 *uuid, u8 *fsid) 6247 { 6248 struct btrfs_device *device; 6249 struct btrfs_fs_devices *cur_devices; 6250 6251 cur_devices = fs_info->fs_devices; 6252 while (cur_devices) { 6253 if (!fsid || 6254 !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) { 6255 device = __find_device(&cur_devices->devices, 6256 devid, uuid); 6257 if (device) 6258 return device; 6259 } 6260 cur_devices = cur_devices->seed; 6261 } 6262 return NULL; 6263 } 6264 6265 static struct btrfs_device *add_missing_dev(struct btrfs_root *root, 6266 struct btrfs_fs_devices *fs_devices, 6267 u64 devid, u8 *dev_uuid) 6268 { 6269 struct btrfs_device *device; 6270 6271 device = btrfs_alloc_device(NULL, &devid, dev_uuid); 6272 if (IS_ERR(device)) 6273 return NULL; 6274 6275 list_add(&device->dev_list, &fs_devices->devices); 6276 device->fs_devices = fs_devices; 6277 fs_devices->num_devices++; 6278 6279 device->missing = 1; 6280 fs_devices->missing_devices++; 6281 6282 return device; 6283 } 6284 6285 /** 6286 * btrfs_alloc_device - allocate struct 
btrfs_device 6287 * @fs_info: used only for generating a new devid, can be NULL if 6288 * devid is provided (i.e. @devid != NULL). 6289 * @devid: a pointer to devid for this device. If NULL a new devid 6290 * is generated. 6291 * @uuid: a pointer to UUID for this device. If NULL a new UUID 6292 * is generated. 6293 * 6294 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR() 6295 * on error. Returned struct is not linked onto any lists and can be 6296 * destroyed with kfree() right away. 6297 */ 6298 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, 6299 const u64 *devid, 6300 const u8 *uuid) 6301 { 6302 struct btrfs_device *dev; 6303 u64 tmp; 6304 6305 if (WARN_ON(!devid && !fs_info)) 6306 return ERR_PTR(-EINVAL); 6307 6308 dev = __alloc_device(); 6309 if (IS_ERR(dev)) 6310 return dev; 6311 6312 if (devid) 6313 tmp = *devid; 6314 else { 6315 int ret; 6316 6317 ret = find_next_devid(fs_info, &tmp); 6318 if (ret) { 6319 kfree(dev); 6320 return ERR_PTR(ret); 6321 } 6322 } 6323 dev->devid = tmp; 6324 6325 if (uuid) 6326 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE); 6327 else 6328 generate_random_uuid(dev->uuid); 6329 6330 btrfs_init_work(&dev->work, btrfs_submit_helper, 6331 pending_bios_fn, NULL, NULL); 6332 6333 return dev; 6334 } 6335 6336 /* Return -EIO if any error, otherwise return 0. */ 6337 static int btrfs_check_chunk_valid(struct btrfs_root *root, 6338 struct extent_buffer *leaf, 6339 struct btrfs_chunk *chunk, u64 logical) 6340 { 6341 u64 length; 6342 u64 stripe_len; 6343 u16 num_stripes; 6344 u16 sub_stripes; 6345 u64 type; 6346 6347 length = btrfs_chunk_length(leaf, chunk); 6348 stripe_len = btrfs_chunk_stripe_len(leaf, chunk); 6349 num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 6350 sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); 6351 type = btrfs_chunk_type(leaf, chunk); 6352 6353 if (!num_stripes) { 6354 btrfs_err(root->fs_info, "invalid chunk num_stripes: %u", 6355 num_stripes); 6356 return -EIO; 6357 } 6358 if (!IS_ALIGNED(logical, root->sectorsize)) { 6359 btrfs_err(root->fs_info, 6360 "invalid chunk logical %llu", logical); 6361 return -EIO; 6362 } 6363 if (btrfs_chunk_sector_size(leaf, chunk) != root->sectorsize) { 6364 btrfs_err(root->fs_info, "invalid chunk sectorsize %u", 6365 btrfs_chunk_sector_size(leaf, chunk)); 6366 return -EIO; 6367 } 6368 if (!length || !IS_ALIGNED(length, root->sectorsize)) { 6369 btrfs_err(root->fs_info, 6370 "invalid chunk length %llu", length); 6371 return -EIO; 6372 } 6373 if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) { 6374 btrfs_err(root->fs_info, "invalid chunk stripe length: %llu", 6375 stripe_len); 6376 return -EIO; 6377 } 6378 if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) & 6379 type) { 6380 btrfs_err(root->fs_info, "unrecognized chunk type: %llu", 6381 ~(BTRFS_BLOCK_GROUP_TYPE_MASK | 6382 BTRFS_BLOCK_GROUP_PROFILE_MASK) & 6383 btrfs_chunk_type(leaf, chunk)); 6384 return -EIO; 6385 } 6386 if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) || 6387 (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) || 6388 (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) || 6389 (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) || 6390 (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) || 6391 ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 && 6392 num_stripes != 1)) { 6393 btrfs_err(root->fs_info, 6394 "invalid num_stripes:sub_stripes %u:%u for profile %llu", 6395 num_stripes, sub_stripes, 6396 type & BTRFS_BLOCK_GROUP_PROFILE_MASK); 6397 return -EIO; 6398 } 
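/* All static sanity checks on the chunk item passed; the per-stripe device lookups are performed later by read_one_chunk(). */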
6399 6400 return 0; 6401 } 6402 6403 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key, 6404 struct extent_buffer *leaf, 6405 struct btrfs_chunk *chunk) 6406 { 6407 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree; 6408 struct map_lookup *map; 6409 struct extent_map *em; 6410 u64 logical; 6411 u64 length; 6412 u64 stripe_len; 6413 u64 devid; 6414 u8 uuid[BTRFS_UUID_SIZE]; 6415 int num_stripes; 6416 int ret; 6417 int i; 6418 6419 logical = key->offset; 6420 length = btrfs_chunk_length(leaf, chunk); 6421 stripe_len = btrfs_chunk_stripe_len(leaf, chunk); 6422 num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 6423 6424 ret = btrfs_check_chunk_valid(root, leaf, chunk, logical); 6425 if (ret) 6426 return ret; 6427 6428 read_lock(&map_tree->map_tree.lock); 6429 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1); 6430 read_unlock(&map_tree->map_tree.lock); 6431 6432 /* already mapped? */ 6433 if (em && em->start <= logical && em->start + em->len > logical) { 6434 free_extent_map(em); 6435 return 0; 6436 } else if (em) { 6437 free_extent_map(em); 6438 } 6439 6440 em = alloc_extent_map(); 6441 if (!em) 6442 return -ENOMEM; 6443 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); 6444 if (!map) { 6445 free_extent_map(em); 6446 return -ENOMEM; 6447 } 6448 6449 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 6450 em->map_lookup = map; 6451 em->start = logical; 6452 em->len = length; 6453 em->orig_start = 0; 6454 em->block_start = 0; 6455 em->block_len = em->len; 6456 6457 map->num_stripes = num_stripes; 6458 map->io_width = btrfs_chunk_io_width(leaf, chunk); 6459 map->io_align = btrfs_chunk_io_align(leaf, chunk); 6460 map->sector_size = btrfs_chunk_sector_size(leaf, chunk); 6461 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk); 6462 map->type = btrfs_chunk_type(leaf, chunk); 6463 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); 6464 for (i = 0; i < num_stripes; i++) { 6465 map->stripes[i].physical = 6466 btrfs_stripe_offset_nr(leaf, chunk, i); 6467 devid = btrfs_stripe_devid_nr(leaf, chunk, i); 6468 read_extent_buffer(leaf, uuid, (unsigned long) 6469 btrfs_stripe_dev_uuid_nr(chunk, i), 6470 BTRFS_UUID_SIZE); 6471 map->stripes[i].dev = btrfs_find_device(root->fs_info, devid, 6472 uuid, NULL); 6473 if (!map->stripes[i].dev && 6474 !btrfs_test_opt(root->fs_info, DEGRADED)) { 6475 free_extent_map(em); 6476 return -EIO; 6477 } 6478 if (!map->stripes[i].dev) { 6479 map->stripes[i].dev = 6480 add_missing_dev(root, root->fs_info->fs_devices, 6481 devid, uuid); 6482 if (!map->stripes[i].dev) { 6483 free_extent_map(em); 6484 return -EIO; 6485 } 6486 btrfs_warn(root->fs_info, "devid %llu uuid %pU is missing", 6487 devid, uuid); 6488 } 6489 map->stripes[i].dev->in_fs_metadata = 1; 6490 } 6491 6492 write_lock(&map_tree->map_tree.lock); 6493 ret = add_extent_mapping(&map_tree->map_tree, em, 0); 6494 write_unlock(&map_tree->map_tree.lock); 6495 BUG_ON(ret); /* Tree corruption */ 6496 free_extent_map(em); 6497 6498 return 0; 6499 } 6500 6501 static void fill_device_from_item(struct extent_buffer *leaf, 6502 struct btrfs_dev_item *dev_item, 6503 struct btrfs_device *device) 6504 { 6505 unsigned long ptr; 6506 6507 device->devid = btrfs_device_id(leaf, dev_item); 6508 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item); 6509 device->total_bytes = device->disk_total_bytes; 6510 device->commit_total_bytes = device->disk_total_bytes; 6511 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item); 6512 device->commit_bytes_used = 
device->bytes_used; 6513 device->type = btrfs_device_type(leaf, dev_item); 6514 device->io_align = btrfs_device_io_align(leaf, dev_item); 6515 device->io_width = btrfs_device_io_width(leaf, dev_item); 6516 device->sector_size = btrfs_device_sector_size(leaf, dev_item); 6517 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID); 6518 device->is_tgtdev_for_dev_replace = 0; 6519 6520 ptr = btrfs_device_uuid(dev_item); 6521 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 6522 } 6523 6524 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_root *root, 6525 u8 *fsid) 6526 { 6527 struct btrfs_fs_devices *fs_devices; 6528 int ret; 6529 6530 BUG_ON(!mutex_is_locked(&uuid_mutex)); 6531 6532 fs_devices = root->fs_info->fs_devices->seed; 6533 while (fs_devices) { 6534 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) 6535 return fs_devices; 6536 6537 fs_devices = fs_devices->seed; 6538 } 6539 6540 fs_devices = find_fsid(fsid); 6541 if (!fs_devices) { 6542 if (!btrfs_test_opt(root->fs_info, DEGRADED)) 6543 return ERR_PTR(-ENOENT); 6544 6545 fs_devices = alloc_fs_devices(fsid); 6546 if (IS_ERR(fs_devices)) 6547 return fs_devices; 6548 6549 fs_devices->seeding = 1; 6550 fs_devices->opened = 1; 6551 return fs_devices; 6552 } 6553 6554 fs_devices = clone_fs_devices(fs_devices); 6555 if (IS_ERR(fs_devices)) 6556 return fs_devices; 6557 6558 ret = __btrfs_open_devices(fs_devices, FMODE_READ, 6559 root->fs_info->bdev_holder); 6560 if (ret) { 6561 free_fs_devices(fs_devices); 6562 fs_devices = ERR_PTR(ret); 6563 goto out; 6564 } 6565 6566 if (!fs_devices->seeding) { 6567 __btrfs_close_devices(fs_devices); 6568 free_fs_devices(fs_devices); 6569 fs_devices = ERR_PTR(-EINVAL); 6570 goto out; 6571 } 6572 6573 fs_devices->seed = root->fs_info->fs_devices->seed; 6574 root->fs_info->fs_devices->seed = fs_devices; 6575 out: 6576 return fs_devices; 6577 } 6578 6579 static int read_one_dev(struct btrfs_root *root, 6580 struct extent_buffer *leaf, 6581 struct btrfs_dev_item *dev_item) 6582 { 6583 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; 6584 struct btrfs_device *device; 6585 u64 devid; 6586 int ret; 6587 u8 fs_uuid[BTRFS_UUID_SIZE]; 6588 u8 dev_uuid[BTRFS_UUID_SIZE]; 6589 6590 devid = btrfs_device_id(leaf, dev_item); 6591 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 6592 BTRFS_UUID_SIZE); 6593 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 6594 BTRFS_UUID_SIZE); 6595 6596 if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) { 6597 fs_devices = open_seed_devices(root, fs_uuid); 6598 if (IS_ERR(fs_devices)) 6599 return PTR_ERR(fs_devices); 6600 } 6601 6602 device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid); 6603 if (!device) { 6604 if (!btrfs_test_opt(root->fs_info, DEGRADED)) 6605 return -EIO; 6606 6607 device = add_missing_dev(root, fs_devices, devid, dev_uuid); 6608 if (!device) 6609 return -ENOMEM; 6610 btrfs_warn(root->fs_info, "devid %llu uuid %pU missing", 6611 devid, dev_uuid); 6612 } else { 6613 if (!device->bdev && !btrfs_test_opt(root->fs_info, DEGRADED)) 6614 return -EIO; 6615 6616 if (!device->bdev && !device->missing) { 6617 /* 6618 * This happens when a device that was properly set up 6619 * in the device info lists suddenly goes bad
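 * (for example, the disk was hot-unplugged or its controller failed while the filesystem was mounted):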
6620 * device->bdev is NULL, and so we have to set 6621 * device->missing to 1 here 6622 */ 6623 device->fs_devices->missing_devices++; 6624 device->missing = 1; 6625 } 6626 6627 /* Move the device to its own fs_devices */ 6628 if (device->fs_devices != fs_devices) { 6629 ASSERT(device->missing); 6630 6631 list_move(&device->dev_list, &fs_devices->devices); 6632 device->fs_devices->num_devices--; 6633 fs_devices->num_devices++; 6634 6635 device->fs_devices->missing_devices--; 6636 fs_devices->missing_devices++; 6637 6638 device->fs_devices = fs_devices; 6639 } 6640 } 6641 6642 if (device->fs_devices != root->fs_info->fs_devices) { 6643 BUG_ON(device->writeable); 6644 if (device->generation != 6645 btrfs_device_generation(leaf, dev_item)) 6646 return -EINVAL; 6647 } 6648 6649 fill_device_from_item(leaf, dev_item, device); 6650 device->in_fs_metadata = 1; 6651 if (device->writeable && !device->is_tgtdev_for_dev_replace) { 6652 device->fs_devices->total_rw_bytes += device->total_bytes; 6653 spin_lock(&root->fs_info->free_chunk_lock); 6654 root->fs_info->free_chunk_space += device->total_bytes - 6655 device->bytes_used; 6656 spin_unlock(&root->fs_info->free_chunk_lock); 6657 } 6658 ret = 0; 6659 return ret; 6660 } 6661 6662 int btrfs_read_sys_array(struct btrfs_root *root) 6663 { 6664 struct btrfs_super_block *super_copy = root->fs_info->super_copy; 6665 struct extent_buffer *sb; 6666 struct btrfs_disk_key *disk_key; 6667 struct btrfs_chunk *chunk; 6668 u8 *array_ptr; 6669 unsigned long sb_array_offset; 6670 int ret = 0; 6671 u32 num_stripes; 6672 u32 array_size; 6673 u32 len = 0; 6674 u32 cur_offset; 6675 u64 type; 6676 struct btrfs_key key; 6677 6678 ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize); 6679 /* 6680 * This will create an extent buffer of nodesize; the superblock size is 6681 * fixed at BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will 6682 * overallocate, but we can keep it as-is since only the first page is used. 6683 */ 6684 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET); 6685 if (IS_ERR(sb)) 6686 return PTR_ERR(sb); 6687 set_extent_buffer_uptodate(sb); 6688 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0); 6689 /* 6690 * The sb extent buffer is artificial and just used to read the system array. 6691 * The set_extent_buffer_uptodate() call does not properly mark all its 6692 * pages up-to-date when the page is larger: the extent does not cover the 6693 * whole page, and consequently check_page_uptodate() does not find all 6694 * the page's extents up-to-date (the hole beyond the sb); 6695 * write_extent_buffer() then triggers a WARN_ON. 6696 * 6697 * Regular short extents go through the mark_extent_buffer_dirty/writeback 6698 * cycle, but the sb spans only this function. Add an explicit 6699 * SetPageUptodate() call to silence the warning, e.g. on PowerPC 64.
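 * * For example, on a 64K-page machine PAGE_SIZE > BTRFS_SUPER_INFO_SIZE (4K), so the sb buffer covers only the start of its single page, hence the explicit SetPageUptodate() below.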
6700 */ 6701 if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE) 6702 SetPageUptodate(sb->pages[0]); 6703 6704 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); 6705 array_size = btrfs_super_sys_array_size(super_copy); 6706 6707 array_ptr = super_copy->sys_chunk_array; 6708 sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array); 6709 cur_offset = 0; 6710 6711 while (cur_offset < array_size) { 6712 disk_key = (struct btrfs_disk_key *)array_ptr; 6713 len = sizeof(*disk_key); 6714 if (cur_offset + len > array_size) 6715 goto out_short_read; 6716 6717 btrfs_disk_key_to_cpu(&key, disk_key); 6718 6719 array_ptr += len; 6720 sb_array_offset += len; 6721 cur_offset += len; 6722 6723 if (key.type == BTRFS_CHUNK_ITEM_KEY) { 6724 chunk = (struct btrfs_chunk *)sb_array_offset; 6725 /* 6726 * At least one btrfs_chunk with one stripe must be 6727 * present, exact stripe count check comes afterwards 6728 */ 6729 len = btrfs_chunk_item_size(1); 6730 if (cur_offset + len > array_size) 6731 goto out_short_read; 6732 6733 num_stripes = btrfs_chunk_num_stripes(sb, chunk); 6734 if (!num_stripes) { 6735 printk(KERN_ERR 6736 "BTRFS: invalid number of stripes %u in sys_array at offset %u\n", 6737 num_stripes, cur_offset); 6738 ret = -EIO; 6739 break; 6740 } 6741 6742 type = btrfs_chunk_type(sb, chunk); 6743 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) { 6744 btrfs_err(root->fs_info, 6745 "invalid chunk type %llu in sys_array at offset %u", 6746 type, cur_offset); 6747 ret = -EIO; 6748 break; 6749 } 6750 6751 len = btrfs_chunk_item_size(num_stripes); 6752 if (cur_offset + len > array_size) 6753 goto out_short_read; 6754 6755 ret = read_one_chunk(root, &key, sb, chunk); 6756 if (ret) 6757 break; 6758 } else { 6759 printk(KERN_ERR 6760 "BTRFS: unexpected item type %u in sys_array at offset %u\n", 6761 (u32)key.type, cur_offset); 6762 ret = -EIO; 6763 break; 6764 } 6765 array_ptr += len; 6766 sb_array_offset += len; 6767 cur_offset += len; 6768 } 6769 clear_extent_buffer_uptodate(sb); 6770 free_extent_buffer_stale(sb); 6771 return ret; 6772 6773 out_short_read: 6774 printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n", 6775 len, cur_offset); 6776 clear_extent_buffer_uptodate(sb); 6777 free_extent_buffer_stale(sb); 6778 return -EIO; 6779 } 6780 6781 int btrfs_read_chunk_tree(struct btrfs_root *root) 6782 { 6783 struct btrfs_path *path; 6784 struct extent_buffer *leaf; 6785 struct btrfs_key key; 6786 struct btrfs_key found_key; 6787 int ret; 6788 int slot; 6789 u64 total_dev = 0; 6790 6791 root = root->fs_info->chunk_root; 6792 6793 path = btrfs_alloc_path(); 6794 if (!path) 6795 return -ENOMEM; 6796 6797 mutex_lock(&uuid_mutex); 6798 lock_chunks(root); 6799 6800 /* 6801 * Read all device items, and then all the chunk items. All 6802 * device items are found before any chunk item (their object id 6803 * is smaller than the lowest possible object id for a chunk 6804 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID). 
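 * * Illustrative key order in the chunk tree: the (BTRFS_DEV_ITEMS_OBJECTID, BTRFS_DEV_ITEM_KEY, devid) items come first, followed by the (BTRFS_FIRST_CHUNK_TREE_OBJECTID, BTRFS_CHUNK_ITEM_KEY, chunk_offset) items, so a single forward scan sees every device before the first chunk.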
6805 */ 6806 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 6807 key.offset = 0; 6808 key.type = 0; 6809 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 6810 if (ret < 0) 6811 goto error; 6812 while (1) { 6813 leaf = path->nodes[0]; 6814 slot = path->slots[0]; 6815 if (slot >= btrfs_header_nritems(leaf)) { 6816 ret = btrfs_next_leaf(root, path); 6817 if (ret == 0) 6818 continue; 6819 if (ret < 0) 6820 goto error; 6821 break; 6822 } 6823 btrfs_item_key_to_cpu(leaf, &found_key, slot); 6824 if (found_key.type == BTRFS_DEV_ITEM_KEY) { 6825 struct btrfs_dev_item *dev_item; 6826 dev_item = btrfs_item_ptr(leaf, slot, 6827 struct btrfs_dev_item); 6828 ret = read_one_dev(root, leaf, dev_item); 6829 if (ret) 6830 goto error; 6831 total_dev++; 6832 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { 6833 struct btrfs_chunk *chunk; 6834 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 6835 ret = read_one_chunk(root, &found_key, leaf, chunk); 6836 if (ret) 6837 goto error; 6838 } 6839 path->slots[0]++; 6840 } 6841 6842 /* 6843 * After loading chunk tree, we've got all device information, 6844 * do another round of validation checks. 6845 */ 6846 if (total_dev != root->fs_info->fs_devices->total_devices) { 6847 btrfs_err(root->fs_info, 6848 "super_num_devices %llu mismatch with num_devices %llu found here", 6849 btrfs_super_num_devices(root->fs_info->super_copy), 6850 total_dev); 6851 ret = -EINVAL; 6852 goto error; 6853 } 6854 if (btrfs_super_total_bytes(root->fs_info->super_copy) < 6855 root->fs_info->fs_devices->total_rw_bytes) { 6856 btrfs_err(root->fs_info, 6857 "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu", 6858 btrfs_super_total_bytes(root->fs_info->super_copy), 6859 root->fs_info->fs_devices->total_rw_bytes); 6860 ret = -EINVAL; 6861 goto error; 6862 } 6863 ret = 0; 6864 error: 6865 unlock_chunks(root); 6866 mutex_unlock(&uuid_mutex); 6867 6868 btrfs_free_path(path); 6869 return ret; 6870 } 6871 6872 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info) 6873 { 6874 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 6875 struct btrfs_device *device; 6876 6877 while (fs_devices) { 6878 mutex_lock(&fs_devices->device_list_mutex); 6879 list_for_each_entry(device, &fs_devices->devices, dev_list) 6880 device->dev_root = fs_info->dev_root; 6881 mutex_unlock(&fs_devices->device_list_mutex); 6882 6883 fs_devices = fs_devices->seed; 6884 } 6885 } 6886 6887 static void __btrfs_reset_dev_stats(struct btrfs_device *dev) 6888 { 6889 int i; 6890 6891 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 6892 btrfs_dev_stat_reset(dev, i); 6893 } 6894 6895 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info) 6896 { 6897 struct btrfs_key key; 6898 struct btrfs_key found_key; 6899 struct btrfs_root *dev_root = fs_info->dev_root; 6900 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 6901 struct extent_buffer *eb; 6902 int slot; 6903 int ret = 0; 6904 struct btrfs_device *device; 6905 struct btrfs_path *path = NULL; 6906 int i; 6907 6908 path = btrfs_alloc_path(); 6909 if (!path) { 6910 ret = -ENOMEM; 6911 goto out; 6912 } 6913 6914 mutex_lock(&fs_devices->device_list_mutex); 6915 list_for_each_entry(device, &fs_devices->devices, dev_list) { 6916 int item_size; 6917 struct btrfs_dev_stats_item *ptr; 6918 6919 key.objectid = BTRFS_DEV_STATS_OBJECTID; 6920 key.type = BTRFS_PERSISTENT_ITEM_KEY; 6921 key.offset = device->devid; 6922 ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0); 6923 if (ret) { 6924 __btrfs_reset_dev_stats(device); 6925 
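/* No dev_stats item on disk for this device: start from zeroed counters, but still mark the stats valid so new events are recorded from now on. */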
device->dev_stats_valid = 1; 6926 btrfs_release_path(path); 6927 continue; 6928 } 6929 slot = path->slots[0]; 6930 eb = path->nodes[0]; 6931 btrfs_item_key_to_cpu(eb, &found_key, slot); 6932 item_size = btrfs_item_size_nr(eb, slot); 6933 6934 ptr = btrfs_item_ptr(eb, slot, 6935 struct btrfs_dev_stats_item); 6936 6937 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) { 6938 if (item_size >= (1 + i) * sizeof(__le64)) 6939 btrfs_dev_stat_set(device, i, 6940 btrfs_dev_stats_value(eb, ptr, i)); 6941 else 6942 btrfs_dev_stat_reset(device, i); 6943 } 6944 6945 device->dev_stats_valid = 1; 6946 btrfs_dev_stat_print_on_load(device); 6947 btrfs_release_path(path); 6948 } 6949 mutex_unlock(&fs_devices->device_list_mutex); 6950 6951 out: 6952 btrfs_free_path(path); 6953 return ret < 0 ? ret : 0; 6954 } 6955 6956 static int update_dev_stat_item(struct btrfs_trans_handle *trans, 6957 struct btrfs_root *dev_root, 6958 struct btrfs_device *device) 6959 { 6960 struct btrfs_path *path; 6961 struct btrfs_key key; 6962 struct extent_buffer *eb; 6963 struct btrfs_dev_stats_item *ptr; 6964 int ret; 6965 int i; 6966 6967 key.objectid = BTRFS_DEV_STATS_OBJECTID; 6968 key.type = BTRFS_PERSISTENT_ITEM_KEY; 6969 key.offset = device->devid; 6970 6971 path = btrfs_alloc_path(); 6972 if (!path) return -ENOMEM; 6973 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1); 6974 if (ret < 0) { 6975 btrfs_warn_in_rcu(dev_root->fs_info, 6976 "error %d while searching for dev_stats item for device %s", 6977 ret, rcu_str_deref(device->name)); 6978 goto out; 6979 } 6980 6981 if (ret == 0 && 6982 btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) { 6983 /* Need to delete the old one and insert a new one */ 6984 ret = btrfs_del_item(trans, dev_root, path); 6985 if (ret != 0) { 6986 btrfs_warn_in_rcu(dev_root->fs_info, 6987 "delete too small dev_stats item for device %s failed %d", 6988 rcu_str_deref(device->name), ret); 6989 goto out; 6990 } 6991 ret = 1; 6992 } 6993 6994 if (ret == 1) { 6995 /* Need to insert a new item */ 6996 btrfs_release_path(path); 6997 ret = btrfs_insert_empty_item(trans, dev_root, path, 6998 &key, sizeof(*ptr)); 6999 if (ret < 0) { 7000 btrfs_warn_in_rcu(dev_root->fs_info, 7001 "insert dev_stats item for device %s failed %d", 7002 rcu_str_deref(device->name), ret); 7003 goto out; 7004 } 7005 } 7006 7007 eb = path->nodes[0]; 7008 ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item); 7009 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7010 btrfs_set_dev_stats_value(eb, ptr, i, 7011 btrfs_dev_stat_read(device, i)); 7012 btrfs_mark_buffer_dirty(eb); 7013 7014 out: 7015 btrfs_free_path(path); 7016 return ret; 7017 } 7018 7019 /* 7020 * Called from commit_transaction(). Writes all changed device stats to disk.
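 * * Note: stats_cnt is sampled before the item is written and only that many events are subtracted from dev_stats_ccnt on success, so increments that race with the write stay dirty and are flushed by a later commit.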
7021 */ 7022 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans, 7023 struct btrfs_fs_info *fs_info) 7024 { 7025 struct btrfs_root *dev_root = fs_info->dev_root; 7026 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7027 struct btrfs_device *device; 7028 int stats_cnt; 7029 int ret = 0; 7030 7031 mutex_lock(&fs_devices->device_list_mutex); 7032 list_for_each_entry(device, &fs_devices->devices, dev_list) { 7033 if (!device->dev_stats_valid || !btrfs_dev_stats_dirty(device)) 7034 continue; 7035 7036 stats_cnt = atomic_read(&device->dev_stats_ccnt); 7037 ret = update_dev_stat_item(trans, dev_root, device); 7038 if (!ret) 7039 atomic_sub(stats_cnt, &device->dev_stats_ccnt); 7040 } 7041 mutex_unlock(&fs_devices->device_list_mutex); 7042 7043 return ret; 7044 } 7045 7046 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index) 7047 { 7048 btrfs_dev_stat_inc(dev, index); 7049 btrfs_dev_stat_print_on_error(dev); 7050 } 7051 7052 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev) 7053 { 7054 if (!dev->dev_stats_valid) 7055 return; 7056 btrfs_err_rl_in_rcu(dev->dev_root->fs_info, 7057 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u", 7058 rcu_str_deref(dev->name), 7059 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), 7060 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), 7061 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), 7062 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS), 7063 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS)); 7064 } 7065 7066 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev) 7067 { 7068 int i; 7069 7070 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7071 if (btrfs_dev_stat_read(dev, i) != 0) 7072 break; 7073 if (i == BTRFS_DEV_STAT_VALUES_MAX) 7074 return; /* all values == 0, suppress message */ 7075 7076 btrfs_info_in_rcu(dev->dev_root->fs_info, 7077 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u", 7078 rcu_str_deref(dev->name), 7079 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), 7080 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), 7081 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), 7082 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS), 7083 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS)); 7084 } 7085 7086 int btrfs_get_dev_stats(struct btrfs_root *root, 7087 struct btrfs_ioctl_get_dev_stats *stats) 7088 { 7089 struct btrfs_device *dev; 7090 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; 7091 int i; 7092 7093 mutex_lock(&fs_devices->device_list_mutex); 7094 dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL); 7095 mutex_unlock(&fs_devices->device_list_mutex); 7096 7097 if (!dev) { 7098 btrfs_warn(root->fs_info, "get dev_stats failed, device not found"); 7099 return -ENODEV; 7100 } else if (!dev->dev_stats_valid) { 7101 btrfs_warn(root->fs_info, "get dev_stats failed, not yet valid"); 7102 return -ENODEV; 7103 } else if (stats->flags & BTRFS_DEV_STATS_RESET) { 7104 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) { 7105 if (stats->nr_items > i) 7106 stats->values[i] = 7107 btrfs_dev_stat_read_and_reset(dev, i); 7108 else 7109 btrfs_dev_stat_reset(dev, i); 7110 } 7111 } else { 7112 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7113 if (stats->nr_items > i) 7114 stats->values[i] = btrfs_dev_stat_read(dev, i); 7115 } 7116 if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX) 7117 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX; 7118 return 0; 7119 } 7120 7121 void btrfs_scratch_superblocks(struct block_device 
*bdev, char *device_path) 7122 { 7123 struct buffer_head *bh; 7124 struct btrfs_super_block *disk_super; 7125 int copy_num; 7126 7127 if (!bdev) 7128 return; 7129 7130 for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; 7131 copy_num++) { 7132 7133 if (btrfs_read_dev_one_super(bdev, copy_num, &bh)) 7134 continue; 7135 7136 disk_super = (struct btrfs_super_block *)bh->b_data; 7137 7138 memset(&disk_super->magic, 0, sizeof(disk_super->magic)); 7139 set_buffer_dirty(bh); 7140 sync_dirty_buffer(bh); 7141 brelse(bh); 7142 } 7143 7144 /* Notify udev that device has changed */ 7145 btrfs_kobject_uevent(bdev, KOBJ_CHANGE); 7146 7147 /* Update ctime/mtime for device path for libblkid */ 7148 update_dev_time(device_path); 7149 } 7150 7151 /* 7152 * Update the size of all devices, which is used for writing out the 7153 * super blocks. 7154 */ 7155 void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info) 7156 { 7157 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7158 struct btrfs_device *curr, *next; 7159 7160 if (list_empty(&fs_devices->resized_devices)) 7161 return; 7162 7163 mutex_lock(&fs_devices->device_list_mutex); 7164 lock_chunks(fs_info->dev_root); 7165 list_for_each_entry_safe(curr, next, &fs_devices->resized_devices, 7166 resized_list) { 7167 list_del_init(&curr->resized_list); 7168 curr->commit_total_bytes = curr->disk_total_bytes; 7169 } 7170 unlock_chunks(fs_info->dev_root); 7171 mutex_unlock(&fs_devices->device_list_mutex); 7172 } 7173 7174 /* Must be invoked during the transaction commit */ 7175 void btrfs_update_commit_device_bytes_used(struct btrfs_root *root, 7176 struct btrfs_transaction *transaction) 7177 { 7178 struct extent_map *em; 7179 struct map_lookup *map; 7180 struct btrfs_device *dev; 7181 int i; 7182 7183 if (list_empty(&transaction->pending_chunks)) 7184 return; 7185 7186 /* In order to kick the device replace finish process */ 7187 lock_chunks(root); 7188 list_for_each_entry(em, &transaction->pending_chunks, list) { 7189 map = em->map_lookup; 7190 7191 for (i = 0; i < map->num_stripes; i++) { 7192 dev = map->stripes[i].dev; 7193 dev->commit_bytes_used = dev->bytes_used; 7194 } 7195 } 7196 unlock_chunks(root); 7197 } 7198 7199 void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info) 7200 { 7201 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7202 while (fs_devices) { 7203 fs_devices->fs_info = fs_info; 7204 fs_devices = fs_devices->seed; 7205 } 7206 } 7207 7208 void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info) 7209 { 7210 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7211 while (fs_devices) { 7212 fs_devices->fs_info = NULL; 7213 fs_devices = fs_devices->seed; 7214 } 7215 } 7216