/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "math.h"
#include "dev-replace.h"

static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

static void lock_chunks(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
	mutex_unlock(&root->fs_info->chunk_mutex);
}

static struct btrfs_fs_devices *__alloc_fs_devices(void)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_NOFS);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->list);

	return fs_devs;
}
/**
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:	a pointer to UUID for this FS.  If NULL a new UUID is
 *		generated.
 *
 * Return: a pointer to a new &struct btrfs_fs_devices on success;
 * ERR_PTR() on error.  Returned struct is not linked onto any lists and
 * can be destroyed with kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = __alloc_fs_devices();
	if (IS_ERR(fs_devs))
		return fs_devs;

	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
	else
		generate_random_uuid(fs_devs->fsid);

	return fs_devs;
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		rcu_string_free(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}

static void btrfs_kobject_uevent(struct block_device *bdev,
				 enum kobject_action action)
{
	int ret;

	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
	if (ret)
		pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
			action,
			kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
			&disk_to_dev(bdev->bd_disk)->kobj);
}

void btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
}

static struct btrfs_device *__alloc_device(void)
{
	struct btrfs_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_NOFS);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);

	spin_lock_init(&dev->io_lock);

	spin_lock_init(&dev->reada_lock);
	atomic_set(&dev->reada_in_flight, 0);
	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_WAIT);
	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_WAIT);

	return dev;
}

static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct buffer_head **bh)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		printk(KERN_INFO "BTRFS: open %s failed\n", device_path);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, 4096);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*bh = btrfs_read_dev_super(*bdev);
	if (!*bh) {
		ret = -EINVAL;
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	*bh = NULL;
	return ret;
}
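/*
 * Illustrative sketch (not part of the build): a hypothetical caller of
 * btrfs_get_bdev_and_sb() above.  On success the caller owns both the
 * exclusively opened block device and the super block buffer head and
 * must release them itself; on any failure both output pointers have
 * already been reset to NULL, so no partial cleanup is needed:
 *
 *	struct block_device *bdev;
 *	struct buffer_head *bh;
 *	int ret;
 *
 *	ret = btrfs_get_bdev_and_sb(path, FMODE_READ | FMODE_EXCL,
 *				    holder, 1, &bdev, &bh);
 *	if (ret)
 *		return ret;
 *	disk_super = (struct btrfs_super_block *)bh->b_data;
 *	...examine disk_super...
 *	brelse(bh);
 *	blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
 */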
static void requeue_list(struct btrfs_pending_bios *pending_bios,
			 struct bio *head, struct bio *tail)
{
	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}
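/*
 * Note (illustrative): requeue_list() above pushes a whole chain of bios
 * back onto the FRONT of a pending list.  With an existing list A -> B
 * (head A, tail B) and a requeued chain X -> Y:
 *
 *	requeue_list(pending_bios, X, Y);
 *
 * yields X -> Y -> A -> B with the tail still B.  If the list was empty,
 * the chain itself becomes the list and Y is recorded as the new tail.
 */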
315 */ 316 if (device->pending_sync_bios.head == NULL && 317 device->pending_bios.head == NULL) { 318 again = 0; 319 device->running_pending = 0; 320 } else { 321 again = 1; 322 device->running_pending = 1; 323 } 324 325 pending_bios->head = NULL; 326 pending_bios->tail = NULL; 327 328 spin_unlock(&device->io_lock); 329 330 while (pending) { 331 332 rmb(); 333 /* we want to work on both lists, but do more bios on the 334 * sync list than the regular list 335 */ 336 if ((num_run > 32 && 337 pending_bios != &device->pending_sync_bios && 338 device->pending_sync_bios.head) || 339 (num_run > 64 && pending_bios == &device->pending_sync_bios && 340 device->pending_bios.head)) { 341 spin_lock(&device->io_lock); 342 requeue_list(pending_bios, pending, tail); 343 goto loop_lock; 344 } 345 346 cur = pending; 347 pending = pending->bi_next; 348 cur->bi_next = NULL; 349 350 if (atomic_dec_return(&fs_info->nr_async_bios) < limit && 351 waitqueue_active(&fs_info->async_submit_wait)) 352 wake_up(&fs_info->async_submit_wait); 353 354 BUG_ON(atomic_read(&cur->bi_cnt) == 0); 355 356 /* 357 * if we're doing the sync list, record that our 358 * plug has some sync requests on it 359 * 360 * If we're doing the regular list and there are 361 * sync requests sitting around, unplug before 362 * we add more 363 */ 364 if (pending_bios == &device->pending_sync_bios) { 365 sync_pending = 1; 366 } else if (sync_pending) { 367 blk_finish_plug(&plug); 368 blk_start_plug(&plug); 369 sync_pending = 0; 370 } 371 372 btrfsic_submit_bio(cur->bi_rw, cur); 373 num_run++; 374 batch_run++; 375 if (need_resched()) 376 cond_resched(); 377 378 /* 379 * we made progress, there is more work to do and the bdi 380 * is now congested. Back off and let other work structs 381 * run instead 382 */ 383 if (pending && bdi_write_congested(bdi) && batch_run > 8 && 384 fs_info->fs_devices->open_devices > 1) { 385 struct io_context *ioc; 386 387 ioc = current->io_context; 388 389 /* 390 * the main goal here is that we don't want to 391 * block if we're going to be able to submit 392 * more requests without blocking. 393 * 394 * This code does two great things, it pokes into 395 * the elevator code from a filesystem _and_ 396 * it makes assumptions about how batching works. 397 */ 398 if (ioc && ioc->nr_batch_requests > 0 && 399 time_before(jiffies, ioc->last_waited + HZ/50UL) && 400 (last_waited == 0 || 401 ioc->last_waited == last_waited)) { 402 /* 403 * we want to go through our batch of 404 * requests and stop. 
static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}

/*
 * Add new device to list of registered devices
 *
 * Returns:
 * 1   - first time device is seen
 * 0   - device already known
 * < 0 - error
 */
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	struct rcu_string *name;
	int ret = 0;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = alloc_fs_devices(disk_super->fsid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);

		list_add(&fs_devices->list, &fs_uuids);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;

		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			/* we can safely leave the fs_devices entry around */
			return PTR_ERR(device);
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			kfree(device);
			return -ENOMEM;
		}
		rcu_assign_pointer(device->name, name);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;
		mutex_unlock(&fs_devices->device_list_mutex);

		ret = 1;
		device->fs_devices = fs_devices;
	} else if (!device->name || strcmp(device->name->str, path)) {
		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;

	return ret;
}
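/*
 * Illustrative sketch (not part of the build): how a hypothetical caller
 * interprets the three-way return of device_list_add() above:
 *
 *	ret = device_list_add(path, disk_super, devid, &fs_devices);
 *	if (ret < 0)
 *		return ret;	(e.g. -EBUSY if fs_devices is already open)
 *	if (ret > 0)
 *		...first time this device was seen, worth logging...
 *	(ret == 0: already registered; the stored path may have been
 *	 refreshed)
 */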
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = alloc_fs_devices(orig->fsid);
	if (IS_ERR(fs_devices))
		return fs_devices;

	fs_devices->latest_devid = orig->latest_devid;
	fs_devices->latest_trans = orig->latest_trans;
	fs_devices->total_devices = orig->total_devices;

	/* We have held the volume lock, it is safe to get the devices. */
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device))
			goto error;

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
		if (!name) {
			kfree(device);
			goto error;
		}
		rcu_assign_pointer(device->name, name);

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}
608 */ 609 if (step == 0 || device->is_tgtdev_for_dev_replace) { 610 continue; 611 } 612 } 613 if (device->bdev) { 614 blkdev_put(device->bdev, device->mode); 615 device->bdev = NULL; 616 fs_devices->open_devices--; 617 } 618 if (device->writeable) { 619 list_del_init(&device->dev_alloc_list); 620 device->writeable = 0; 621 if (!device->is_tgtdev_for_dev_replace) 622 fs_devices->rw_devices--; 623 } 624 list_del_init(&device->dev_list); 625 fs_devices->num_devices--; 626 rcu_string_free(device->name); 627 kfree(device); 628 } 629 630 if (fs_devices->seed) { 631 fs_devices = fs_devices->seed; 632 goto again; 633 } 634 635 fs_devices->latest_bdev = latest_bdev; 636 fs_devices->latest_devid = latest_devid; 637 fs_devices->latest_trans = latest_transid; 638 639 mutex_unlock(&uuid_mutex); 640 } 641 642 static void __free_device(struct work_struct *work) 643 { 644 struct btrfs_device *device; 645 646 device = container_of(work, struct btrfs_device, rcu_work); 647 648 if (device->bdev) 649 blkdev_put(device->bdev, device->mode); 650 651 rcu_string_free(device->name); 652 kfree(device); 653 } 654 655 static void free_device(struct rcu_head *head) 656 { 657 struct btrfs_device *device; 658 659 device = container_of(head, struct btrfs_device, rcu); 660 661 INIT_WORK(&device->rcu_work, __free_device); 662 schedule_work(&device->rcu_work); 663 } 664 665 static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) 666 { 667 struct btrfs_device *device; 668 669 if (--fs_devices->opened > 0) 670 return 0; 671 672 mutex_lock(&fs_devices->device_list_mutex); 673 list_for_each_entry(device, &fs_devices->devices, dev_list) { 674 struct btrfs_device *new_device; 675 struct rcu_string *name; 676 677 if (device->bdev) 678 fs_devices->open_devices--; 679 680 if (device->writeable && 681 device->devid != BTRFS_DEV_REPLACE_DEVID) { 682 list_del_init(&device->dev_alloc_list); 683 fs_devices->rw_devices--; 684 } 685 686 if (device->can_discard) 687 fs_devices->num_can_discard--; 688 if (device->missing) 689 fs_devices->missing_devices--; 690 691 new_device = btrfs_alloc_device(NULL, &device->devid, 692 device->uuid); 693 BUG_ON(IS_ERR(new_device)); /* -ENOMEM */ 694 695 /* Safe because we are under uuid_mutex */ 696 if (device->name) { 697 name = rcu_string_strdup(device->name->str, GFP_NOFS); 698 BUG_ON(!name); /* -ENOMEM */ 699 rcu_assign_pointer(new_device->name, name); 700 } 701 702 list_replace_rcu(&device->dev_list, &new_device->dev_list); 703 new_device->fs_devices = device->fs_devices; 704 705 call_rcu(&device->rcu, free_device); 706 } 707 mutex_unlock(&fs_devices->device_list_mutex); 708 709 WARN_ON(fs_devices->open_devices); 710 WARN_ON(fs_devices->rw_devices); 711 fs_devices->opened = 0; 712 fs_devices->seeding = 0; 713 714 return 0; 715 } 716 717 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices) 718 { 719 struct btrfs_fs_devices *seed_devices = NULL; 720 int ret; 721 722 mutex_lock(&uuid_mutex); 723 ret = __btrfs_close_devices(fs_devices); 724 if (!fs_devices->opened) { 725 seed_devices = fs_devices->seed; 726 fs_devices->seed = NULL; 727 } 728 mutex_unlock(&uuid_mutex); 729 730 while (seed_devices) { 731 fs_devices = seed_devices; 732 seed_devices = fs_devices->seed; 733 __btrfs_close_devices(fs_devices); 734 free_fs_devices(fs_devices); 735 } 736 /* 737 * Wait for rcu kworkers under __btrfs_close_devices 738 * to finish all blkdev_puts so device is really 739 * free when umount is done. 
740 */ 741 rcu_barrier(); 742 return ret; 743 } 744 745 static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, 746 fmode_t flags, void *holder) 747 { 748 struct request_queue *q; 749 struct block_device *bdev; 750 struct list_head *head = &fs_devices->devices; 751 struct btrfs_device *device; 752 struct block_device *latest_bdev = NULL; 753 struct buffer_head *bh; 754 struct btrfs_super_block *disk_super; 755 u64 latest_devid = 0; 756 u64 latest_transid = 0; 757 u64 devid; 758 int seeding = 1; 759 int ret = 0; 760 761 flags |= FMODE_EXCL; 762 763 list_for_each_entry(device, head, dev_list) { 764 if (device->bdev) 765 continue; 766 if (!device->name) 767 continue; 768 769 /* Just open everything we can; ignore failures here */ 770 if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1, 771 &bdev, &bh)) 772 continue; 773 774 disk_super = (struct btrfs_super_block *)bh->b_data; 775 devid = btrfs_stack_device_id(&disk_super->dev_item); 776 if (devid != device->devid) 777 goto error_brelse; 778 779 if (memcmp(device->uuid, disk_super->dev_item.uuid, 780 BTRFS_UUID_SIZE)) 781 goto error_brelse; 782 783 device->generation = btrfs_super_generation(disk_super); 784 if (!latest_transid || device->generation > latest_transid) { 785 latest_devid = devid; 786 latest_transid = device->generation; 787 latest_bdev = bdev; 788 } 789 790 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) { 791 device->writeable = 0; 792 } else { 793 device->writeable = !bdev_read_only(bdev); 794 seeding = 0; 795 } 796 797 q = bdev_get_queue(bdev); 798 if (blk_queue_discard(q)) { 799 device->can_discard = 1; 800 fs_devices->num_can_discard++; 801 } 802 803 device->bdev = bdev; 804 device->in_fs_metadata = 0; 805 device->mode = flags; 806 807 if (!blk_queue_nonrot(bdev_get_queue(bdev))) 808 fs_devices->rotating = 1; 809 810 fs_devices->open_devices++; 811 if (device->writeable && 812 device->devid != BTRFS_DEV_REPLACE_DEVID) { 813 fs_devices->rw_devices++; 814 list_add(&device->dev_alloc_list, 815 &fs_devices->alloc_list); 816 } 817 brelse(bh); 818 continue; 819 820 error_brelse: 821 brelse(bh); 822 blkdev_put(bdev, flags); 823 continue; 824 } 825 if (fs_devices->open_devices == 0) { 826 ret = -EINVAL; 827 goto out; 828 } 829 fs_devices->seeding = seeding; 830 fs_devices->opened = 1; 831 fs_devices->latest_bdev = latest_bdev; 832 fs_devices->latest_devid = latest_devid; 833 fs_devices->latest_trans = latest_transid; 834 fs_devices->total_rw_bytes = 0; 835 out: 836 return ret; 837 } 838 839 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, 840 fmode_t flags, void *holder) 841 { 842 int ret; 843 844 mutex_lock(&uuid_mutex); 845 if (fs_devices->opened) { 846 fs_devices->opened++; 847 ret = 0; 848 } else { 849 ret = __btrfs_open_devices(fs_devices, flags, holder); 850 } 851 mutex_unlock(&uuid_mutex); 852 return ret; 853 } 854 855 /* 856 * Look for a btrfs signature on a device. This may be called out of the mount path 857 * and we are not allowed to call set_blocksize during the scan. 
/*
 * Look for a btrfs signature on a device.  This may be called out of
 * the mount path and we are not allowed to call set_blocksize during
 * the scan.  The superblock is read via the pagecache.
 */
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct page *page;
	void *p;
	int ret = -EINVAL;
	u64 devid;
	u64 transid;
	u64 total_devices;
	u64 bytenr;
	pgoff_t index;

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	bytenr = btrfs_sb_offset(0);
	flags |= FMODE_EXCL;
	mutex_lock(&uuid_mutex);

	bdev = blkdev_get_by_path(path, flags, holder);

	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	/* make sure our super fits in the device */
	if (bytenr + PAGE_CACHE_SIZE >= i_size_read(bdev->bd_inode))
		goto error_bdev_put;

	/* make sure our super fits in the page */
	if (sizeof(*disk_super) > PAGE_CACHE_SIZE)
		goto error_bdev_put;

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_CACHE_SHIFT;
	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_CACHE_SHIFT != index)
		goto error_bdev_put;

	/* pull in the page with our super */
	page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
				   index, GFP_NOFS);

	if (IS_ERR_OR_NULL(page))
		goto error_bdev_put;

	p = kmap(page);

	/* align our pointer to the offset of the super block */
	disk_super = p + (bytenr & ~PAGE_CACHE_MASK);

	if (btrfs_super_bytenr(disk_super) != bytenr ||
	    btrfs_super_magic(disk_super) != BTRFS_MAGIC)
		goto error_unmap;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	total_devices = btrfs_super_num_devices(disk_super);

	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
	if (ret > 0) {
		if (disk_super->label[0]) {
			if (disk_super->label[BTRFS_LABEL_SIZE - 1])
				disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
			printk(KERN_INFO "BTRFS: device label %s ", disk_super->label);
		} else {
			printk(KERN_INFO "BTRFS: device fsid %pU ", disk_super->fsid);
		}

		printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
		ret = 0;
	}
	if (!ret && fs_devices_ret)
		(*fs_devices_ret)->total_devices = total_devices;

error_unmap:
	kunmap(page);
	page_cache_release(page);

error_bdev_put:
	blkdev_put(bdev, flags);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}
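/*
 * Illustrative sketch (not part of the build): the device-scan path boils
 * down to the following, with ret == 0 meaning the device carried a valid
 * btrfs super block and is now registered under its fsid:
 *
 *	struct btrfs_fs_devices *fs_devices;
 *	int ret;
 *
 *	ret = btrfs_scan_one_device(path, FMODE_READ, holder, &fs_devices);
 *	if (ret)
 *		return ret;
 *	...fs_devices->total_devices now mirrors the super block...
 */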
/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int contains_pending_extent(struct btrfs_trans_handle *trans,
				   struct btrfs_device *device,
				   u64 *start, u64 len)
{
	struct extent_map *em;
	int ret = 0;

	list_for_each_entry(em, &trans->transaction->pending_chunks, list) {
		struct map_lookup *map;
		int i;

		map = (struct map_lookup *)em->bdev;
		for (i = 0; i < map->num_stripes; i++) {
			if (map->stripes[i].dev != device)
				continue;
			if (map->stripes[i].physical >= *start + len ||
			    map->stripes[i].physical + em->orig_block_len <=
			    *start)
				continue;
			*start = map->stripes[i].physical +
				em->orig_block_len;
			ret = 1;
		}
	}

	return ret;
}
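/*
 * Worked example (illustrative): with a single dev extent at [4M, 8M) on
 * the device, btrfs_account_dev_extents_size(device, 6M, 10M - 1, &len)
 * counts only the overlap and yields len == 2M.  For the pending check:
 * if an uncommitted chunk has a stripe at 16M with orig_block_len == 4M,
 * then contains_pending_extent(trans, device, &start, sz) called with
 * start == 18M bumps start to 20M and returns 1, telling the caller to
 * retry its search past the in-flight allocation.
 */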
1083 */ 1084 int find_free_dev_extent(struct btrfs_trans_handle *trans, 1085 struct btrfs_device *device, u64 num_bytes, 1086 u64 *start, u64 *len) 1087 { 1088 struct btrfs_key key; 1089 struct btrfs_root *root = device->dev_root; 1090 struct btrfs_dev_extent *dev_extent; 1091 struct btrfs_path *path; 1092 u64 hole_size; 1093 u64 max_hole_start; 1094 u64 max_hole_size; 1095 u64 extent_end; 1096 u64 search_start; 1097 u64 search_end = device->total_bytes; 1098 int ret; 1099 int slot; 1100 struct extent_buffer *l; 1101 1102 /* FIXME use last free of some kind */ 1103 1104 /* we don't want to overwrite the superblock on the drive, 1105 * so we make sure to start at an offset of at least 1MB 1106 */ 1107 search_start = max(root->fs_info->alloc_start, 1024ull * 1024); 1108 1109 path = btrfs_alloc_path(); 1110 if (!path) 1111 return -ENOMEM; 1112 again: 1113 max_hole_start = search_start; 1114 max_hole_size = 0; 1115 hole_size = 0; 1116 1117 if (search_start >= search_end || device->is_tgtdev_for_dev_replace) { 1118 ret = -ENOSPC; 1119 goto out; 1120 } 1121 1122 path->reada = 2; 1123 path->search_commit_root = 1; 1124 path->skip_locking = 1; 1125 1126 key.objectid = device->devid; 1127 key.offset = search_start; 1128 key.type = BTRFS_DEV_EXTENT_KEY; 1129 1130 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 1131 if (ret < 0) 1132 goto out; 1133 if (ret > 0) { 1134 ret = btrfs_previous_item(root, path, key.objectid, key.type); 1135 if (ret < 0) 1136 goto out; 1137 } 1138 1139 while (1) { 1140 l = path->nodes[0]; 1141 slot = path->slots[0]; 1142 if (slot >= btrfs_header_nritems(l)) { 1143 ret = btrfs_next_leaf(root, path); 1144 if (ret == 0) 1145 continue; 1146 if (ret < 0) 1147 goto out; 1148 1149 break; 1150 } 1151 btrfs_item_key_to_cpu(l, &key, slot); 1152 1153 if (key.objectid < device->devid) 1154 goto next; 1155 1156 if (key.objectid > device->devid) 1157 break; 1158 1159 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) 1160 goto next; 1161 1162 if (key.offset > search_start) { 1163 hole_size = key.offset - search_start; 1164 1165 /* 1166 * Have to check before we set max_hole_start, otherwise 1167 * we could end up sending back this offset anyway. 1168 */ 1169 if (contains_pending_extent(trans, device, 1170 &search_start, 1171 hole_size)) 1172 hole_size = 0; 1173 1174 if (hole_size > max_hole_size) { 1175 max_hole_start = search_start; 1176 max_hole_size = hole_size; 1177 } 1178 1179 /* 1180 * If this free space is greater than which we need, 1181 * it must be the max free space that we have found 1182 * until now, so max_hole_start must point to the start 1183 * of this free space and the length of this free space 1184 * is stored in max_hole_size. Thus, we return 1185 * max_hole_start and max_hole_size and go back to the 1186 * caller. 1187 */ 1188 if (hole_size >= num_bytes) { 1189 ret = 0; 1190 goto out; 1191 } 1192 } 1193 1194 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 1195 extent_end = key.offset + btrfs_dev_extent_length(l, 1196 dev_extent); 1197 if (extent_end > search_start) 1198 search_start = extent_end; 1199 next: 1200 path->slots[0]++; 1201 cond_resched(); 1202 } 1203 1204 /* 1205 * At this point, search_start should be the end of 1206 * allocated dev extents, and when shrinking the device, 1207 * search_end may be smaller than search_start. 
1208 */ 1209 if (search_end > search_start) 1210 hole_size = search_end - search_start; 1211 1212 if (hole_size > max_hole_size) { 1213 max_hole_start = search_start; 1214 max_hole_size = hole_size; 1215 } 1216 1217 if (contains_pending_extent(trans, device, &search_start, hole_size)) { 1218 btrfs_release_path(path); 1219 goto again; 1220 } 1221 1222 /* See above. */ 1223 if (hole_size < num_bytes) 1224 ret = -ENOSPC; 1225 else 1226 ret = 0; 1227 1228 out: 1229 btrfs_free_path(path); 1230 *start = max_hole_start; 1231 if (len) 1232 *len = max_hole_size; 1233 return ret; 1234 } 1235 1236 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans, 1237 struct btrfs_device *device, 1238 u64 start) 1239 { 1240 int ret; 1241 struct btrfs_path *path; 1242 struct btrfs_root *root = device->dev_root; 1243 struct btrfs_key key; 1244 struct btrfs_key found_key; 1245 struct extent_buffer *leaf = NULL; 1246 struct btrfs_dev_extent *extent = NULL; 1247 1248 path = btrfs_alloc_path(); 1249 if (!path) 1250 return -ENOMEM; 1251 1252 key.objectid = device->devid; 1253 key.offset = start; 1254 key.type = BTRFS_DEV_EXTENT_KEY; 1255 again: 1256 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1257 if (ret > 0) { 1258 ret = btrfs_previous_item(root, path, key.objectid, 1259 BTRFS_DEV_EXTENT_KEY); 1260 if (ret) 1261 goto out; 1262 leaf = path->nodes[0]; 1263 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 1264 extent = btrfs_item_ptr(leaf, path->slots[0], 1265 struct btrfs_dev_extent); 1266 BUG_ON(found_key.offset > start || found_key.offset + 1267 btrfs_dev_extent_length(leaf, extent) < start); 1268 key = found_key; 1269 btrfs_release_path(path); 1270 goto again; 1271 } else if (ret == 0) { 1272 leaf = path->nodes[0]; 1273 extent = btrfs_item_ptr(leaf, path->slots[0], 1274 struct btrfs_dev_extent); 1275 } else { 1276 btrfs_error(root->fs_info, ret, "Slot search failed"); 1277 goto out; 1278 } 1279 1280 if (device->bytes_used > 0) { 1281 u64 len = btrfs_dev_extent_length(leaf, extent); 1282 device->bytes_used -= len; 1283 spin_lock(&root->fs_info->free_chunk_lock); 1284 root->fs_info->free_chunk_space += len; 1285 spin_unlock(&root->fs_info->free_chunk_lock); 1286 } 1287 ret = btrfs_del_item(trans, root, path); 1288 if (ret) { 1289 btrfs_error(root->fs_info, ret, 1290 "Failed to remove dev extent item"); 1291 } 1292 out: 1293 btrfs_free_path(path); 1294 return ret; 1295 } 1296 1297 static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans, 1298 struct btrfs_device *device, 1299 u64 chunk_tree, u64 chunk_objectid, 1300 u64 chunk_offset, u64 start, u64 num_bytes) 1301 { 1302 int ret; 1303 struct btrfs_path *path; 1304 struct btrfs_root *root = device->dev_root; 1305 struct btrfs_dev_extent *extent; 1306 struct extent_buffer *leaf; 1307 struct btrfs_key key; 1308 1309 WARN_ON(!device->in_fs_metadata); 1310 WARN_ON(device->is_tgtdev_for_dev_replace); 1311 path = btrfs_alloc_path(); 1312 if (!path) 1313 return -ENOMEM; 1314 1315 key.objectid = device->devid; 1316 key.offset = start; 1317 key.type = BTRFS_DEV_EXTENT_KEY; 1318 ret = btrfs_insert_empty_item(trans, root, path, &key, 1319 sizeof(*extent)); 1320 if (ret) 1321 goto out; 1322 1323 leaf = path->nodes[0]; 1324 extent = btrfs_item_ptr(leaf, path->slots[0], 1325 struct btrfs_dev_extent); 1326 btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree); 1327 btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid); 1328 btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset); 1329 1330 write_extent_buffer(leaf, 
static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_tree, u64 chunk_objectid,
				  u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	WARN_ON(device->is_tgtdev_for_dev_replace);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    btrfs_dev_extent_chunk_tree_uuid(extent), BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;

	em_tree = &fs_info->mapping_tree.map_tree;
	read_lock(&em_tree->lock);
	n = rb_last(&em_tree->map);
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
	}
	read_unlock(&em_tree->lock);

	return ret;
}

static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
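/*
 * Illustrative sketch (not part of the build): during chunk creation the
 * allocator pairs find_free_dev_extent() with btrfs_alloc_dev_extent()
 * above, roughly:
 *
 *	ret = find_free_dev_extent(trans, device, num_bytes,
 *				   &dev_offset, NULL);
 *	if (ret)
 *		return ret;
 *	ret = btrfs_alloc_dev_extent(trans, device, chunk_tree,
 *				     chunk_objectid, chunk_offset,
 *				     dev_offset, num_bytes);
 *
 * i.e. first find a hole large enough, then pin it with a dev extent item
 * that points back at the owning chunk.
 */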
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
static int btrfs_add_device(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Function to update ctime/mtime for a given device path.
 * Mainly used for ctime/mtime based probes like libblkid.
 */
static void update_dev_time(char *path_name)
{
	struct file *filp;

	filp = filp_open(path_name, O_RDWR, 0);
	/* filp_open() returns an ERR_PTR on failure, never NULL */
	if (IS_ERR(filp))
		return;
	file_update_time(filp);
	filp_close(filp, NULL);
	return;
}

static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;
	lock_chunks(root);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);
	return ret;
}
1578 */ 1579 list_for_each_entry(tmp, devices, dev_list) { 1580 if (tmp->in_fs_metadata && 1581 !tmp->is_tgtdev_for_dev_replace && 1582 !tmp->bdev) { 1583 device = tmp; 1584 break; 1585 } 1586 } 1587 bdev = NULL; 1588 bh = NULL; 1589 disk_super = NULL; 1590 if (!device) { 1591 ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND; 1592 goto out; 1593 } 1594 } else { 1595 ret = btrfs_get_bdev_and_sb(device_path, 1596 FMODE_WRITE | FMODE_EXCL, 1597 root->fs_info->bdev_holder, 0, 1598 &bdev, &bh); 1599 if (ret) 1600 goto out; 1601 disk_super = (struct btrfs_super_block *)bh->b_data; 1602 devid = btrfs_stack_device_id(&disk_super->dev_item); 1603 dev_uuid = disk_super->dev_item.uuid; 1604 device = btrfs_find_device(root->fs_info, devid, dev_uuid, 1605 disk_super->fsid); 1606 if (!device) { 1607 ret = -ENOENT; 1608 goto error_brelse; 1609 } 1610 } 1611 1612 if (device->is_tgtdev_for_dev_replace) { 1613 ret = BTRFS_ERROR_DEV_TGT_REPLACE; 1614 goto error_brelse; 1615 } 1616 1617 if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) { 1618 ret = BTRFS_ERROR_DEV_ONLY_WRITABLE; 1619 goto error_brelse; 1620 } 1621 1622 if (device->writeable) { 1623 lock_chunks(root); 1624 list_del_init(&device->dev_alloc_list); 1625 unlock_chunks(root); 1626 root->fs_info->fs_devices->rw_devices--; 1627 clear_super = true; 1628 } 1629 1630 mutex_unlock(&uuid_mutex); 1631 ret = btrfs_shrink_device(device, 0); 1632 mutex_lock(&uuid_mutex); 1633 if (ret) 1634 goto error_undo; 1635 1636 /* 1637 * TODO: the superblock still includes this device in its num_devices 1638 * counter although write_all_supers() is not locked out. This 1639 * could give a filesystem state which requires a degraded mount. 1640 */ 1641 ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device); 1642 if (ret) 1643 goto error_undo; 1644 1645 spin_lock(&root->fs_info->free_chunk_lock); 1646 root->fs_info->free_chunk_space = device->total_bytes - 1647 device->bytes_used; 1648 spin_unlock(&root->fs_info->free_chunk_lock); 1649 1650 device->in_fs_metadata = 0; 1651 btrfs_scrub_cancel_dev(root->fs_info, device); 1652 1653 /* 1654 * the device list mutex makes sure that we don't change 1655 * the device list while someone else is writing out all 1656 * the device supers. Whoever is writing all supers, should 1657 * lock the device list mutex before getting the number of 1658 * devices in the super block (super_copy). Conversely, 1659 * whoever updates the number of devices in the super block 1660 * (super_copy) should hold the device list mutex. 
1661 */ 1662 1663 cur_devices = device->fs_devices; 1664 mutex_lock(&root->fs_info->fs_devices->device_list_mutex); 1665 list_del_rcu(&device->dev_list); 1666 1667 device->fs_devices->num_devices--; 1668 device->fs_devices->total_devices--; 1669 1670 if (device->missing) 1671 root->fs_info->fs_devices->missing_devices--; 1672 1673 next_device = list_entry(root->fs_info->fs_devices->devices.next, 1674 struct btrfs_device, dev_list); 1675 if (device->bdev == root->fs_info->sb->s_bdev) 1676 root->fs_info->sb->s_bdev = next_device->bdev; 1677 if (device->bdev == root->fs_info->fs_devices->latest_bdev) 1678 root->fs_info->fs_devices->latest_bdev = next_device->bdev; 1679 1680 if (device->bdev) 1681 device->fs_devices->open_devices--; 1682 1683 call_rcu(&device->rcu, free_device); 1684 1685 num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1; 1686 btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices); 1687 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); 1688 1689 if (cur_devices->open_devices == 0) { 1690 struct btrfs_fs_devices *fs_devices; 1691 fs_devices = root->fs_info->fs_devices; 1692 while (fs_devices) { 1693 if (fs_devices->seed == cur_devices) { 1694 fs_devices->seed = cur_devices->seed; 1695 break; 1696 } 1697 fs_devices = fs_devices->seed; 1698 } 1699 cur_devices->seed = NULL; 1700 lock_chunks(root); 1701 __btrfs_close_devices(cur_devices); 1702 unlock_chunks(root); 1703 free_fs_devices(cur_devices); 1704 } 1705 1706 root->fs_info->num_tolerated_disk_barrier_failures = 1707 btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info); 1708 1709 /* 1710 * at this point, the device is zero sized. We want to 1711 * remove it from the devices list and zero out the old super 1712 */ 1713 if (clear_super && disk_super) { 1714 u64 bytenr; 1715 int i; 1716 1717 /* make sure this device isn't detected as part of 1718 * the FS anymore 1719 */ 1720 memset(&disk_super->magic, 0, sizeof(disk_super->magic)); 1721 set_buffer_dirty(bh); 1722 sync_dirty_buffer(bh); 1723 1724 /* clear the mirror copies of super block on the disk 1725 * being removed, 0th copy is been taken care above and 1726 * the below would take of the rest 1727 */ 1728 for (i = 1; i < BTRFS_SUPER_MIRROR_MAX; i++) { 1729 bytenr = btrfs_sb_offset(i); 1730 if (bytenr + BTRFS_SUPER_INFO_SIZE >= 1731 i_size_read(bdev->bd_inode)) 1732 break; 1733 1734 brelse(bh); 1735 bh = __bread(bdev, bytenr / 4096, 1736 BTRFS_SUPER_INFO_SIZE); 1737 if (!bh) 1738 continue; 1739 1740 disk_super = (struct btrfs_super_block *)bh->b_data; 1741 1742 if (btrfs_super_bytenr(disk_super) != bytenr || 1743 btrfs_super_magic(disk_super) != BTRFS_MAGIC) { 1744 continue; 1745 } 1746 memset(&disk_super->magic, 0, 1747 sizeof(disk_super->magic)); 1748 set_buffer_dirty(bh); 1749 sync_dirty_buffer(bh); 1750 } 1751 } 1752 1753 ret = 0; 1754 1755 if (bdev) { 1756 /* Notify udev that device has changed */ 1757 btrfs_kobject_uevent(bdev, KOBJ_CHANGE); 1758 1759 /* Update ctime/mtime for device path for libblkid */ 1760 update_dev_time(device_path); 1761 } 1762 1763 error_brelse: 1764 brelse(bh); 1765 if (bdev) 1766 blkdev_put(bdev, FMODE_READ | FMODE_EXCL); 1767 out: 1768 mutex_unlock(&uuid_mutex); 1769 return ret; 1770 error_undo: 1771 if (device->writeable) { 1772 lock_chunks(root); 1773 list_add(&device->dev_alloc_list, 1774 &root->fs_info->fs_devices->alloc_list); 1775 unlock_chunks(root); 1776 root->fs_info->fs_devices->rw_devices++; 1777 } 1778 goto error_brelse; 1779 } 1780 1781 void btrfs_rm_dev_replace_srcdev(struct 
void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
				 struct btrfs_device *srcdev)
{
	WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));

	list_del_rcu(&srcdev->dev_list);
	list_del_rcu(&srcdev->dev_alloc_list);
	fs_info->fs_devices->num_devices--;
	if (srcdev->missing) {
		fs_info->fs_devices->missing_devices--;
		fs_info->fs_devices->rw_devices++;
	}
	if (srcdev->can_discard)
		fs_info->fs_devices->num_can_discard--;
	if (srcdev->bdev) {
		fs_info->fs_devices->open_devices--;

		/* zero out the old super */
		btrfs_scratch_superblock(srcdev);
	}

	call_rcu(&srcdev->rcu, free_device);
}

void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
				      struct btrfs_device *tgtdev)
{
	struct btrfs_device *next_device;

	WARN_ON(!tgtdev);
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	if (tgtdev->bdev) {
		btrfs_scratch_superblock(tgtdev);
		fs_info->fs_devices->open_devices--;
	}
	fs_info->fs_devices->num_devices--;
	if (tgtdev->can_discard)
		fs_info->fs_devices->num_can_discard++;

	next_device = list_entry(fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (tgtdev->bdev == fs_info->sb->s_bdev)
		fs_info->sb->s_bdev = next_device->bdev;
	if (tgtdev->bdev == fs_info->fs_devices->latest_bdev)
		fs_info->fs_devices->latest_bdev = next_device->bdev;
	list_del_rcu(&tgtdev->dev_list);

	call_rcu(&tgtdev->rcu, free_device);

	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
}

static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
				     struct btrfs_device **device)
{
	int ret = 0;
	struct btrfs_super_block *disk_super;
	u64 devid;
	u8 *dev_uuid;
	struct block_device *bdev;
	struct buffer_head *bh;

	*device = NULL;
	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
				    root->fs_info->bdev_holder, 0, &bdev, &bh);
	if (ret)
		return ret;
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_uuid = disk_super->dev_item.uuid;
	*device = btrfs_find_device(root->fs_info, devid, dev_uuid,
				    disk_super->fsid);
	brelse(bh);
	if (!*device)
		ret = -ENOENT;
	blkdev_put(bdev, FMODE_READ);
	return ret;
}

int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
					 char *device_path,
					 struct btrfs_device **device)
{
	*device = NULL;
	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		devices = &root->fs_info->fs_devices->devices;
		/*
		 * It is safe to read the devices since the volume_mutex
		 * is held by the caller.
		 */
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
				*device = tmp;
				break;
			}
		}

		if (!*device) {
			btrfs_err(root->fs_info, "no missing device found");
			return -ENOENT;
		}

		return 0;
	} else {
		return btrfs_find_device_by_path(root, device_path, device);
	}
}
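/*
 * Illustrative sketch (not part of the build): callers resolving a
 * user-supplied device argument go through the helper above, where the
 * literal string "missing" selects a device that is recorded in the
 * metadata but currently has no block device behind it:
 *
 *	struct btrfs_device *dev;
 *	int ret;
 *
 *	ret = btrfs_find_device_missing_or_by_path(root, "missing", &dev);
 *	if (ret)
 *		return ret;
 *	...dev is the device to act on...
 */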
1894 */ 1895 static int btrfs_prepare_sprout(struct btrfs_root *root) 1896 { 1897 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; 1898 struct btrfs_fs_devices *old_devices; 1899 struct btrfs_fs_devices *seed_devices; 1900 struct btrfs_super_block *disk_super = root->fs_info->super_copy; 1901 struct btrfs_device *device; 1902 u64 super_flags; 1903 1904 BUG_ON(!mutex_is_locked(&uuid_mutex)); 1905 if (!fs_devices->seeding) 1906 return -EINVAL; 1907 1908 seed_devices = __alloc_fs_devices(); 1909 if (IS_ERR(seed_devices)) 1910 return PTR_ERR(seed_devices); 1911 1912 old_devices = clone_fs_devices(fs_devices); 1913 if (IS_ERR(old_devices)) { 1914 kfree(seed_devices); 1915 return PTR_ERR(old_devices); 1916 } 1917 1918 list_add(&old_devices->list, &fs_uuids); 1919 1920 memcpy(seed_devices, fs_devices, sizeof(*seed_devices)); 1921 seed_devices->opened = 1; 1922 INIT_LIST_HEAD(&seed_devices->devices); 1923 INIT_LIST_HEAD(&seed_devices->alloc_list); 1924 mutex_init(&seed_devices->device_list_mutex); 1925 1926 mutex_lock(&root->fs_info->fs_devices->device_list_mutex); 1927 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices, 1928 synchronize_rcu); 1929 1930 list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list); 1931 list_for_each_entry(device, &seed_devices->devices, dev_list) { 1932 device->fs_devices = seed_devices; 1933 } 1934 1935 fs_devices->seeding = 0; 1936 fs_devices->num_devices = 0; 1937 fs_devices->open_devices = 0; 1938 fs_devices->seed = seed_devices; 1939 1940 generate_random_uuid(fs_devices->fsid); 1941 memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); 1942 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); 1943 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); 1944 1945 super_flags = btrfs_super_flags(disk_super) & 1946 ~BTRFS_SUPER_FLAG_SEEDING; 1947 btrfs_set_super_flags(disk_super, super_flags); 1948 1949 return 0; 1950 } 1951 1952 /* 1953 * strore the expected generation for seed devices in device items. 
1954 */ 1955 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans, 1956 struct btrfs_root *root) 1957 { 1958 struct btrfs_path *path; 1959 struct extent_buffer *leaf; 1960 struct btrfs_dev_item *dev_item; 1961 struct btrfs_device *device; 1962 struct btrfs_key key; 1963 u8 fs_uuid[BTRFS_UUID_SIZE]; 1964 u8 dev_uuid[BTRFS_UUID_SIZE]; 1965 u64 devid; 1966 int ret; 1967 1968 path = btrfs_alloc_path(); 1969 if (!path) 1970 return -ENOMEM; 1971 1972 root = root->fs_info->chunk_root; 1973 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1974 key.offset = 0; 1975 key.type = BTRFS_DEV_ITEM_KEY; 1976 1977 while (1) { 1978 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 1979 if (ret < 0) 1980 goto error; 1981 1982 leaf = path->nodes[0]; 1983 next_slot: 1984 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 1985 ret = btrfs_next_leaf(root, path); 1986 if (ret > 0) 1987 break; 1988 if (ret < 0) 1989 goto error; 1990 leaf = path->nodes[0]; 1991 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 1992 btrfs_release_path(path); 1993 continue; 1994 } 1995 1996 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 1997 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID || 1998 key.type != BTRFS_DEV_ITEM_KEY) 1999 break; 2000 2001 dev_item = btrfs_item_ptr(leaf, path->slots[0], 2002 struct btrfs_dev_item); 2003 devid = btrfs_device_id(leaf, dev_item); 2004 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 2005 BTRFS_UUID_SIZE); 2006 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 2007 BTRFS_UUID_SIZE); 2008 device = btrfs_find_device(root->fs_info, devid, dev_uuid, 2009 fs_uuid); 2010 BUG_ON(!device); /* Logic error */ 2011 2012 if (device->fs_devices->seeding) { 2013 btrfs_set_device_generation(leaf, dev_item, 2014 device->generation); 2015 btrfs_mark_buffer_dirty(leaf); 2016 } 2017 2018 path->slots[0]++; 2019 goto next_slot; 2020 } 2021 ret = 0; 2022 error: 2023 btrfs_free_path(path); 2024 return ret; 2025 } 2026 2027 int btrfs_init_new_device(struct btrfs_root *root, char *device_path) 2028 { 2029 struct request_queue *q; 2030 struct btrfs_trans_handle *trans; 2031 struct btrfs_device *device; 2032 struct block_device *bdev; 2033 struct list_head *devices; 2034 struct super_block *sb = root->fs_info->sb; 2035 struct rcu_string *name; 2036 u64 total_bytes; 2037 int seeding_dev = 0; 2038 int ret = 0; 2039 2040 if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding) 2041 return -EROFS; 2042 2043 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, 2044 root->fs_info->bdev_holder); 2045 if (IS_ERR(bdev)) 2046 return PTR_ERR(bdev); 2047 2048 if (root->fs_info->fs_devices->seeding) { 2049 seeding_dev = 1; 2050 down_write(&sb->s_umount); 2051 mutex_lock(&uuid_mutex); 2052 } 2053 2054 filemap_write_and_wait(bdev->bd_inode->i_mapping); 2055 2056 devices = &root->fs_info->fs_devices->devices; 2057 2058 mutex_lock(&root->fs_info->fs_devices->device_list_mutex); 2059 list_for_each_entry(device, devices, dev_list) { 2060 if (device->bdev == bdev) { 2061 ret = -EEXIST; 2062 mutex_unlock( 2063 &root->fs_info->fs_devices->device_list_mutex); 2064 goto error; 2065 } 2066 } 2067 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); 2068 2069 device = btrfs_alloc_device(root->fs_info, NULL, NULL); 2070 if (IS_ERR(device)) { 2071 /* we can safely leave the fs_devices entry around */ 2072 ret = PTR_ERR(device); 2073 goto error; 2074 } 2075 2076 name = rcu_string_strdup(device_path, GFP_NOFS); 2077 if (!name) { 2078 kfree(device); 2079 ret = -ENOMEM; 2080 goto 
error; 2081 } 2082 rcu_assign_pointer(device->name, name); 2083 2084 trans = btrfs_start_transaction(root, 0); 2085 if (IS_ERR(trans)) { 2086 rcu_string_free(device->name); 2087 kfree(device); 2088 ret = PTR_ERR(trans); 2089 goto error; 2090 } 2091 2092 lock_chunks(root); 2093 2094 q = bdev_get_queue(bdev); 2095 if (blk_queue_discard(q)) 2096 device->can_discard = 1; 2097 device->writeable = 1; 2098 device->generation = trans->transid; 2099 device->io_width = root->sectorsize; 2100 device->io_align = root->sectorsize; 2101 device->sector_size = root->sectorsize; 2102 device->total_bytes = i_size_read(bdev->bd_inode); 2103 device->disk_total_bytes = device->total_bytes; 2104 device->dev_root = root->fs_info->dev_root; 2105 device->bdev = bdev; 2106 device->in_fs_metadata = 1; 2107 device->is_tgtdev_for_dev_replace = 0; 2108 device->mode = FMODE_EXCL; 2109 device->dev_stats_valid = 1; 2110 set_blocksize(device->bdev, 4096); 2111 2112 if (seeding_dev) { 2113 sb->s_flags &= ~MS_RDONLY; 2114 ret = btrfs_prepare_sprout(root); 2115 BUG_ON(ret); /* -ENOMEM */ 2116 } 2117 2118 device->fs_devices = root->fs_info->fs_devices; 2119 2120 mutex_lock(&root->fs_info->fs_devices->device_list_mutex); 2121 list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices); 2122 list_add(&device->dev_alloc_list, 2123 &root->fs_info->fs_devices->alloc_list); 2124 root->fs_info->fs_devices->num_devices++; 2125 root->fs_info->fs_devices->open_devices++; 2126 root->fs_info->fs_devices->rw_devices++; 2127 root->fs_info->fs_devices->total_devices++; 2128 if (device->can_discard) 2129 root->fs_info->fs_devices->num_can_discard++; 2130 root->fs_info->fs_devices->total_rw_bytes += device->total_bytes; 2131 2132 spin_lock(&root->fs_info->free_chunk_lock); 2133 root->fs_info->free_chunk_space += device->total_bytes; 2134 spin_unlock(&root->fs_info->free_chunk_lock); 2135 2136 if (!blk_queue_nonrot(bdev_get_queue(bdev))) 2137 root->fs_info->fs_devices->rotating = 1; 2138 2139 total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy); 2140 btrfs_set_super_total_bytes(root->fs_info->super_copy, 2141 total_bytes + device->total_bytes); 2142 2143 total_bytes = btrfs_super_num_devices(root->fs_info->super_copy); 2144 btrfs_set_super_num_devices(root->fs_info->super_copy, 2145 total_bytes + 1); 2146 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); 2147 2148 if (seeding_dev) { 2149 ret = init_first_rw_device(trans, root, device); 2150 if (ret) { 2151 btrfs_abort_transaction(trans, root, ret); 2152 goto error_trans; 2153 } 2154 ret = btrfs_finish_sprout(trans, root); 2155 if (ret) { 2156 btrfs_abort_transaction(trans, root, ret); 2157 goto error_trans; 2158 } 2159 } else { 2160 ret = btrfs_add_device(trans, root, device); 2161 if (ret) { 2162 btrfs_abort_transaction(trans, root, ret); 2163 goto error_trans; 2164 } 2165 } 2166 2167 /* 2168 * we've got more storage, clear any full flags on the space 2169 * infos 2170 */ 2171 btrfs_clear_space_info_full(root->fs_info); 2172 2173 unlock_chunks(root); 2174 root->fs_info->num_tolerated_disk_barrier_failures = 2175 btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info); 2176 ret = btrfs_commit_transaction(trans, root); 2177 2178 if (seeding_dev) { 2179 mutex_unlock(&uuid_mutex); 2180 up_write(&sb->s_umount); 2181 2182 if (ret) /* transaction commit */ 2183 return ret; 2184 2185 ret = btrfs_relocate_sys_chunks(root); 2186 if (ret < 0) 2187 btrfs_error(root->fs_info, ret, 2188 "Failed to relocate sys chunks after " 2189 "device initialization. 
This can be fixed " 2190 "using the \"btrfs balance\" command."); 2191 trans = btrfs_attach_transaction(root); 2192 if (IS_ERR(trans)) { 2193 if (PTR_ERR(trans) == -ENOENT) 2194 return 0; 2195 return PTR_ERR(trans); 2196 } 2197 ret = btrfs_commit_transaction(trans, root); 2198 } 2199 2200 /* Update ctime/mtime for libblkid */ 2201 update_dev_time(device_path); 2202 return ret; 2203 2204 error_trans: 2205 unlock_chunks(root); 2206 btrfs_end_transaction(trans, root); 2207 rcu_string_free(device->name); 2208 kfree(device); 2209 error: 2210 blkdev_put(bdev, FMODE_EXCL); 2211 if (seeding_dev) { 2212 mutex_unlock(&uuid_mutex); 2213 up_write(&sb->s_umount); 2214 } 2215 return ret; 2216 } 2217 2218 int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path, 2219 struct btrfs_device **device_out) 2220 { 2221 struct request_queue *q; 2222 struct btrfs_device *device; 2223 struct block_device *bdev; 2224 struct btrfs_fs_info *fs_info = root->fs_info; 2225 struct list_head *devices; 2226 struct rcu_string *name; 2227 u64 devid = BTRFS_DEV_REPLACE_DEVID; 2228 int ret = 0; 2229 2230 *device_out = NULL; 2231 if (fs_info->fs_devices->seeding) 2232 return -EINVAL; 2233 2234 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, 2235 fs_info->bdev_holder); 2236 if (IS_ERR(bdev)) 2237 return PTR_ERR(bdev); 2238 2239 filemap_write_and_wait(bdev->bd_inode->i_mapping); 2240 2241 devices = &fs_info->fs_devices->devices; 2242 list_for_each_entry(device, devices, dev_list) { 2243 if (device->bdev == bdev) { 2244 ret = -EEXIST; 2245 goto error; 2246 } 2247 } 2248 2249 device = btrfs_alloc_device(NULL, &devid, NULL); 2250 if (IS_ERR(device)) { 2251 ret = PTR_ERR(device); 2252 goto error; 2253 } 2254 2255 name = rcu_string_strdup(device_path, GFP_NOFS); 2256 if (!name) { 2257 kfree(device); 2258 ret = -ENOMEM; 2259 goto error; 2260 } 2261 rcu_assign_pointer(device->name, name); 2262 2263 q = bdev_get_queue(bdev); 2264 if (blk_queue_discard(q)) 2265 device->can_discard = 1; 2266 mutex_lock(&root->fs_info->fs_devices->device_list_mutex); 2267 device->writeable = 1; 2268 device->generation = 0; 2269 device->io_width = root->sectorsize; 2270 device->io_align = root->sectorsize; 2271 device->sector_size = root->sectorsize; 2272 device->total_bytes = i_size_read(bdev->bd_inode); 2273 device->disk_total_bytes = device->total_bytes; 2274 device->dev_root = fs_info->dev_root; 2275 device->bdev = bdev; 2276 device->in_fs_metadata = 1; 2277 device->is_tgtdev_for_dev_replace = 1; 2278 device->mode = FMODE_EXCL; 2279 device->dev_stats_valid = 1; 2280 set_blocksize(device->bdev, 4096); 2281 device->fs_devices = fs_info->fs_devices; 2282 list_add(&device->dev_list, &fs_info->fs_devices->devices); 2283 fs_info->fs_devices->num_devices++; 2284 fs_info->fs_devices->open_devices++; 2285 if (device->can_discard) 2286 fs_info->fs_devices->num_can_discard++; 2287 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); 2288 2289 *device_out = device; 2290 return ret; 2291 2292 error: 2293 blkdev_put(bdev, FMODE_EXCL); 2294 return ret; 2295 } 2296 2297 void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info, 2298 struct btrfs_device *tgtdev) 2299 { 2300 WARN_ON(fs_info->fs_devices->rw_devices == 0); 2301 tgtdev->io_width = fs_info->dev_root->sectorsize; 2302 tgtdev->io_align = fs_info->dev_root->sectorsize; 2303 tgtdev->sector_size = fs_info->dev_root->sectorsize; 2304 tgtdev->dev_root = fs_info->dev_root; 2305 tgtdev->in_fs_metadata = 1; 2306 } 2307 2308 static noinline int 
btrfs_update_device(struct btrfs_trans_handle *trans, 2309 struct btrfs_device *device) 2310 { 2311 int ret; 2312 struct btrfs_path *path; 2313 struct btrfs_root *root; 2314 struct btrfs_dev_item *dev_item; 2315 struct extent_buffer *leaf; 2316 struct btrfs_key key; 2317 2318 root = device->dev_root->fs_info->chunk_root; 2319 2320 path = btrfs_alloc_path(); 2321 if (!path) 2322 return -ENOMEM; 2323 2324 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2325 key.type = BTRFS_DEV_ITEM_KEY; 2326 key.offset = device->devid; 2327 2328 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2329 if (ret < 0) 2330 goto out; 2331 2332 if (ret > 0) { 2333 ret = -ENOENT; 2334 goto out; 2335 } 2336 2337 leaf = path->nodes[0]; 2338 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 2339 2340 btrfs_set_device_id(leaf, dev_item, device->devid); 2341 btrfs_set_device_type(leaf, dev_item, device->type); 2342 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 2343 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 2344 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 2345 btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes); 2346 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used); 2347 btrfs_mark_buffer_dirty(leaf); 2348 2349 out: 2350 btrfs_free_path(path); 2351 return ret; 2352 } 2353 2354 static int __btrfs_grow_device(struct btrfs_trans_handle *trans, 2355 struct btrfs_device *device, u64 new_size) 2356 { 2357 struct btrfs_super_block *super_copy = 2358 device->dev_root->fs_info->super_copy; 2359 u64 old_total = btrfs_super_total_bytes(super_copy); 2360 u64 diff = new_size - device->total_bytes; 2361 2362 if (!device->writeable) 2363 return -EACCES; 2364 if (new_size <= device->total_bytes || 2365 device->is_tgtdev_for_dev_replace) 2366 return -EINVAL; 2367 2368 btrfs_set_super_total_bytes(super_copy, old_total + diff); 2369 device->fs_devices->total_rw_bytes += diff; 2370 2371 device->total_bytes = new_size; 2372 device->disk_total_bytes = new_size; 2373 btrfs_clear_space_info_full(device->dev_root->fs_info); 2374 2375 return btrfs_update_device(trans, device); 2376 } 2377 2378 int btrfs_grow_device(struct btrfs_trans_handle *trans, 2379 struct btrfs_device *device, u64 new_size) 2380 { 2381 int ret; 2382 lock_chunks(device->dev_root); 2383 ret = __btrfs_grow_device(trans, device, new_size); 2384 unlock_chunks(device->dev_root); 2385 return ret; 2386 } 2387 2388 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, 2389 struct btrfs_root *root, 2390 u64 chunk_tree, u64 chunk_objectid, 2391 u64 chunk_offset) 2392 { 2393 int ret; 2394 struct btrfs_path *path; 2395 struct btrfs_key key; 2396 2397 root = root->fs_info->chunk_root; 2398 path = btrfs_alloc_path(); 2399 if (!path) 2400 return -ENOMEM; 2401 2402 key.objectid = chunk_objectid; 2403 key.offset = chunk_offset; 2404 key.type = BTRFS_CHUNK_ITEM_KEY; 2405 2406 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 2407 if (ret < 0) 2408 goto out; 2409 else if (ret > 0) { /* Logic error or corruption */ 2410 btrfs_error(root->fs_info, -ENOENT, 2411 "Failed lookup while freeing chunk."); 2412 ret = -ENOENT; 2413 goto out; 2414 } 2415 2416 ret = btrfs_del_item(trans, root, path); 2417 if (ret < 0) 2418 btrfs_error(root->fs_info, ret, 2419 "Failed to delete chunk item."); 2420 out: 2421 btrfs_free_path(path); 2422 return ret; 2423 } 2424 2425 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64 2426 chunk_offset) 2427 { 2428 struct 
btrfs_super_block *super_copy = root->fs_info->super_copy; 2429 struct btrfs_disk_key *disk_key; 2430 struct btrfs_chunk *chunk; 2431 u8 *ptr; 2432 int ret = 0; 2433 u32 num_stripes; 2434 u32 array_size; 2435 u32 len = 0; 2436 u32 cur; 2437 struct btrfs_key key; 2438 2439 array_size = btrfs_super_sys_array_size(super_copy); 2440 2441 ptr = super_copy->sys_chunk_array; 2442 cur = 0; 2443 2444 while (cur < array_size) { 2445 disk_key = (struct btrfs_disk_key *)ptr; 2446 btrfs_disk_key_to_cpu(&key, disk_key); 2447 2448 len = sizeof(*disk_key); 2449 2450 if (key.type == BTRFS_CHUNK_ITEM_KEY) { 2451 chunk = (struct btrfs_chunk *)(ptr + len); 2452 num_stripes = btrfs_stack_chunk_num_stripes(chunk); 2453 len += btrfs_chunk_item_size(num_stripes); 2454 } else { 2455 ret = -EIO; 2456 break; 2457 } 2458 if (key.objectid == chunk_objectid && 2459 key.offset == chunk_offset) { 2460 memmove(ptr, ptr + len, array_size - (cur + len)); 2461 array_size -= len; 2462 btrfs_set_super_sys_array_size(super_copy, array_size); 2463 } else { 2464 ptr += len; 2465 cur += len; 2466 } 2467 } 2468 return ret; 2469 } 2470 2471 static int btrfs_relocate_chunk(struct btrfs_root *root, 2472 u64 chunk_tree, u64 chunk_objectid, 2473 u64 chunk_offset) 2474 { 2475 struct extent_map_tree *em_tree; 2476 struct btrfs_root *extent_root; 2477 struct btrfs_trans_handle *trans; 2478 struct extent_map *em; 2479 struct map_lookup *map; 2480 int ret; 2481 int i; 2482 2483 root = root->fs_info->chunk_root; 2484 extent_root = root->fs_info->extent_root; 2485 em_tree = &root->fs_info->mapping_tree.map_tree; 2486 2487 ret = btrfs_can_relocate(extent_root, chunk_offset); 2488 if (ret) 2489 return -ENOSPC; 2490 2491 /* step one, relocate all the extents inside this chunk */ 2492 ret = btrfs_relocate_block_group(extent_root, chunk_offset); 2493 if (ret) 2494 return ret; 2495 2496 trans = btrfs_start_transaction(root, 0); 2497 if (IS_ERR(trans)) { 2498 ret = PTR_ERR(trans); 2499 btrfs_std_error(root->fs_info, ret); 2500 return ret; 2501 } 2502 2503 lock_chunks(root); 2504 2505 /* 2506 * step two, delete the device extents and the 2507 * chunk tree entries 2508 */ 2509 read_lock(&em_tree->lock); 2510 em = lookup_extent_mapping(em_tree, chunk_offset, 1); 2511 read_unlock(&em_tree->lock); 2512 2513 BUG_ON(!em || em->start > chunk_offset || 2514 em->start + em->len < chunk_offset); 2515 map = (struct map_lookup *)em->bdev; 2516 2517 for (i = 0; i < map->num_stripes; i++) { 2518 ret = btrfs_free_dev_extent(trans, map->stripes[i].dev, 2519 map->stripes[i].physical); 2520 BUG_ON(ret); 2521 2522 if (map->stripes[i].dev) { 2523 ret = btrfs_update_device(trans, map->stripes[i].dev); 2524 BUG_ON(ret); 2525 } 2526 } 2527 ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid, 2528 chunk_offset); 2529 2530 BUG_ON(ret); 2531 2532 trace_btrfs_chunk_free(root, map, chunk_offset, em->len); 2533 2534 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 2535 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset); 2536 BUG_ON(ret); 2537 } 2538 2539 ret = btrfs_remove_block_group(trans, extent_root, chunk_offset); 2540 BUG_ON(ret); 2541 2542 write_lock(&em_tree->lock); 2543 remove_extent_mapping(em_tree, em); 2544 write_unlock(&em_tree->lock); 2545 2546 /* once for the tree */ 2547 free_extent_map(em); 2548 /* once for us */ 2549 free_extent_map(em); 2550 2551 unlock_chunks(root); 2552 btrfs_end_transaction(trans, root); 2553 return 0; 2554 } 2555 2556 static int btrfs_relocate_sys_chunks(struct btrfs_root *root) 2557 { 2558 struct btrfs_root *chunk_root = 
root->fs_info->chunk_root; 2559 struct btrfs_path *path; 2560 struct extent_buffer *leaf; 2561 struct btrfs_chunk *chunk; 2562 struct btrfs_key key; 2563 struct btrfs_key found_key; 2564 u64 chunk_tree = chunk_root->root_key.objectid; 2565 u64 chunk_type; 2566 bool retried = false; 2567 int failed = 0; 2568 int ret; 2569 2570 path = btrfs_alloc_path(); 2571 if (!path) 2572 return -ENOMEM; 2573 2574 again: 2575 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2576 key.offset = (u64)-1; 2577 key.type = BTRFS_CHUNK_ITEM_KEY; 2578 2579 while (1) { 2580 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 2581 if (ret < 0) 2582 goto error; 2583 BUG_ON(ret == 0); /* Corruption */ 2584 2585 ret = btrfs_previous_item(chunk_root, path, key.objectid, 2586 key.type); 2587 if (ret < 0) 2588 goto error; 2589 if (ret > 0) 2590 break; 2591 2592 leaf = path->nodes[0]; 2593 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 2594 2595 chunk = btrfs_item_ptr(leaf, path->slots[0], 2596 struct btrfs_chunk); 2597 chunk_type = btrfs_chunk_type(leaf, chunk); 2598 btrfs_release_path(path); 2599 2600 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { 2601 ret = btrfs_relocate_chunk(chunk_root, chunk_tree, 2602 found_key.objectid, 2603 found_key.offset); 2604 if (ret == -ENOSPC) 2605 failed++; 2606 else if (ret) 2607 BUG(); 2608 } 2609 2610 if (found_key.offset == 0) 2611 break; 2612 key.offset = found_key.offset - 1; 2613 } 2614 ret = 0; 2615 if (failed && !retried) { 2616 failed = 0; 2617 retried = true; 2618 goto again; 2619 } else if (WARN_ON(failed && retried)) { 2620 ret = -ENOSPC; 2621 } 2622 error: 2623 btrfs_free_path(path); 2624 return ret; 2625 } 2626 2627 static int insert_balance_item(struct btrfs_root *root, 2628 struct btrfs_balance_control *bctl) 2629 { 2630 struct btrfs_trans_handle *trans; 2631 struct btrfs_balance_item *item; 2632 struct btrfs_disk_balance_args disk_bargs; 2633 struct btrfs_path *path; 2634 struct extent_buffer *leaf; 2635 struct btrfs_key key; 2636 int ret, err; 2637 2638 path = btrfs_alloc_path(); 2639 if (!path) 2640 return -ENOMEM; 2641 2642 trans = btrfs_start_transaction(root, 0); 2643 if (IS_ERR(trans)) { 2644 btrfs_free_path(path); 2645 return PTR_ERR(trans); 2646 } 2647 2648 key.objectid = BTRFS_BALANCE_OBJECTID; 2649 key.type = BTRFS_BALANCE_ITEM_KEY; 2650 key.offset = 0; 2651 2652 ret = btrfs_insert_empty_item(trans, root, path, &key, 2653 sizeof(*item)); 2654 if (ret) 2655 goto out; 2656 2657 leaf = path->nodes[0]; 2658 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 2659 2660 memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item)); 2661 2662 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data); 2663 btrfs_set_balance_data(leaf, item, &disk_bargs); 2664 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta); 2665 btrfs_set_balance_meta(leaf, item, &disk_bargs); 2666 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys); 2667 btrfs_set_balance_sys(leaf, item, &disk_bargs); 2668 2669 btrfs_set_balance_flags(leaf, item, bctl->flags); 2670 2671 btrfs_mark_buffer_dirty(leaf); 2672 out: 2673 btrfs_free_path(path); 2674 err = btrfs_commit_transaction(trans, root); 2675 if (err && !ret) 2676 ret = err; 2677 return ret; 2678 } 2679 2680 static int del_balance_item(struct btrfs_root *root) 2681 { 2682 struct btrfs_trans_handle *trans; 2683 struct btrfs_path *path; 2684 struct btrfs_key key; 2685 int ret, err; 2686 2687 path = btrfs_alloc_path(); 2688 if (!path) 2689 return -ENOMEM; 2690 2691 trans = btrfs_start_transaction(root, 
0); 2692 if (IS_ERR(trans)) { 2693 btrfs_free_path(path); 2694 return PTR_ERR(trans); 2695 } 2696 2697 key.objectid = BTRFS_BALANCE_OBJECTID; 2698 key.type = BTRFS_BALANCE_ITEM_KEY; 2699 key.offset = 0; 2700 2701 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 2702 if (ret < 0) 2703 goto out; 2704 if (ret > 0) { 2705 ret = -ENOENT; 2706 goto out; 2707 } 2708 2709 ret = btrfs_del_item(trans, root, path); 2710 out: 2711 btrfs_free_path(path); 2712 err = btrfs_commit_transaction(trans, root); 2713 if (err && !ret) 2714 ret = err; 2715 return ret; 2716 } 2717 2718 /* 2719 * This is a heuristic used to reduce the number of chunks balanced on 2720 * resume after balance was interrupted. 2721 */ 2722 static void update_balance_args(struct btrfs_balance_control *bctl) 2723 { 2724 /* 2725 * Turn on soft mode for chunk types that were being converted. 2726 */ 2727 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) 2728 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT; 2729 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) 2730 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT; 2731 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) 2732 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT; 2733 2734 /* 2735 * Turn on the usage filter if it is not already in use. The idea is 2736 * that chunks that we have already balanced should be 2737 * reasonably full. Don't do it for chunks that are being 2738 * converted - that will keep us from relocating unconverted 2739 * (albeit full) chunks. 2740 */ 2741 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) && 2742 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 2743 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE; 2744 bctl->data.usage = 90; 2745 } 2746 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) && 2747 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 2748 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE; 2749 bctl->sys.usage = 90; 2750 } 2751 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) && 2752 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 2753 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE; 2754 bctl->meta.usage = 90; 2755 } 2756 } 2757 2758 /* 2759 * Should be called with both balance and volume mutexes held to 2760 * serialize other volume operations (add_dev/rm_dev/resize) with 2761 * the restriper. Same goes for unset_balance_control. 2762 */ 2763 static void set_balance_control(struct btrfs_balance_control *bctl) 2764 { 2765 struct btrfs_fs_info *fs_info = bctl->fs_info; 2766 2767 BUG_ON(fs_info->balance_ctl); 2768 2769 spin_lock(&fs_info->balance_lock); 2770 fs_info->balance_ctl = bctl; 2771 spin_unlock(&fs_info->balance_lock); 2772 } 2773 2774 static void unset_balance_control(struct btrfs_fs_info *fs_info) 2775 { 2776 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 2777 2778 BUG_ON(!fs_info->balance_ctl); 2779 2780 spin_lock(&fs_info->balance_lock); 2781 fs_info->balance_ctl = NULL; 2782 spin_unlock(&fs_info->balance_lock); 2783 2784 kfree(bctl); 2785 } 2786 2787 /* 2788 * Balance filters. Return 1 if chunk should be filtered out 2789 * (should not be balanced).
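 * Each filter gets the on-disk chunk description plus the
 * btrfs_balance_args the user passed for that chunk type. For example,
 * chunk_profiles_filter() below reduces the chunk type to its extended
 * profile bit and keeps the chunk (returns 0) only when that bit is set
 * in bargs->profiles.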
2790 */ 2791 static int chunk_profiles_filter(u64 chunk_type, 2792 struct btrfs_balance_args *bargs) 2793 { 2794 chunk_type = chunk_to_extended(chunk_type) & 2795 BTRFS_EXTENDED_PROFILE_MASK; 2796 2797 if (bargs->profiles & chunk_type) 2798 return 0; 2799 2800 return 1; 2801 } 2802 2803 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, 2804 struct btrfs_balance_args *bargs) 2805 { 2806 struct btrfs_block_group_cache *cache; 2807 u64 chunk_used, user_thresh; 2808 int ret = 1; 2809 2810 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 2811 chunk_used = btrfs_block_group_used(&cache->item); 2812 2813 if (bargs->usage == 0) 2814 user_thresh = 1; 2815 else if (bargs->usage > 100) 2816 user_thresh = cache->key.offset; 2817 else 2818 user_thresh = div_factor_fine(cache->key.offset, 2819 bargs->usage); 2820 2821 if (chunk_used < user_thresh) 2822 ret = 0; 2823 2824 btrfs_put_block_group(cache); 2825 return ret; 2826 } 2827 2828 static int chunk_devid_filter(struct extent_buffer *leaf, 2829 struct btrfs_chunk *chunk, 2830 struct btrfs_balance_args *bargs) 2831 { 2832 struct btrfs_stripe *stripe; 2833 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 2834 int i; 2835 2836 for (i = 0; i < num_stripes; i++) { 2837 stripe = btrfs_stripe_nr(chunk, i); 2838 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid) 2839 return 0; 2840 } 2841 2842 return 1; 2843 } 2844 2845 /* [pstart, pend) */ 2846 static int chunk_drange_filter(struct extent_buffer *leaf, 2847 struct btrfs_chunk *chunk, 2848 u64 chunk_offset, 2849 struct btrfs_balance_args *bargs) 2850 { 2851 struct btrfs_stripe *stripe; 2852 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 2853 u64 stripe_offset; 2854 u64 stripe_length; 2855 int factor; 2856 int i; 2857 2858 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID)) 2859 return 0; 2860 2861 if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP | 2862 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) { 2863 factor = num_stripes / 2; 2864 } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) { 2865 factor = num_stripes - 1; 2866 } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) { 2867 factor = num_stripes - 2; 2868 } else { 2869 factor = num_stripes; 2870 } 2871 2872 for (i = 0; i < num_stripes; i++) { 2873 stripe = btrfs_stripe_nr(chunk, i); 2874 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid) 2875 continue; 2876 2877 stripe_offset = btrfs_stripe_offset(leaf, stripe); 2878 stripe_length = btrfs_chunk_length(leaf, chunk); 2879 do_div(stripe_length, factor); 2880 2881 if (stripe_offset < bargs->pend && 2882 stripe_offset + stripe_length > bargs->pstart) 2883 return 0; 2884 } 2885 2886 return 1; 2887 } 2888 2889 /* [vstart, vend) */ 2890 static int chunk_vrange_filter(struct extent_buffer *leaf, 2891 struct btrfs_chunk *chunk, 2892 u64 chunk_offset, 2893 struct btrfs_balance_args *bargs) 2894 { 2895 if (chunk_offset < bargs->vend && 2896 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart) 2897 /* at least part of the chunk is inside this vrange */ 2898 return 0; 2899 2900 return 1; 2901 } 2902 2903 static int chunk_soft_convert_filter(u64 chunk_type, 2904 struct btrfs_balance_args *bargs) 2905 { 2906 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 2907 return 0; 2908 2909 chunk_type = chunk_to_extended(chunk_type) & 2910 BTRFS_EXTENDED_PROFILE_MASK; 2911 2912 if (bargs->target == chunk_type) 2913 return 1; 2914 2915 return 0; 2916 } 2917 2918 static int should_balance_chunk(struct btrfs_root 
*root, 2919 struct extent_buffer *leaf, 2920 struct btrfs_chunk *chunk, u64 chunk_offset) 2921 { 2922 struct btrfs_balance_control *bctl = root->fs_info->balance_ctl; 2923 struct btrfs_balance_args *bargs = NULL; 2924 u64 chunk_type = btrfs_chunk_type(leaf, chunk); 2925 2926 /* type filter */ 2927 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) & 2928 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) { 2929 return 0; 2930 } 2931 2932 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 2933 bargs = &bctl->data; 2934 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 2935 bargs = &bctl->sys; 2936 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 2937 bargs = &bctl->meta; 2938 2939 /* profiles filter */ 2940 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) && 2941 chunk_profiles_filter(chunk_type, bargs)) { 2942 return 0; 2943 } 2944 2945 /* usage filter */ 2946 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) && 2947 chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) { 2948 return 0; 2949 } 2950 2951 /* devid filter */ 2952 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) && 2953 chunk_devid_filter(leaf, chunk, bargs)) { 2954 return 0; 2955 } 2956 2957 /* drange filter, makes sense only with devid filter */ 2958 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) && 2959 chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) { 2960 return 0; 2961 } 2962 2963 /* vrange filter */ 2964 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) && 2965 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) { 2966 return 0; 2967 } 2968 2969 /* soft profile changing mode */ 2970 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) && 2971 chunk_soft_convert_filter(chunk_type, bargs)) { 2972 return 0; 2973 } 2974 2975 /* 2976 * limited by count, must be the last filter 2977 */ 2978 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) { 2979 if (bargs->limit == 0) 2980 return 0; 2981 else 2982 bargs->limit--; 2983 } 2984 2985 return 1; 2986 } 2987 2988 static int __btrfs_balance(struct btrfs_fs_info *fs_info) 2989 { 2990 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 2991 struct btrfs_root *chunk_root = fs_info->chunk_root; 2992 struct btrfs_root *dev_root = fs_info->dev_root; 2993 struct list_head *devices; 2994 struct btrfs_device *device; 2995 u64 old_size; 2996 u64 size_to_free; 2997 struct btrfs_chunk *chunk; 2998 struct btrfs_path *path; 2999 struct btrfs_key key; 3000 struct btrfs_key found_key; 3001 struct btrfs_trans_handle *trans; 3002 struct extent_buffer *leaf; 3003 int slot; 3004 int ret; 3005 int enospc_errors = 0; 3006 bool counting = true; 3007 u64 limit_data = bctl->data.limit; 3008 u64 limit_meta = bctl->meta.limit; 3009 u64 limit_sys = bctl->sys.limit; 3010 3011 /* step one make some room on all the devices */ 3012 devices = &fs_info->fs_devices->devices; 3013 list_for_each_entry(device, devices, dev_list) { 3014 old_size = device->total_bytes; 3015 size_to_free = div_factor(old_size, 1); 3016 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024); 3017 if (!device->writeable || 3018 device->total_bytes - device->bytes_used > size_to_free || 3019 device->is_tgtdev_for_dev_replace) 3020 continue; 3021 3022 ret = btrfs_shrink_device(device, old_size - size_to_free); 3023 if (ret == -ENOSPC) 3024 break; 3025 BUG_ON(ret); 3026 3027 trans = btrfs_start_transaction(dev_root, 0); 3028 BUG_ON(IS_ERR(trans)); 3029 3030 ret = btrfs_grow_device(trans, device, old_size); 3031 BUG_ON(ret); 3032 3033 btrfs_end_transaction(trans, dev_root); 3034 } 3035 3036 /* step two, relocate all the chunks */ 3037 path = btrfs_alloc_path(); 3038 if 
(!path) { 3039 ret = -ENOMEM; 3040 goto error; 3041 } 3042 3043 /* zero out stat counters */ 3044 spin_lock(&fs_info->balance_lock); 3045 memset(&bctl->stat, 0, sizeof(bctl->stat)); 3046 spin_unlock(&fs_info->balance_lock); 3047 again: 3048 if (!counting) { 3049 bctl->data.limit = limit_data; 3050 bctl->meta.limit = limit_meta; 3051 bctl->sys.limit = limit_sys; 3052 } 3053 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3054 key.offset = (u64)-1; 3055 key.type = BTRFS_CHUNK_ITEM_KEY; 3056 3057 while (1) { 3058 if ((!counting && atomic_read(&fs_info->balance_pause_req)) || 3059 atomic_read(&fs_info->balance_cancel_req)) { 3060 ret = -ECANCELED; 3061 goto error; 3062 } 3063 3064 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3065 if (ret < 0) 3066 goto error; 3067 3068 /* 3069 * this shouldn't happen, it means the last relocate 3070 * failed 3071 */ 3072 if (ret == 0) 3073 BUG(); /* FIXME break ? */ 3074 3075 ret = btrfs_previous_item(chunk_root, path, 0, 3076 BTRFS_CHUNK_ITEM_KEY); 3077 if (ret) { 3078 ret = 0; 3079 break; 3080 } 3081 3082 leaf = path->nodes[0]; 3083 slot = path->slots[0]; 3084 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3085 3086 if (found_key.objectid != key.objectid) 3087 break; 3088 3089 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 3090 3091 if (!counting) { 3092 spin_lock(&fs_info->balance_lock); 3093 bctl->stat.considered++; 3094 spin_unlock(&fs_info->balance_lock); 3095 } 3096 3097 ret = should_balance_chunk(chunk_root, leaf, chunk, 3098 found_key.offset); 3099 btrfs_release_path(path); 3100 if (!ret) 3101 goto loop; 3102 3103 if (counting) { 3104 spin_lock(&fs_info->balance_lock); 3105 bctl->stat.expected++; 3106 spin_unlock(&fs_info->balance_lock); 3107 goto loop; 3108 } 3109 3110 ret = btrfs_relocate_chunk(chunk_root, 3111 chunk_root->root_key.objectid, 3112 found_key.objectid, 3113 found_key.offset); 3114 if (ret && ret != -ENOSPC) 3115 goto error; 3116 if (ret == -ENOSPC) { 3117 enospc_errors++; 3118 } else { 3119 spin_lock(&fs_info->balance_lock); 3120 bctl->stat.completed++; 3121 spin_unlock(&fs_info->balance_lock); 3122 } 3123 loop: 3124 if (found_key.offset == 0) 3125 break; 3126 key.offset = found_key.offset - 1; 3127 } 3128 3129 if (counting) { 3130 btrfs_release_path(path); 3131 counting = false; 3132 goto again; 3133 } 3134 error: 3135 btrfs_free_path(path); 3136 if (enospc_errors) { 3137 btrfs_info(fs_info, "%d enospc errors during balance", 3138 enospc_errors); 3139 if (!ret) 3140 ret = -ENOSPC; 3141 } 3142 3143 return ret; 3144 } 3145 3146 /** 3147 * alloc_profile_is_valid - see if a given profile is valid and reduced 3148 * @flags: profile to validate 3149 * @extended: if true @flags is treated as an extended profile 3150 */ 3151 static int alloc_profile_is_valid(u64 flags, int extended) 3152 { 3153 u64 mask = (extended ? 
BTRFS_EXTENDED_PROFILE_MASK : 3154 BTRFS_BLOCK_GROUP_PROFILE_MASK); 3155 3156 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK; 3157 3158 /* 1) check that all other bits are zeroed */ 3159 if (flags & ~mask) 3160 return 0; 3161 3162 /* 2) see if profile is reduced */ 3163 if (flags == 0) 3164 return !extended; /* "0" is valid for usual profiles */ 3165 3166 /* true if exactly one bit set */ 3167 return (flags & (flags - 1)) == 0; 3168 } 3169 3170 static inline int balance_need_close(struct btrfs_fs_info *fs_info) 3171 { 3172 /* cancel requested || normal exit path */ 3173 return atomic_read(&fs_info->balance_cancel_req) || 3174 (atomic_read(&fs_info->balance_pause_req) == 0 && 3175 atomic_read(&fs_info->balance_cancel_req) == 0); 3176 } 3177 3178 static void __cancel_balance(struct btrfs_fs_info *fs_info) 3179 { 3180 int ret; 3181 3182 unset_balance_control(fs_info); 3183 ret = del_balance_item(fs_info->tree_root); 3184 if (ret) 3185 btrfs_std_error(fs_info, ret); 3186 3187 atomic_set(&fs_info->mutually_exclusive_operation_running, 0); 3188 } 3189 3190 /* 3191 * Should be called with both balance and volume mutexes held 3192 */ 3193 int btrfs_balance(struct btrfs_balance_control *bctl, 3194 struct btrfs_ioctl_balance_args *bargs) 3195 { 3196 struct btrfs_fs_info *fs_info = bctl->fs_info; 3197 u64 allowed; 3198 int mixed = 0; 3199 int ret; 3200 u64 num_devices; 3201 unsigned seq; 3202 3203 if (btrfs_fs_closing(fs_info) || 3204 atomic_read(&fs_info->balance_pause_req) || 3205 atomic_read(&fs_info->balance_cancel_req)) { 3206 ret = -EINVAL; 3207 goto out; 3208 } 3209 3210 allowed = btrfs_super_incompat_flags(fs_info->super_copy); 3211 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) 3212 mixed = 1; 3213 3214 /* 3215 * In case of mixed groups both data and meta should be picked, 3216 * and identical options should be given for both of them. 
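 * With the MIXED_GROUPS incompat bit set, data and metadata share the
 * same block groups, so a balance cannot apply different args to each;
 * the check below rejects such requests with -EINVAL.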
3217 */ 3218 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA; 3219 if (mixed && (bctl->flags & allowed)) { 3220 if (!(bctl->flags & BTRFS_BALANCE_DATA) || 3221 !(bctl->flags & BTRFS_BALANCE_METADATA) || 3222 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) { 3223 btrfs_err(fs_info, "with mixed groups data and " 3224 "metadata balance options must be the same"); 3225 ret = -EINVAL; 3226 goto out; 3227 } 3228 } 3229 3230 num_devices = fs_info->fs_devices->num_devices; 3231 btrfs_dev_replace_lock(&fs_info->dev_replace); 3232 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) { 3233 BUG_ON(num_devices < 1); 3234 num_devices--; 3235 } 3236 btrfs_dev_replace_unlock(&fs_info->dev_replace); 3237 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE; 3238 if (num_devices == 1) 3239 allowed |= BTRFS_BLOCK_GROUP_DUP; 3240 else if (num_devices > 1) 3241 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1); 3242 if (num_devices > 2) 3243 allowed |= BTRFS_BLOCK_GROUP_RAID5; 3244 if (num_devices > 3) 3245 allowed |= (BTRFS_BLOCK_GROUP_RAID10 | 3246 BTRFS_BLOCK_GROUP_RAID6); 3247 if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) && 3248 (!alloc_profile_is_valid(bctl->data.target, 1) || 3249 (bctl->data.target & ~allowed))) { 3250 btrfs_err(fs_info, "unable to start balance with target " 3251 "data profile %llu", 3252 bctl->data.target); 3253 ret = -EINVAL; 3254 goto out; 3255 } 3256 if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && 3257 (!alloc_profile_is_valid(bctl->meta.target, 1) || 3258 (bctl->meta.target & ~allowed))) { 3259 btrfs_err(fs_info, 3260 "unable to start balance with target metadata profile %llu", 3261 bctl->meta.target); 3262 ret = -EINVAL; 3263 goto out; 3264 } 3265 if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) && 3266 (!alloc_profile_is_valid(bctl->sys.target, 1) || 3267 (bctl->sys.target & ~allowed))) { 3268 btrfs_err(fs_info, 3269 "unable to start balance with target system profile %llu", 3270 bctl->sys.target); 3271 ret = -EINVAL; 3272 goto out; 3273 } 3274 3275 /* allow dup'ed data chunks only in mixed mode */ 3276 if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) && 3277 (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) { 3278 btrfs_err(fs_info, "dup for data is not allowed"); 3279 ret = -EINVAL; 3280 goto out; 3281 } 3282 3283 /* allow to reduce meta or sys integrity only if force set */ 3284 allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 | 3285 BTRFS_BLOCK_GROUP_RAID10 | 3286 BTRFS_BLOCK_GROUP_RAID5 | 3287 BTRFS_BLOCK_GROUP_RAID6; 3288 do { 3289 seq = read_seqbegin(&fs_info->profiles_lock); 3290 3291 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) && 3292 (fs_info->avail_system_alloc_bits & allowed) && 3293 !(bctl->sys.target & allowed)) || 3294 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && 3295 (fs_info->avail_metadata_alloc_bits & allowed) && 3296 !(bctl->meta.target & allowed))) { 3297 if (bctl->flags & BTRFS_BALANCE_FORCE) { 3298 btrfs_info(fs_info, "force reducing metadata integrity"); 3299 } else { 3300 btrfs_err(fs_info, "balance will reduce metadata " 3301 "integrity, use force if you want this"); 3302 ret = -EINVAL; 3303 goto out; 3304 } 3305 } 3306 } while (read_seqretry(&fs_info->profiles_lock, seq)); 3307 3308 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) { 3309 int num_tolerated_disk_barrier_failures; 3310 u64 target = bctl->sys.target; 3311 3312 num_tolerated_disk_barrier_failures = 3313 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info); 3314 if (num_tolerated_disk_barrier_failures > 0 && 3315 (target & 
3316 (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 | 3317 BTRFS_AVAIL_ALLOC_BIT_SINGLE))) 3318 num_tolerated_disk_barrier_failures = 0; 3319 else if (num_tolerated_disk_barrier_failures > 1 && 3320 (target & 3321 (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))) 3322 num_tolerated_disk_barrier_failures = 1; 3323 3324 fs_info->num_tolerated_disk_barrier_failures = 3325 num_tolerated_disk_barrier_failures; 3326 } 3327 3328 ret = insert_balance_item(fs_info->tree_root, bctl); 3329 if (ret && ret != -EEXIST) 3330 goto out; 3331 3332 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) { 3333 BUG_ON(ret == -EEXIST); 3334 set_balance_control(bctl); 3335 } else { 3336 BUG_ON(ret != -EEXIST); 3337 spin_lock(&fs_info->balance_lock); 3338 update_balance_args(bctl); 3339 spin_unlock(&fs_info->balance_lock); 3340 } 3341 3342 atomic_inc(&fs_info->balance_running); 3343 mutex_unlock(&fs_info->balance_mutex); 3344 3345 ret = __btrfs_balance(fs_info); 3346 3347 mutex_lock(&fs_info->balance_mutex); 3348 atomic_dec(&fs_info->balance_running); 3349 3350 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) { 3351 fs_info->num_tolerated_disk_barrier_failures = 3352 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info); 3353 } 3354 3355 if (bargs) { 3356 memset(bargs, 0, sizeof(*bargs)); 3357 update_ioctl_balance_args(fs_info, 0, bargs); 3358 } 3359 3360 if ((ret && ret != -ECANCELED && ret != -ENOSPC) || 3361 balance_need_close(fs_info)) { 3362 __cancel_balance(fs_info); 3363 } 3364 3365 wake_up(&fs_info->balance_wait_q); 3366 3367 return ret; 3368 out: 3369 if (bctl->flags & BTRFS_BALANCE_RESUME) 3370 __cancel_balance(fs_info); 3371 else { 3372 kfree(bctl); 3373 atomic_set(&fs_info->mutually_exclusive_operation_running, 0); 3374 } 3375 return ret; 3376 } 3377 3378 static int balance_kthread(void *data) 3379 { 3380 struct btrfs_fs_info *fs_info = data; 3381 int ret = 0; 3382 3383 mutex_lock(&fs_info->volume_mutex); 3384 mutex_lock(&fs_info->balance_mutex); 3385 3386 if (fs_info->balance_ctl) { 3387 btrfs_info(fs_info, "continuing balance"); 3388 ret = btrfs_balance(fs_info->balance_ctl, NULL); 3389 } 3390 3391 mutex_unlock(&fs_info->balance_mutex); 3392 mutex_unlock(&fs_info->volume_mutex); 3393 3394 return ret; 3395 } 3396 3397 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info) 3398 { 3399 struct task_struct *tsk; 3400 3401 spin_lock(&fs_info->balance_lock); 3402 if (!fs_info->balance_ctl) { 3403 spin_unlock(&fs_info->balance_lock); 3404 return 0; 3405 } 3406 spin_unlock(&fs_info->balance_lock); 3407 3408 if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) { 3409 btrfs_info(fs_info, "force skipping balance"); 3410 return 0; 3411 } 3412 3413 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); 3414 return PTR_ERR_OR_ZERO(tsk); 3415 } 3416 3417 int btrfs_recover_balance(struct btrfs_fs_info *fs_info) 3418 { 3419 struct btrfs_balance_control *bctl; 3420 struct btrfs_balance_item *item; 3421 struct btrfs_disk_balance_args disk_bargs; 3422 struct btrfs_path *path; 3423 struct extent_buffer *leaf; 3424 struct btrfs_key key; 3425 int ret; 3426 3427 path = btrfs_alloc_path(); 3428 if (!path) 3429 return -ENOMEM; 3430 3431 key.objectid = BTRFS_BALANCE_OBJECTID; 3432 key.type = BTRFS_BALANCE_ITEM_KEY; 3433 key.offset = 0; 3434 3435 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 3436 if (ret < 0) 3437 goto out; 3438 if (ret > 0) { /* ret = -ENOENT; */ 3439 ret = 0; 3440 goto out; 3441 } 3442 3443 bctl = kzalloc(sizeof(*bctl), GFP_NOFS); 3444 if (!bctl) { 3445 ret = -ENOMEM; 3446 
goto out; 3447 } 3448 3449 leaf = path->nodes[0]; 3450 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 3451 3452 bctl->fs_info = fs_info; 3453 bctl->flags = btrfs_balance_flags(leaf, item); 3454 bctl->flags |= BTRFS_BALANCE_RESUME; 3455 3456 btrfs_balance_data(leaf, item, &disk_bargs); 3457 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs); 3458 btrfs_balance_meta(leaf, item, &disk_bargs); 3459 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs); 3460 btrfs_balance_sys(leaf, item, &disk_bargs); 3461 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs); 3462 3463 WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)); 3464 3465 mutex_lock(&fs_info->volume_mutex); 3466 mutex_lock(&fs_info->balance_mutex); 3467 3468 set_balance_control(bctl); 3469 3470 mutex_unlock(&fs_info->balance_mutex); 3471 mutex_unlock(&fs_info->volume_mutex); 3472 out: 3473 btrfs_free_path(path); 3474 return ret; 3475 } 3476 3477 int btrfs_pause_balance(struct btrfs_fs_info *fs_info) 3478 { 3479 int ret = 0; 3480 3481 mutex_lock(&fs_info->balance_mutex); 3482 if (!fs_info->balance_ctl) { 3483 mutex_unlock(&fs_info->balance_mutex); 3484 return -ENOTCONN; 3485 } 3486 3487 if (atomic_read(&fs_info->balance_running)) { 3488 atomic_inc(&fs_info->balance_pause_req); 3489 mutex_unlock(&fs_info->balance_mutex); 3490 3491 wait_event(fs_info->balance_wait_q, 3492 atomic_read(&fs_info->balance_running) == 0); 3493 3494 mutex_lock(&fs_info->balance_mutex); 3495 /* we are good with balance_ctl ripped off from under us */ 3496 BUG_ON(atomic_read(&fs_info->balance_running)); 3497 atomic_dec(&fs_info->balance_pause_req); 3498 } else { 3499 ret = -ENOTCONN; 3500 } 3501 3502 mutex_unlock(&fs_info->balance_mutex); 3503 return ret; 3504 } 3505 3506 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) 3507 { 3508 if (fs_info->sb->s_flags & MS_RDONLY) 3509 return -EROFS; 3510 3511 mutex_lock(&fs_info->balance_mutex); 3512 if (!fs_info->balance_ctl) { 3513 mutex_unlock(&fs_info->balance_mutex); 3514 return -ENOTCONN; 3515 } 3516 3517 atomic_inc(&fs_info->balance_cancel_req); 3518 /* 3519 * if we are running just wait and return, balance item is 3520 * deleted in btrfs_balance in this case 3521 */ 3522 if (atomic_read(&fs_info->balance_running)) { 3523 mutex_unlock(&fs_info->balance_mutex); 3524 wait_event(fs_info->balance_wait_q, 3525 atomic_read(&fs_info->balance_running) == 0); 3526 mutex_lock(&fs_info->balance_mutex); 3527 } else { 3528 /* __cancel_balance needs volume_mutex */ 3529 mutex_unlock(&fs_info->balance_mutex); 3530 mutex_lock(&fs_info->volume_mutex); 3531 mutex_lock(&fs_info->balance_mutex); 3532 3533 if (fs_info->balance_ctl) 3534 __cancel_balance(fs_info); 3535 3536 mutex_unlock(&fs_info->volume_mutex); 3537 } 3538 3539 BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running)); 3540 atomic_dec(&fs_info->balance_cancel_req); 3541 mutex_unlock(&fs_info->balance_mutex); 3542 return 0; 3543 } 3544 3545 static int btrfs_uuid_scan_kthread(void *data) 3546 { 3547 struct btrfs_fs_info *fs_info = data; 3548 struct btrfs_root *root = fs_info->tree_root; 3549 struct btrfs_key key; 3550 struct btrfs_key max_key; 3551 struct btrfs_path *path = NULL; 3552 int ret = 0; 3553 struct extent_buffer *eb; 3554 int slot; 3555 struct btrfs_root_item root_item; 3556 u32 item_size; 3557 struct btrfs_trans_handle *trans = NULL; 3558 3559 path = btrfs_alloc_path(); 3560 if (!path) { 3561 ret = -ENOMEM; 3562 goto out; 3563 } 3564 3565 key.objectid = 0; 3566 key.type = 
BTRFS_ROOT_ITEM_KEY; 3567 key.offset = 0; 3568 3569 max_key.objectid = (u64)-1; 3570 max_key.type = BTRFS_ROOT_ITEM_KEY; 3571 max_key.offset = (u64)-1; 3572 3573 path->keep_locks = 1; 3574 3575 while (1) { 3576 ret = btrfs_search_forward(root, &key, path, 0); 3577 if (ret) { 3578 if (ret > 0) 3579 ret = 0; 3580 break; 3581 } 3582 3583 if (key.type != BTRFS_ROOT_ITEM_KEY || 3584 (key.objectid < BTRFS_FIRST_FREE_OBJECTID && 3585 key.objectid != BTRFS_FS_TREE_OBJECTID) || 3586 key.objectid > BTRFS_LAST_FREE_OBJECTID) 3587 goto skip; 3588 3589 eb = path->nodes[0]; 3590 slot = path->slots[0]; 3591 item_size = btrfs_item_size_nr(eb, slot); 3592 if (item_size < sizeof(root_item)) 3593 goto skip; 3594 3595 read_extent_buffer(eb, &root_item, 3596 btrfs_item_ptr_offset(eb, slot), 3597 (int)sizeof(root_item)); 3598 if (btrfs_root_refs(&root_item) == 0) 3599 goto skip; 3600 3601 if (!btrfs_is_empty_uuid(root_item.uuid) || 3602 !btrfs_is_empty_uuid(root_item.received_uuid)) { 3603 if (trans) 3604 goto update_tree; 3605 3606 btrfs_release_path(path); 3607 /* 3608 * 1 - subvol uuid item 3609 * 1 - received_subvol uuid item 3610 */ 3611 trans = btrfs_start_transaction(fs_info->uuid_root, 2); 3612 if (IS_ERR(trans)) { 3613 ret = PTR_ERR(trans); 3614 break; 3615 } 3616 continue; 3617 } else { 3618 goto skip; 3619 } 3620 update_tree: 3621 if (!btrfs_is_empty_uuid(root_item.uuid)) { 3622 ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root, 3623 root_item.uuid, 3624 BTRFS_UUID_KEY_SUBVOL, 3625 key.objectid); 3626 if (ret < 0) { 3627 btrfs_warn(fs_info, "uuid_tree_add failed %d", 3628 ret); 3629 break; 3630 } 3631 } 3632 3633 if (!btrfs_is_empty_uuid(root_item.received_uuid)) { 3634 ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root, 3635 root_item.received_uuid, 3636 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 3637 key.objectid); 3638 if (ret < 0) { 3639 btrfs_warn(fs_info, "uuid_tree_add failed %d", 3640 ret); 3641 break; 3642 } 3643 } 3644 3645 skip: 3646 if (trans) { 3647 ret = btrfs_end_transaction(trans, fs_info->uuid_root); 3648 trans = NULL; 3649 if (ret) 3650 break; 3651 } 3652 3653 btrfs_release_path(path); 3654 if (key.offset < (u64)-1) { 3655 key.offset++; 3656 } else if (key.type < BTRFS_ROOT_ITEM_KEY) { 3657 key.offset = 0; 3658 key.type = BTRFS_ROOT_ITEM_KEY; 3659 } else if (key.objectid < (u64)-1) { 3660 key.offset = 0; 3661 key.type = BTRFS_ROOT_ITEM_KEY; 3662 key.objectid++; 3663 } else { 3664 break; 3665 } 3666 cond_resched(); 3667 } 3668 3669 out: 3670 btrfs_free_path(path); 3671 if (trans && !IS_ERR(trans)) 3672 btrfs_end_transaction(trans, fs_info->uuid_root); 3673 if (ret) 3674 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret); 3675 else 3676 fs_info->update_uuid_tree_gen = 1; 3677 up(&fs_info->uuid_tree_rescan_sem); 3678 return 0; 3679 } 3680 3681 /* 3682 * Callback for btrfs_uuid_tree_iterate(). 3683 * returns: 3684 * 0 check succeeded, the entry is not outdated. 3685 * < 0 if an error occurred. 3686 * > 0 if the check failed, which means the caller shall remove the entry.
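 * An entry is considered outdated when the subvolume it points to no
 * longer exists (-ENOENT from the root lookup) or when the uuid stored
 * in the tree no longer matches the subvolume's current root item.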
3687 */ 3688 static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info, 3689 u8 *uuid, u8 type, u64 subid) 3690 { 3691 struct btrfs_key key; 3692 int ret = 0; 3693 struct btrfs_root *subvol_root; 3694 3695 if (type != BTRFS_UUID_KEY_SUBVOL && 3696 type != BTRFS_UUID_KEY_RECEIVED_SUBVOL) 3697 goto out; 3698 3699 key.objectid = subid; 3700 key.type = BTRFS_ROOT_ITEM_KEY; 3701 key.offset = (u64)-1; 3702 subvol_root = btrfs_read_fs_root_no_name(fs_info, &key); 3703 if (IS_ERR(subvol_root)) { 3704 ret = PTR_ERR(subvol_root); 3705 if (ret == -ENOENT) 3706 ret = 1; 3707 goto out; 3708 } 3709 3710 switch (type) { 3711 case BTRFS_UUID_KEY_SUBVOL: 3712 if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE)) 3713 ret = 1; 3714 break; 3715 case BTRFS_UUID_KEY_RECEIVED_SUBVOL: 3716 if (memcmp(uuid, subvol_root->root_item.received_uuid, 3717 BTRFS_UUID_SIZE)) 3718 ret = 1; 3719 break; 3720 } 3721 3722 out: 3723 return ret; 3724 } 3725 3726 static int btrfs_uuid_rescan_kthread(void *data) 3727 { 3728 struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data; 3729 int ret; 3730 3731 /* 3732 * 1st step is to iterate through the existing UUID tree and 3733 * to delete all entries that contain outdated data. 3734 * 2nd step is to add all missing entries to the UUID tree. 3735 */ 3736 ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry); 3737 if (ret < 0) { 3738 btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret); 3739 up(&fs_info->uuid_tree_rescan_sem); 3740 return ret; 3741 } 3742 return btrfs_uuid_scan_kthread(data); 3743 } 3744 3745 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info) 3746 { 3747 struct btrfs_trans_handle *trans; 3748 struct btrfs_root *tree_root = fs_info->tree_root; 3749 struct btrfs_root *uuid_root; 3750 struct task_struct *task; 3751 int ret; 3752 3753 /* 3754 * 1 - root node 3755 * 1 - root item 3756 */ 3757 trans = btrfs_start_transaction(tree_root, 2); 3758 if (IS_ERR(trans)) 3759 return PTR_ERR(trans); 3760 3761 uuid_root = btrfs_create_tree(trans, fs_info, 3762 BTRFS_UUID_TREE_OBJECTID); 3763 if (IS_ERR(uuid_root)) { 3764 btrfs_abort_transaction(trans, tree_root, 3765 PTR_ERR(uuid_root)); 3766 return PTR_ERR(uuid_root); 3767 } 3768 3769 fs_info->uuid_root = uuid_root; 3770 3771 ret = btrfs_commit_transaction(trans, tree_root); 3772 if (ret) 3773 return ret; 3774 3775 down(&fs_info->uuid_tree_rescan_sem); 3776 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid"); 3777 if (IS_ERR(task)) { 3778 /* fs_info->update_uuid_tree_gen remains 0 in all error cases */ 3779 btrfs_warn(fs_info, "failed to start uuid_scan task"); 3780 up(&fs_info->uuid_tree_rescan_sem); 3781 return PTR_ERR(task); 3782 } 3783 3784 return 0; 3785 } 3786 3787 int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info) 3788 { 3789 struct task_struct *task; 3790 3791 down(&fs_info->uuid_tree_rescan_sem); 3792 task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid"); 3793 if (IS_ERR(task)) { 3794 /* fs_info->update_uuid_tree_gen remains 0 in all error cases */ 3795 btrfs_warn(fs_info, "failed to start uuid_rescan task"); 3796 up(&fs_info->uuid_tree_rescan_sem); 3797 return PTR_ERR(task); 3798 } 3799 3800 return 0; 3801 } 3802 3803 /* 3804 * shrinking a device means finding all of the device extents past 3805 * the new size, and then following the back refs to the chunks.
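 * Each device extent past the cutoff belongs to exactly one chunk, so
 * the shrink loop below relocates those chunks one by one, retrying the
 * whole pass once before giving up with -ENOSPC.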
3806 * The chunk relocation code actually frees the device extent 3807 */ 3808 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) 3809 { 3810 struct btrfs_trans_handle *trans; 3811 struct btrfs_root *root = device->dev_root; 3812 struct btrfs_dev_extent *dev_extent = NULL; 3813 struct btrfs_path *path; 3814 u64 length; 3815 u64 chunk_tree; 3816 u64 chunk_objectid; 3817 u64 chunk_offset; 3818 int ret; 3819 int slot; 3820 int failed = 0; 3821 bool retried = false; 3822 struct extent_buffer *l; 3823 struct btrfs_key key; 3824 struct btrfs_super_block *super_copy = root->fs_info->super_copy; 3825 u64 old_total = btrfs_super_total_bytes(super_copy); 3826 u64 old_size = device->total_bytes; 3827 u64 diff = device->total_bytes - new_size; 3828 3829 if (device->is_tgtdev_for_dev_replace) 3830 return -EINVAL; 3831 3832 path = btrfs_alloc_path(); 3833 if (!path) 3834 return -ENOMEM; 3835 3836 path->reada = 2; 3837 3838 lock_chunks(root); 3839 3840 device->total_bytes = new_size; 3841 if (device->writeable) { 3842 device->fs_devices->total_rw_bytes -= diff; 3843 spin_lock(&root->fs_info->free_chunk_lock); 3844 root->fs_info->free_chunk_space -= diff; 3845 spin_unlock(&root->fs_info->free_chunk_lock); 3846 } 3847 unlock_chunks(root); 3848 3849 again: 3850 key.objectid = device->devid; 3851 key.offset = (u64)-1; 3852 key.type = BTRFS_DEV_EXTENT_KEY; 3853 3854 do { 3855 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 3856 if (ret < 0) 3857 goto done; 3858 3859 ret = btrfs_previous_item(root, path, 0, key.type); 3860 if (ret < 0) 3861 goto done; 3862 if (ret) { 3863 ret = 0; 3864 btrfs_release_path(path); 3865 break; 3866 } 3867 3868 l = path->nodes[0]; 3869 slot = path->slots[0]; 3870 btrfs_item_key_to_cpu(l, &key, path->slots[0]); 3871 3872 if (key.objectid != device->devid) { 3873 btrfs_release_path(path); 3874 break; 3875 } 3876 3877 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 3878 length = btrfs_dev_extent_length(l, dev_extent); 3879 3880 if (key.offset + length <= new_size) { 3881 btrfs_release_path(path); 3882 break; 3883 } 3884 3885 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent); 3886 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent); 3887 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); 3888 btrfs_release_path(path); 3889 3890 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid, 3891 chunk_offset); 3892 if (ret && ret != -ENOSPC) 3893 goto done; 3894 if (ret == -ENOSPC) 3895 failed++; 3896 } while (key.offset-- > 0); 3897 3898 if (failed && !retried) { 3899 failed = 0; 3900 retried = true; 3901 goto again; 3902 } else if (failed && retried) { 3903 ret = -ENOSPC; 3904 lock_chunks(root); 3905 3906 device->total_bytes = old_size; 3907 if (device->writeable) 3908 device->fs_devices->total_rw_bytes += diff; 3909 spin_lock(&root->fs_info->free_chunk_lock); 3910 root->fs_info->free_chunk_space += diff; 3911 spin_unlock(&root->fs_info->free_chunk_lock); 3912 unlock_chunks(root); 3913 goto done; 3914 } 3915 3916 /* Shrinking succeeded, else we would be at "done". */ 3917 trans = btrfs_start_transaction(root, 0); 3918 if (IS_ERR(trans)) { 3919 ret = PTR_ERR(trans); 3920 goto done; 3921 } 3922 3923 lock_chunks(root); 3924 3925 device->disk_total_bytes = new_size; 3926 /* Now btrfs_update_device() will change the on-disk size. 
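 * The super block's total_bytes is lowered by the same diff just below,
 * so the device item and the super block stay consistent.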
*/ 3927 ret = btrfs_update_device(trans, device); 3928 if (ret) { 3929 unlock_chunks(root); 3930 btrfs_end_transaction(trans, root); 3931 goto done; 3932 } 3933 WARN_ON(diff > old_total); 3934 btrfs_set_super_total_bytes(super_copy, old_total - diff); 3935 unlock_chunks(root); 3936 btrfs_end_transaction(trans, root); 3937 done: 3938 btrfs_free_path(path); 3939 return ret; 3940 } 3941 3942 static int btrfs_add_system_chunk(struct btrfs_root *root, 3943 struct btrfs_key *key, 3944 struct btrfs_chunk *chunk, int item_size) 3945 { 3946 struct btrfs_super_block *super_copy = root->fs_info->super_copy; 3947 struct btrfs_disk_key disk_key; 3948 u32 array_size; 3949 u8 *ptr; 3950 3951 array_size = btrfs_super_sys_array_size(super_copy); 3952 if (array_size + item_size + sizeof(disk_key) 3953 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) 3954 return -EFBIG; 3955 3956 ptr = super_copy->sys_chunk_array + array_size; 3957 btrfs_cpu_key_to_disk(&disk_key, key); 3958 memcpy(ptr, &disk_key, sizeof(disk_key)); 3959 ptr += sizeof(disk_key); 3960 memcpy(ptr, chunk, item_size); 3961 item_size += sizeof(disk_key); 3962 btrfs_set_super_sys_array_size(super_copy, array_size + item_size); 3963 return 0; 3964 } 3965 3966 /* 3967 * sort the devices in descending order by max_avail, total_avail 3968 */ 3969 static int btrfs_cmp_device_info(const void *a, const void *b) 3970 { 3971 const struct btrfs_device_info *di_a = a; 3972 const struct btrfs_device_info *di_b = b; 3973 3974 if (di_a->max_avail > di_b->max_avail) 3975 return -1; 3976 if (di_a->max_avail < di_b->max_avail) 3977 return 1; 3978 if (di_a->total_avail > di_b->total_avail) 3979 return -1; 3980 if (di_a->total_avail < di_b->total_avail) 3981 return 1; 3982 return 0; 3983 } 3984 3985 static struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = { 3986 [BTRFS_RAID_RAID10] = { 3987 .sub_stripes = 2, 3988 .dev_stripes = 1, 3989 .devs_max = 0, /* 0 == as many as possible */ 3990 .devs_min = 4, 3991 .devs_increment = 2, 3992 .ncopies = 2, 3993 }, 3994 [BTRFS_RAID_RAID1] = { 3995 .sub_stripes = 1, 3996 .dev_stripes = 1, 3997 .devs_max = 2, 3998 .devs_min = 2, 3999 .devs_increment = 2, 4000 .ncopies = 2, 4001 }, 4002 [BTRFS_RAID_DUP] = { 4003 .sub_stripes = 1, 4004 .dev_stripes = 2, 4005 .devs_max = 1, 4006 .devs_min = 1, 4007 .devs_increment = 1, 4008 .ncopies = 2, 4009 }, 4010 [BTRFS_RAID_RAID0] = { 4011 .sub_stripes = 1, 4012 .dev_stripes = 1, 4013 .devs_max = 0, 4014 .devs_min = 2, 4015 .devs_increment = 1, 4016 .ncopies = 1, 4017 }, 4018 [BTRFS_RAID_SINGLE] = { 4019 .sub_stripes = 1, 4020 .dev_stripes = 1, 4021 .devs_max = 1, 4022 .devs_min = 1, 4023 .devs_increment = 1, 4024 .ncopies = 1, 4025 }, 4026 [BTRFS_RAID_RAID5] = { 4027 .sub_stripes = 1, 4028 .dev_stripes = 1, 4029 .devs_max = 0, 4030 .devs_min = 2, 4031 .devs_increment = 1, 4032 .ncopies = 2, 4033 }, 4034 [BTRFS_RAID_RAID6] = { 4035 .sub_stripes = 1, 4036 .dev_stripes = 1, 4037 .devs_max = 0, 4038 .devs_min = 3, 4039 .devs_increment = 1, 4040 .ncopies = 3, 4041 }, 4042 }; 4043 4044 static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target) 4045 { 4046 /* TODO allow them to set a preferred stripe size */ 4047 return 64 * 1024; 4048 } 4049 4050 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type) 4051 { 4052 if (!(type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6))) 4053 return; 4054 4055 btrfs_set_fs_incompat(info, RAID56); 4056 } 4057 4058 #define BTRFS_MAX_DEVS(r) ((BTRFS_LEAF_DATA_SIZE(r) \ 4059 - sizeof(struct btrfs_item) \ 4060 - sizeof(struct 
btrfs_chunk)) \ 4061 / sizeof(struct btrfs_stripe) + 1) 4062 4063 #define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE \ 4064 - 2 * sizeof(struct btrfs_disk_key) \ 4065 - 2 * sizeof(struct btrfs_chunk)) \ 4066 / sizeof(struct btrfs_stripe) + 1) 4067 4068 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, 4069 struct btrfs_root *extent_root, u64 start, 4070 u64 type) 4071 { 4072 struct btrfs_fs_info *info = extent_root->fs_info; 4073 struct btrfs_fs_devices *fs_devices = info->fs_devices; 4074 struct list_head *cur; 4075 struct map_lookup *map = NULL; 4076 struct extent_map_tree *em_tree; 4077 struct extent_map *em; 4078 struct btrfs_device_info *devices_info = NULL; 4079 u64 total_avail; 4080 int num_stripes; /* total number of stripes to allocate */ 4081 int data_stripes; /* number of stripes that count for 4082 block group size */ 4083 int sub_stripes; /* sub_stripes info for map */ 4084 int dev_stripes; /* stripes per dev */ 4085 int devs_max; /* max devs to use */ 4086 int devs_min; /* min devs needed */ 4087 int devs_increment; /* ndevs has to be a multiple of this */ 4088 int ncopies; /* how many copies of the data */ 4089 int ret; 4090 u64 max_stripe_size; 4091 u64 max_chunk_size; 4092 u64 stripe_size; 4093 u64 num_bytes; 4094 u64 raid_stripe_len = BTRFS_STRIPE_LEN; 4095 int ndevs; 4096 int i; 4097 int j; 4098 int index; 4099 4100 BUG_ON(!alloc_profile_is_valid(type, 0)); 4101 4102 if (list_empty(&fs_devices->alloc_list)) 4103 return -ENOSPC; 4104 4105 index = __get_raid_index(type); 4106 4107 sub_stripes = btrfs_raid_array[index].sub_stripes; 4108 dev_stripes = btrfs_raid_array[index].dev_stripes; 4109 devs_max = btrfs_raid_array[index].devs_max; 4110 devs_min = btrfs_raid_array[index].devs_min; 4111 devs_increment = btrfs_raid_array[index].devs_increment; 4112 ncopies = btrfs_raid_array[index].ncopies; 4113 4114 if (type & BTRFS_BLOCK_GROUP_DATA) { 4115 max_stripe_size = 1024 * 1024 * 1024; 4116 max_chunk_size = 10 * max_stripe_size; 4117 if (!devs_max) 4118 devs_max = BTRFS_MAX_DEVS(info->chunk_root); 4119 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 4120 /* for larger filesystems, use larger metadata chunks */ 4121 if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024) 4122 max_stripe_size = 1024 * 1024 * 1024; 4123 else 4124 max_stripe_size = 256 * 1024 * 1024; 4125 max_chunk_size = max_stripe_size; 4126 if (!devs_max) 4127 devs_max = BTRFS_MAX_DEVS(info->chunk_root); 4128 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 4129 max_stripe_size = 32 * 1024 * 1024; 4130 max_chunk_size = 2 * max_stripe_size; 4131 if (!devs_max) 4132 devs_max = BTRFS_MAX_DEVS_SYS_CHUNK; 4133 } else { 4134 btrfs_err(info, "invalid chunk type 0x%llx requested", 4135 type); 4136 BUG_ON(1); 4137 } 4138 4139 /* we don't want a chunk larger than 10% of writeable space */ 4140 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1), 4141 max_chunk_size); 4142 4143 devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices, 4144 GFP_NOFS); 4145 if (!devices_info) 4146 return -ENOMEM; 4147 4148 cur = fs_devices->alloc_list.next; 4149 4150 /* 4151 * in the first pass through the devices list, we gather information 4152 * about the available holes on each device.
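 * Devices that are read-only, not in the fs metadata, serving as a
 * replace target, or whose best hole cannot hold even one stripe are
 * skipped.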
4153 */ 4154 ndevs = 0; 4155 while (cur != &fs_devices->alloc_list) { 4156 struct btrfs_device *device; 4157 u64 max_avail; 4158 u64 dev_offset; 4159 4160 device = list_entry(cur, struct btrfs_device, dev_alloc_list); 4161 4162 cur = cur->next; 4163 4164 if (!device->writeable) { 4165 WARN(1, KERN_ERR 4166 "BTRFS: read-only device in alloc_list\n"); 4167 continue; 4168 } 4169 4170 if (!device->in_fs_metadata || 4171 device->is_tgtdev_for_dev_replace) 4172 continue; 4173 4174 if (device->total_bytes > device->bytes_used) 4175 total_avail = device->total_bytes - device->bytes_used; 4176 else 4177 total_avail = 0; 4178 4179 /* If there is no space on this device, skip it. */ 4180 if (total_avail == 0) 4181 continue; 4182 4183 ret = find_free_dev_extent(trans, device, 4184 max_stripe_size * dev_stripes, 4185 &dev_offset, &max_avail); 4186 if (ret && ret != -ENOSPC) 4187 goto error; 4188 4189 if (ret == 0) 4190 max_avail = max_stripe_size * dev_stripes; 4191 4192 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes) 4193 continue; 4194 4195 if (ndevs == fs_devices->rw_devices) { 4196 WARN(1, "%s: found more than %llu devices\n", 4197 __func__, fs_devices->rw_devices); 4198 break; 4199 } 4200 devices_info[ndevs].dev_offset = dev_offset; 4201 devices_info[ndevs].max_avail = max_avail; 4202 devices_info[ndevs].total_avail = total_avail; 4203 devices_info[ndevs].dev = device; 4204 ++ndevs; 4205 } 4206 4207 /* 4208 * now sort the devices by hole size / available space 4209 */ 4210 sort(devices_info, ndevs, sizeof(struct btrfs_device_info), 4211 btrfs_cmp_device_info, NULL); 4212 4213 /* round down to number of usable stripes */ 4214 ndevs -= ndevs % devs_increment; 4215 4216 if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) { 4217 ret = -ENOSPC; 4218 goto error; 4219 } 4220 4221 if (devs_max && ndevs > devs_max) 4222 ndevs = devs_max; 4223 /* 4224 * the primary goal is to maximize the number of stripes, so use as many 4225 * devices as possible, even if the stripes are not maximum sized. 
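*
* (Hedged illustration, made-up numbers: for RAID0 over four devices
* whose smallest selected hole is 1GiB, taking
* devices_info[ndevs-1].max_avail below yields stripe_size = 1GiB and
* num_stripes = 4, i.e. a 4GiB chunk before the max_chunk_size clamp,
* rather than a bigger stripe on fewer devices.)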
4226 */ 4227 stripe_size = devices_info[ndevs-1].max_avail; 4228 num_stripes = ndevs * dev_stripes; 4229 4230 /* 4231 * this will have to be fixed for RAID1 and RAID10 over 4232 * more drives 4233 */ 4234 data_stripes = num_stripes / ncopies; 4235 4236 if (type & BTRFS_BLOCK_GROUP_RAID5) { 4237 raid_stripe_len = find_raid56_stripe_len(ndevs - 1, 4238 btrfs_super_stripesize(info->super_copy)); 4239 data_stripes = num_stripes - 1; 4240 } 4241 if (type & BTRFS_BLOCK_GROUP_RAID6) { 4242 raid_stripe_len = find_raid56_stripe_len(ndevs - 2, 4243 btrfs_super_stripesize(info->super_copy)); 4244 data_stripes = num_stripes - 2; 4245 } 4246 4247 /* 4248 * Use the number of data stripes to figure out how big this chunk 4249 * is really going to be in terms of logical address space, 4250 * and compare that answer with the max chunk size 4251 */ 4252 if (stripe_size * data_stripes > max_chunk_size) { 4253 u64 mask = (1ULL << 24) - 1; 4254 stripe_size = max_chunk_size; 4255 do_div(stripe_size, data_stripes); 4256 4257 /* bump the answer up to a 16MB boundary */ 4258 stripe_size = (stripe_size + mask) & ~mask; 4259 4260 /* but don't go higher than the limits we found 4261 * while searching for free extents 4262 */ 4263 if (stripe_size > devices_info[ndevs-1].max_avail) 4264 stripe_size = devices_info[ndevs-1].max_avail; 4265 } 4266 4267 do_div(stripe_size, dev_stripes); 4268 4269 /* align to BTRFS_STRIPE_LEN */ 4270 do_div(stripe_size, raid_stripe_len); 4271 stripe_size *= raid_stripe_len; 4272 4273 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); 4274 if (!map) { 4275 ret = -ENOMEM; 4276 goto error; 4277 } 4278 map->num_stripes = num_stripes; 4279 4280 for (i = 0; i < ndevs; ++i) { 4281 for (j = 0; j < dev_stripes; ++j) { 4282 int s = i * dev_stripes + j; 4283 map->stripes[s].dev = devices_info[i].dev; 4284 map->stripes[s].physical = devices_info[i].dev_offset + 4285 j * stripe_size; 4286 } 4287 } 4288 map->sector_size = extent_root->sectorsize; 4289 map->stripe_len = raid_stripe_len; 4290 map->io_align = raid_stripe_len; 4291 map->io_width = raid_stripe_len; 4292 map->type = type; 4293 map->sub_stripes = sub_stripes; 4294 4295 num_bytes = stripe_size * data_stripes; 4296 4297 trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes); 4298 4299 em = alloc_extent_map(); 4300 if (!em) { 4301 kfree(map); 4302 ret = -ENOMEM; 4303 goto error; 4304 } 4305 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 4306 em->bdev = (struct block_device *)map; 4307 em->start = start; 4308 em->len = num_bytes; 4309 em->block_start = 0; 4310 em->block_len = em->len; 4311 em->orig_block_len = stripe_size; 4312 4313 em_tree = &extent_root->fs_info->mapping_tree.map_tree; 4314 write_lock(&em_tree->lock); 4315 ret = add_extent_mapping(em_tree, em, 0); 4316 if (!ret) { 4317 list_add_tail(&em->list, &trans->transaction->pending_chunks); 4318 atomic_inc(&em->refs); 4319 } 4320 write_unlock(&em_tree->lock); 4321 if (ret) { 4322 free_extent_map(em); 4323 goto error; 4324 } 4325 4326 ret = btrfs_make_block_group(trans, extent_root, 0, type, 4327 BTRFS_FIRST_CHUNK_TREE_OBJECTID, 4328 start, num_bytes); 4329 if (ret) 4330 goto error_del_extent; 4331 4332 free_extent_map(em); 4333 check_raid56_incompat_flag(extent_root->fs_info, type); 4334 4335 kfree(devices_info); 4336 return 0; 4337 4338 error_del_extent: 4339 write_lock(&em_tree->lock); 4340 remove_extent_mapping(em_tree, em); 4341 write_unlock(&em_tree->lock); 4342 4343 /* One for our allocation */ 4344 free_extent_map(em); 4345 /* One for the tree reference */ 4346 
free_extent_map(em); 4347 error: 4348 kfree(devices_info); 4349 return ret; 4350 } 4351 4352 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans, 4353 struct btrfs_root *extent_root, 4354 u64 chunk_offset, u64 chunk_size) 4355 { 4356 struct btrfs_key key; 4357 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root; 4358 struct btrfs_device *device; 4359 struct btrfs_chunk *chunk; 4360 struct btrfs_stripe *stripe; 4361 struct extent_map_tree *em_tree; 4362 struct extent_map *em; 4363 struct map_lookup *map; 4364 size_t item_size; 4365 u64 dev_offset; 4366 u64 stripe_size; 4367 int i = 0; 4368 int ret; 4369 4370 em_tree = &extent_root->fs_info->mapping_tree.map_tree; 4371 read_lock(&em_tree->lock); 4372 em = lookup_extent_mapping(em_tree, chunk_offset, chunk_size); 4373 read_unlock(&em_tree->lock); 4374 4375 if (!em) { 4376 btrfs_crit(extent_root->fs_info, "unable to find logical " 4377 "%Lu len %Lu", chunk_offset, chunk_size); 4378 return -EINVAL; 4379 } 4380 4381 if (em->start != chunk_offset || em->len != chunk_size) { 4382 btrfs_crit(extent_root->fs_info, "found a bad mapping, wanted" 4383 " %Lu-%Lu, found %Lu-%Lu", chunk_offset, 4384 chunk_size, em->start, em->len); 4385 free_extent_map(em); 4386 return -EINVAL; 4387 } 4388 4389 map = (struct map_lookup *)em->bdev; 4390 item_size = btrfs_chunk_item_size(map->num_stripes); 4391 stripe_size = em->orig_block_len; 4392 4393 chunk = kzalloc(item_size, GFP_NOFS); 4394 if (!chunk) { 4395 ret = -ENOMEM; 4396 goto out; 4397 } 4398 4399 for (i = 0; i < map->num_stripes; i++) { 4400 device = map->stripes[i].dev; 4401 dev_offset = map->stripes[i].physical; 4402 4403 device->bytes_used += stripe_size; 4404 ret = btrfs_update_device(trans, device); 4405 if (ret) 4406 goto out; 4407 ret = btrfs_alloc_dev_extent(trans, device, 4408 chunk_root->root_key.objectid, 4409 BTRFS_FIRST_CHUNK_TREE_OBJECTID, 4410 chunk_offset, dev_offset, 4411 stripe_size); 4412 if (ret) 4413 goto out; 4414 } 4415 4416 spin_lock(&extent_root->fs_info->free_chunk_lock); 4417 extent_root->fs_info->free_chunk_space -= (stripe_size * 4418 map->num_stripes); 4419 spin_unlock(&extent_root->fs_info->free_chunk_lock); 4420 4421 stripe = &chunk->stripe; 4422 for (i = 0; i < map->num_stripes; i++) { 4423 device = map->stripes[i].dev; 4424 dev_offset = map->stripes[i].physical; 4425 4426 btrfs_set_stack_stripe_devid(stripe, device->devid); 4427 btrfs_set_stack_stripe_offset(stripe, dev_offset); 4428 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE); 4429 stripe++; 4430 } 4431 4432 btrfs_set_stack_chunk_length(chunk, chunk_size); 4433 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid); 4434 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len); 4435 btrfs_set_stack_chunk_type(chunk, map->type); 4436 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes); 4437 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len); 4438 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len); 4439 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize); 4440 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes); 4441 4442 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 4443 key.type = BTRFS_CHUNK_ITEM_KEY; 4444 key.offset = chunk_offset; 4445 4446 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); 4447 if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 4448 /* 4449 * TODO: Cleanup of inserted chunk root in case of 4450 * failure. 
4451 */
4452 ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
4453 item_size);
4454 }
4455
4456 out:
4457 kfree(chunk);
4458 free_extent_map(em);
4459 return ret;
4460 }
4461
4462 /*
4463 * Chunk allocation falls into two parts. The first part does the work
4464 * that makes the newly allocated chunk usable, but does not do any
4465 * operation that modifies the chunk tree. The second part does the
4466 * work that requires modifying the chunk tree. This division is
4467 * important for the bootstrap process of adding storage to a seed btrfs.
4468 */
4469 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4470 struct btrfs_root *extent_root, u64 type)
4471 {
4472 u64 chunk_offset;
4473
4474 chunk_offset = find_next_chunk(extent_root->fs_info);
4475 return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
4476 }
4477
4478 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
4479 struct btrfs_root *root,
4480 struct btrfs_device *device)
4481 {
4482 u64 chunk_offset;
4483 u64 sys_chunk_offset;
4484 u64 alloc_profile;
4485 struct btrfs_fs_info *fs_info = root->fs_info;
4486 struct btrfs_root *extent_root = fs_info->extent_root;
4487 int ret;
4488
4489 chunk_offset = find_next_chunk(fs_info);
4490 alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
4491 ret = __btrfs_alloc_chunk(trans, extent_root, chunk_offset,
4492 alloc_profile);
4493 if (ret)
4494 return ret;
4495
4496 sys_chunk_offset = find_next_chunk(root->fs_info);
4497 alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
4498 ret = __btrfs_alloc_chunk(trans, extent_root, sys_chunk_offset,
4499 alloc_profile);
4500 if (ret) {
4501 btrfs_abort_transaction(trans, root, ret);
4502 goto out;
4503 }
4504
4505 ret = btrfs_add_device(trans, fs_info->chunk_root, device);
4506 if (ret)
4507 btrfs_abort_transaction(trans, root, ret);
4508 out:
4509 return ret;
4510 }
4511
4512 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
4513 {
4514 struct extent_map *em;
4515 struct map_lookup *map;
4516 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4517 int readonly = 0;
4518 int i;
4519
4520 read_lock(&map_tree->map_tree.lock);
4521 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
4522 read_unlock(&map_tree->map_tree.lock);
4523 if (!em)
4524 return 1;
4525
4526 if (btrfs_test_opt(root, DEGRADED)) {
4527 free_extent_map(em);
4528 return 0;
4529 }
4530
4531 map = (struct map_lookup *)em->bdev;
4532 for (i = 0; i < map->num_stripes; i++) {
4533 if (!map->stripes[i].dev->writeable) {
4534 readonly = 1;
4535 break;
4536 }
4537 }
4538 free_extent_map(em);
4539 return readonly;
4540 }
4541
4542 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
4543 {
4544 extent_map_tree_init(&tree->map_tree);
4545 }
4546
4547 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
4548 {
4549 struct extent_map *em;
4550
4551 while (1) {
4552 write_lock(&tree->map_tree.lock);
4553 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
4554 if (em)
4555 remove_extent_mapping(&tree->map_tree, em);
4556 write_unlock(&tree->map_tree.lock);
4557 if (!em)
4558 break;
4559 /* once for us */
4560 free_extent_map(em);
4561 /* once for the tree */
4562 free_extent_map(em);
4563 }
4564 }
4565
4566 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
4567 {
4568 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4569 struct extent_map *em;
4570 struct map_lookup *map;
4571 struct extent_map_tree *em_tree = &map_tree->map_tree;
4572 int
ret;
4573
4574 read_lock(&em_tree->lock);
4575 em = lookup_extent_mapping(em_tree, logical, len);
4576 read_unlock(&em_tree->lock);
4577
4578 /*
4579 * We could return errors for these cases, but that could get ugly and
4580 * we'd probably just do the same thing anyway: do nothing else and
4581 * exit. So return 1 so the callers don't try to use other copies.
4582 */
4583 if (!em) {
4584 btrfs_crit(fs_info, "No mapping for %Lu-%Lu", logical,
4585 logical+len);
4586 return 1;
4587 }
4588
4589 if (em->start > logical || em->start + em->len < logical) {
4590 btrfs_crit(fs_info, "Invalid mapping for %Lu-%Lu, got "
4591 "%Lu-%Lu", logical, logical+len, em->start,
4592 em->start + em->len);
4593 free_extent_map(em);
4594 return 1;
4595 }
4596
4597 map = (struct map_lookup *)em->bdev;
4598 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
4599 ret = map->num_stripes;
4600 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
4601 ret = map->sub_stripes;
4602 else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
4603 ret = 2;
4604 else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
4605 ret = 3;
4606 else
4607 ret = 1;
4608 free_extent_map(em);
4609
4610 btrfs_dev_replace_lock(&fs_info->dev_replace);
4611 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
4612 ret++;
4613 btrfs_dev_replace_unlock(&fs_info->dev_replace);
4614
4615 return ret;
4616 }
4617
4618 unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
4619 struct btrfs_mapping_tree *map_tree,
4620 u64 logical)
4621 {
4622 struct extent_map *em;
4623 struct map_lookup *map;
4624 struct extent_map_tree *em_tree = &map_tree->map_tree;
4625 unsigned long len = root->sectorsize;
4626
4627 read_lock(&em_tree->lock);
4628 em = lookup_extent_mapping(em_tree, logical, len);
4629 read_unlock(&em_tree->lock);
4630 BUG_ON(!em);
4631
4632 BUG_ON(em->start > logical || em->start + em->len < logical);
4633 map = (struct map_lookup *)em->bdev;
4634 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4635 BTRFS_BLOCK_GROUP_RAID6)) {
4636 len = map->stripe_len * nr_data_stripes(map);
4637 }
4638 free_extent_map(em);
4639 return len;
4640 }
4641
4642 int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
4643 u64 logical, u64 len, int mirror_num)
4644 {
4645 struct extent_map *em;
4646 struct map_lookup *map;
4647 struct extent_map_tree *em_tree = &map_tree->map_tree;
4648 int ret = 0;
4649
4650 read_lock(&em_tree->lock);
4651 em = lookup_extent_mapping(em_tree, logical, len);
4652 read_unlock(&em_tree->lock);
4653 BUG_ON(!em);
4654
4655 BUG_ON(em->start > logical || em->start + em->len < logical);
4656 map = (struct map_lookup *)em->bdev;
4657 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4658 BTRFS_BLOCK_GROUP_RAID6))
4659 ret = 1;
4660 free_extent_map(em);
4661 return ret;
4662 }
4663
4664 static int find_live_mirror(struct btrfs_fs_info *fs_info,
4665 struct map_lookup *map, int first, int num,
4666 int optimal, int dev_replace_is_ongoing)
4667 {
4668 int i;
4669 int tolerance;
4670 struct btrfs_device *srcdev;
4671
4672 if (dev_replace_is_ongoing &&
4673 fs_info->dev_replace.cont_reading_from_srcdev_mode ==
4674 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
4675 srcdev = fs_info->dev_replace.srcdev;
4676 else
4677 srcdev = NULL;
4678
4679 /*
4680 * try to avoid the drive that is the source drive for a
4681 * dev-replace procedure; only choose it if no other non-missing
4682 * mirror is available
4683 */
4684 for (tolerance = 0; tolerance < 2; tolerance++) {
4685 if (map->stripes[optimal].dev->bdev &&
4686 (tolerance ||
map->stripes[optimal].dev != srcdev)) 4687 return optimal; 4688 for (i = first; i < first + num; i++) { 4689 if (map->stripes[i].dev->bdev && 4690 (tolerance || map->stripes[i].dev != srcdev)) 4691 return i; 4692 } 4693 } 4694 4695 /* we couldn't find one that doesn't fail. Just return something 4696 * and the io error handling code will clean up eventually 4697 */ 4698 return optimal; 4699 } 4700 4701 static inline int parity_smaller(u64 a, u64 b) 4702 { 4703 return a > b; 4704 } 4705 4706 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */ 4707 static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map) 4708 { 4709 struct btrfs_bio_stripe s; 4710 int i; 4711 u64 l; 4712 int again = 1; 4713 4714 while (again) { 4715 again = 0; 4716 for (i = 0; i < bbio->num_stripes - 1; i++) { 4717 if (parity_smaller(raid_map[i], raid_map[i+1])) { 4718 s = bbio->stripes[i]; 4719 l = raid_map[i]; 4720 bbio->stripes[i] = bbio->stripes[i+1]; 4721 raid_map[i] = raid_map[i+1]; 4722 bbio->stripes[i+1] = s; 4723 raid_map[i+1] = l; 4724 again = 1; 4725 } 4726 } 4727 } 4728 } 4729 4730 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, 4731 u64 logical, u64 *length, 4732 struct btrfs_bio **bbio_ret, 4733 int mirror_num, u64 **raid_map_ret) 4734 { 4735 struct extent_map *em; 4736 struct map_lookup *map; 4737 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree; 4738 struct extent_map_tree *em_tree = &map_tree->map_tree; 4739 u64 offset; 4740 u64 stripe_offset; 4741 u64 stripe_end_offset; 4742 u64 stripe_nr; 4743 u64 stripe_nr_orig; 4744 u64 stripe_nr_end; 4745 u64 stripe_len; 4746 u64 *raid_map = NULL; 4747 int stripe_index; 4748 int i; 4749 int ret = 0; 4750 int num_stripes; 4751 int max_errors = 0; 4752 struct btrfs_bio *bbio = NULL; 4753 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; 4754 int dev_replace_is_ongoing = 0; 4755 int num_alloc_stripes; 4756 int patch_the_first_stripe_for_dev_replace = 0; 4757 u64 physical_to_patch_in_first_stripe = 0; 4758 u64 raid56_full_stripe_start = (u64)-1; 4759 4760 read_lock(&em_tree->lock); 4761 em = lookup_extent_mapping(em_tree, logical, *length); 4762 read_unlock(&em_tree->lock); 4763 4764 if (!em) { 4765 btrfs_crit(fs_info, "unable to find logical %llu len %llu", 4766 logical, *length); 4767 return -EINVAL; 4768 } 4769 4770 if (em->start > logical || em->start + em->len < logical) { 4771 btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, " 4772 "found %Lu-%Lu", logical, em->start, 4773 em->start + em->len); 4774 free_extent_map(em); 4775 return -EINVAL; 4776 } 4777 4778 map = (struct map_lookup *)em->bdev; 4779 offset = logical - em->start; 4780 4781 stripe_len = map->stripe_len; 4782 stripe_nr = offset; 4783 /* 4784 * stripe_nr counts the total number of stripes we have to stride 4785 * to get to this block 4786 */ 4787 do_div(stripe_nr, stripe_len); 4788 4789 stripe_offset = stripe_nr * stripe_len; 4790 BUG_ON(offset < stripe_offset); 4791 4792 /* stripe_offset is the offset of this block in its stripe*/ 4793 stripe_offset = offset - stripe_offset; 4794 4795 /* if we're here for raid56, we need to know the stripe aligned start */ 4796 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) { 4797 unsigned long full_stripe_len = stripe_len * nr_data_stripes(map); 4798 raid56_full_stripe_start = offset; 4799 4800 /* allow a write of a full stripe, but make sure we don't 4801 * allow straddling of stripes 4802 */ 4803 do_div(raid56_full_stripe_start, full_stripe_len); 4804 
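/*
* (Worked example with made-up numbers: stripe_len 64KiB and 3 data
* stripes give full_stripe_len = 192KiB; an offset of 500KiB divides
* down to 2 above, and the multiplication below yields 384KiB, the
* start of the containing full stripe.)
*/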
raid56_full_stripe_start *= full_stripe_len; 4805 } 4806 4807 if (rw & REQ_DISCARD) { 4808 /* we don't discard raid56 yet */ 4809 if (map->type & 4810 (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) { 4811 ret = -EOPNOTSUPP; 4812 goto out; 4813 } 4814 *length = min_t(u64, em->len - offset, *length); 4815 } else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { 4816 u64 max_len; 4817 /* For writes to RAID[56], allow a full stripeset across all disks. 4818 For other RAID types and for RAID[56] reads, just allow a single 4819 stripe (on a single disk). */ 4820 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) && 4821 (rw & REQ_WRITE)) { 4822 max_len = stripe_len * nr_data_stripes(map) - 4823 (offset - raid56_full_stripe_start); 4824 } else { 4825 /* we limit the length of each bio to what fits in a stripe */ 4826 max_len = stripe_len - stripe_offset; 4827 } 4828 *length = min_t(u64, em->len - offset, max_len); 4829 } else { 4830 *length = em->len - offset; 4831 } 4832 4833 /* This is for when we're called from btrfs_merge_bio_hook() and all 4834 it cares about is the length */ 4835 if (!bbio_ret) 4836 goto out; 4837 4838 btrfs_dev_replace_lock(dev_replace); 4839 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace); 4840 if (!dev_replace_is_ongoing) 4841 btrfs_dev_replace_unlock(dev_replace); 4842 4843 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 && 4844 !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) && 4845 dev_replace->tgtdev != NULL) { 4846 /* 4847 * in dev-replace case, for repair case (that's the only 4848 * case where the mirror is selected explicitly when 4849 * calling btrfs_map_block), blocks left of the left cursor 4850 * can also be read from the target drive. 4851 * For REQ_GET_READ_MIRRORS, the target drive is added as 4852 * the last one to the array of stripes. For READ, it also 4853 * needs to be supported using the same mirror number. 4854 * If the requested block is not left of the left cursor, 4855 * EIO is returned. This can happen because btrfs_num_copies() 4856 * returns one more in the dev-replace case. 4857 */ 4858 u64 tmp_length = *length; 4859 struct btrfs_bio *tmp_bbio = NULL; 4860 int tmp_num_stripes; 4861 u64 srcdev_devid = dev_replace->srcdev->devid; 4862 int index_srcdev = 0; 4863 int found = 0; 4864 u64 physical_of_found = 0; 4865 4866 ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, 4867 logical, &tmp_length, &tmp_bbio, 0, NULL); 4868 if (ret) { 4869 WARN_ON(tmp_bbio != NULL); 4870 goto out; 4871 } 4872 4873 tmp_num_stripes = tmp_bbio->num_stripes; 4874 if (mirror_num > tmp_num_stripes) { 4875 /* 4876 * REQ_GET_READ_MIRRORS does not contain this 4877 * mirror, that means that the requested area 4878 * is not left of the left cursor 4879 */ 4880 ret = -EIO; 4881 kfree(tmp_bbio); 4882 goto out; 4883 } 4884 4885 /* 4886 * process the rest of the function using the mirror_num 4887 * of the source drive. Therefore look it up first. 4888 * At the end, patch the device pointer to the one of the 4889 * target drive. 
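*
* (In short: the recursive REQ_GET_READ_MIRRORS mapping below is only
* used to locate the source device's stripe; the physical offset found
* there is patched into stripe 0 at the very end of this function.)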
4890 */ 4891 for (i = 0; i < tmp_num_stripes; i++) { 4892 if (tmp_bbio->stripes[i].dev->devid == srcdev_devid) { 4893 /* 4894 * In case of DUP, in order to keep it 4895 * simple, only add the mirror with the 4896 * lowest physical address 4897 */ 4898 if (found && 4899 physical_of_found <= 4900 tmp_bbio->stripes[i].physical) 4901 continue; 4902 index_srcdev = i; 4903 found = 1; 4904 physical_of_found = 4905 tmp_bbio->stripes[i].physical; 4906 } 4907 } 4908 4909 if (found) { 4910 mirror_num = index_srcdev + 1; 4911 patch_the_first_stripe_for_dev_replace = 1; 4912 physical_to_patch_in_first_stripe = physical_of_found; 4913 } else { 4914 WARN_ON(1); 4915 ret = -EIO; 4916 kfree(tmp_bbio); 4917 goto out; 4918 } 4919 4920 kfree(tmp_bbio); 4921 } else if (mirror_num > map->num_stripes) { 4922 mirror_num = 0; 4923 } 4924 4925 num_stripes = 1; 4926 stripe_index = 0; 4927 stripe_nr_orig = stripe_nr; 4928 stripe_nr_end = ALIGN(offset + *length, map->stripe_len); 4929 do_div(stripe_nr_end, map->stripe_len); 4930 stripe_end_offset = stripe_nr_end * map->stripe_len - 4931 (offset + *length); 4932 4933 if (map->type & BTRFS_BLOCK_GROUP_RAID0) { 4934 if (rw & REQ_DISCARD) 4935 num_stripes = min_t(u64, map->num_stripes, 4936 stripe_nr_end - stripe_nr_orig); 4937 stripe_index = do_div(stripe_nr, map->num_stripes); 4938 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) { 4939 if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) 4940 num_stripes = map->num_stripes; 4941 else if (mirror_num) 4942 stripe_index = mirror_num - 1; 4943 else { 4944 stripe_index = find_live_mirror(fs_info, map, 0, 4945 map->num_stripes, 4946 current->pid % map->num_stripes, 4947 dev_replace_is_ongoing); 4948 mirror_num = stripe_index + 1; 4949 } 4950 4951 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { 4952 if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) { 4953 num_stripes = map->num_stripes; 4954 } else if (mirror_num) { 4955 stripe_index = mirror_num - 1; 4956 } else { 4957 mirror_num = 1; 4958 } 4959 4960 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { 4961 int factor = map->num_stripes / map->sub_stripes; 4962 4963 stripe_index = do_div(stripe_nr, factor); 4964 stripe_index *= map->sub_stripes; 4965 4966 if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) 4967 num_stripes = map->sub_stripes; 4968 else if (rw & REQ_DISCARD) 4969 num_stripes = min_t(u64, map->sub_stripes * 4970 (stripe_nr_end - stripe_nr_orig), 4971 map->num_stripes); 4972 else if (mirror_num) 4973 stripe_index += mirror_num - 1; 4974 else { 4975 int old_stripe_index = stripe_index; 4976 stripe_index = find_live_mirror(fs_info, map, 4977 stripe_index, 4978 map->sub_stripes, stripe_index + 4979 current->pid % map->sub_stripes, 4980 dev_replace_is_ongoing); 4981 mirror_num = stripe_index - old_stripe_index + 1; 4982 } 4983 4984 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | 4985 BTRFS_BLOCK_GROUP_RAID6)) { 4986 u64 tmp; 4987 4988 if (bbio_ret && ((rw & REQ_WRITE) || mirror_num > 1) 4989 && raid_map_ret) { 4990 int i, rot; 4991 4992 /* push stripe_nr back to the start of the full stripe */ 4993 stripe_nr = raid56_full_stripe_start; 4994 do_div(stripe_nr, stripe_len); 4995 4996 stripe_index = do_div(stripe_nr, nr_data_stripes(map)); 4997 4998 /* RAID[56] write or recovery. 
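A full-stripe write touches every device, and a degraded read must
reconstruct from all surviving data stripes plus parity, so no subset
of the stripes is sufficient.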
Return all stripes */ 4999 num_stripes = map->num_stripes; 5000 max_errors = nr_parity_stripes(map); 5001 5002 raid_map = kmalloc_array(num_stripes, sizeof(u64), 5003 GFP_NOFS); 5004 if (!raid_map) { 5005 ret = -ENOMEM; 5006 goto out; 5007 } 5008 5009 /* Work out the disk rotation on this stripe-set */ 5010 tmp = stripe_nr; 5011 rot = do_div(tmp, num_stripes); 5012 5013 /* Fill in the logical address of each stripe */ 5014 tmp = stripe_nr * nr_data_stripes(map); 5015 for (i = 0; i < nr_data_stripes(map); i++) 5016 raid_map[(i+rot) % num_stripes] = 5017 em->start + (tmp + i) * map->stripe_len; 5018 5019 raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE; 5020 if (map->type & BTRFS_BLOCK_GROUP_RAID6) 5021 raid_map[(i+rot+1) % num_stripes] = 5022 RAID6_Q_STRIPE; 5023 5024 *length = map->stripe_len; 5025 stripe_index = 0; 5026 stripe_offset = 0; 5027 } else { 5028 /* 5029 * Mirror #0 or #1 means the original data block. 5030 * Mirror #2 is RAID5 parity block. 5031 * Mirror #3 is RAID6 Q block. 5032 */ 5033 stripe_index = do_div(stripe_nr, nr_data_stripes(map)); 5034 if (mirror_num > 1) 5035 stripe_index = nr_data_stripes(map) + 5036 mirror_num - 2; 5037 5038 /* We distribute the parity blocks across stripes */ 5039 tmp = stripe_nr + stripe_index; 5040 stripe_index = do_div(tmp, map->num_stripes); 5041 } 5042 } else { 5043 /* 5044 * after this do_div call, stripe_nr is the number of stripes 5045 * on this device we have to walk to find the data, and 5046 * stripe_index is the number of our device in the stripe array 5047 */ 5048 stripe_index = do_div(stripe_nr, map->num_stripes); 5049 mirror_num = stripe_index + 1; 5050 } 5051 BUG_ON(stripe_index >= map->num_stripes); 5052 5053 num_alloc_stripes = num_stripes; 5054 if (dev_replace_is_ongoing) { 5055 if (rw & (REQ_WRITE | REQ_DISCARD)) 5056 num_alloc_stripes <<= 1; 5057 if (rw & REQ_GET_READ_MIRRORS) 5058 num_alloc_stripes++; 5059 } 5060 bbio = kzalloc(btrfs_bio_size(num_alloc_stripes), GFP_NOFS); 5061 if (!bbio) { 5062 kfree(raid_map); 5063 ret = -ENOMEM; 5064 goto out; 5065 } 5066 atomic_set(&bbio->error, 0); 5067 5068 if (rw & REQ_DISCARD) { 5069 int factor = 0; 5070 int sub_stripes = 0; 5071 u64 stripes_per_dev = 0; 5072 u32 remaining_stripes = 0; 5073 u32 last_stripe = 0; 5074 5075 if (map->type & 5076 (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) { 5077 if (map->type & BTRFS_BLOCK_GROUP_RAID0) 5078 sub_stripes = 1; 5079 else 5080 sub_stripes = map->sub_stripes; 5081 5082 factor = map->num_stripes / sub_stripes; 5083 stripes_per_dev = div_u64_rem(stripe_nr_end - 5084 stripe_nr_orig, 5085 factor, 5086 &remaining_stripes); 5087 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe); 5088 last_stripe *= sub_stripes; 5089 } 5090 5091 for (i = 0; i < num_stripes; i++) { 5092 bbio->stripes[i].physical = 5093 map->stripes[stripe_index].physical + 5094 stripe_offset + stripe_nr * map->stripe_len; 5095 bbio->stripes[i].dev = map->stripes[stripe_index].dev; 5096 5097 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 5098 BTRFS_BLOCK_GROUP_RAID10)) { 5099 bbio->stripes[i].length = stripes_per_dev * 5100 map->stripe_len; 5101 5102 if (i / sub_stripes < remaining_stripes) 5103 bbio->stripes[i].length += 5104 map->stripe_len; 5105 5106 /* 5107 * Special for the first stripe and 5108 * the last stripe: 5109 * 5110 * |-------|...|-------| 5111 * |----------| 5112 * off end_off 5113 */ 5114 if (i < sub_stripes) 5115 bbio->stripes[i].length -= 5116 stripe_offset; 5117 5118 if (stripe_index >= last_stripe && 5119 stripe_index <= (last_stripe + 5120 
sub_stripes - 1)) 5121 bbio->stripes[i].length -= 5122 stripe_end_offset; 5123 5124 if (i == sub_stripes - 1) 5125 stripe_offset = 0; 5126 } else 5127 bbio->stripes[i].length = *length; 5128 5129 stripe_index++; 5130 if (stripe_index == map->num_stripes) { 5131 /* This could only happen for RAID0/10 */ 5132 stripe_index = 0; 5133 stripe_nr++; 5134 } 5135 } 5136 } else { 5137 for (i = 0; i < num_stripes; i++) { 5138 bbio->stripes[i].physical = 5139 map->stripes[stripe_index].physical + 5140 stripe_offset + 5141 stripe_nr * map->stripe_len; 5142 bbio->stripes[i].dev = 5143 map->stripes[stripe_index].dev; 5144 stripe_index++; 5145 } 5146 } 5147 5148 if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) { 5149 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 | 5150 BTRFS_BLOCK_GROUP_RAID10 | 5151 BTRFS_BLOCK_GROUP_RAID5 | 5152 BTRFS_BLOCK_GROUP_DUP)) { 5153 max_errors = 1; 5154 } else if (map->type & BTRFS_BLOCK_GROUP_RAID6) { 5155 max_errors = 2; 5156 } 5157 } 5158 5159 if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) && 5160 dev_replace->tgtdev != NULL) { 5161 int index_where_to_add; 5162 u64 srcdev_devid = dev_replace->srcdev->devid; 5163 5164 /* 5165 * duplicate the write operations while the dev replace 5166 * procedure is running. Since the copying of the old disk 5167 * to the new disk takes place at run time while the 5168 * filesystem is mounted writable, the regular write 5169 * operations to the old disk have to be duplicated to go 5170 * to the new disk as well. 5171 * Note that device->missing is handled by the caller, and 5172 * that the write to the old disk is already set up in the 5173 * stripes array. 5174 */ 5175 index_where_to_add = num_stripes; 5176 for (i = 0; i < num_stripes; i++) { 5177 if (bbio->stripes[i].dev->devid == srcdev_devid) { 5178 /* write to new disk, too */ 5179 struct btrfs_bio_stripe *new = 5180 bbio->stripes + index_where_to_add; 5181 struct btrfs_bio_stripe *old = 5182 bbio->stripes + i; 5183 5184 new->physical = old->physical; 5185 new->length = old->length; 5186 new->dev = dev_replace->tgtdev; 5187 index_where_to_add++; 5188 max_errors++; 5189 } 5190 } 5191 num_stripes = index_where_to_add; 5192 } else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) && 5193 dev_replace->tgtdev != NULL) { 5194 u64 srcdev_devid = dev_replace->srcdev->devid; 5195 int index_srcdev = 0; 5196 int found = 0; 5197 u64 physical_of_found = 0; 5198 5199 /* 5200 * During the dev-replace procedure, the target drive can 5201 * also be used to read data in case it is needed to repair 5202 * a corrupt block elsewhere. This is possible if the 5203 * requested area is left of the left cursor. In this area, 5204 * the target drive is a full copy of the source drive. 
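*
* (Hedged illustration: if cursor_left stands at 1GiB, a stripe whose
* physical extent on the source device ends at or below 1GiB has
* already been copied to the target, which is exactly the
* physical_of_found + length <= cursor_left test below.)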
5205 */ 5206 for (i = 0; i < num_stripes; i++) { 5207 if (bbio->stripes[i].dev->devid == srcdev_devid) { 5208 /* 5209 * In case of DUP, in order to keep it 5210 * simple, only add the mirror with the 5211 * lowest physical address 5212 */ 5213 if (found && 5214 physical_of_found <= 5215 bbio->stripes[i].physical) 5216 continue; 5217 index_srcdev = i; 5218 found = 1; 5219 physical_of_found = bbio->stripes[i].physical; 5220 } 5221 } 5222 if (found) { 5223 u64 length = map->stripe_len; 5224 5225 if (physical_of_found + length <= 5226 dev_replace->cursor_left) { 5227 struct btrfs_bio_stripe *tgtdev_stripe = 5228 bbio->stripes + num_stripes; 5229 5230 tgtdev_stripe->physical = physical_of_found; 5231 tgtdev_stripe->length = 5232 bbio->stripes[index_srcdev].length; 5233 tgtdev_stripe->dev = dev_replace->tgtdev; 5234 5235 num_stripes++; 5236 } 5237 } 5238 } 5239 5240 *bbio_ret = bbio; 5241 bbio->num_stripes = num_stripes; 5242 bbio->max_errors = max_errors; 5243 bbio->mirror_num = mirror_num; 5244 5245 /* 5246 * this is the case that REQ_READ && dev_replace_is_ongoing && 5247 * mirror_num == num_stripes + 1 && dev_replace target drive is 5248 * available as a mirror 5249 */ 5250 if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) { 5251 WARN_ON(num_stripes > 1); 5252 bbio->stripes[0].dev = dev_replace->tgtdev; 5253 bbio->stripes[0].physical = physical_to_patch_in_first_stripe; 5254 bbio->mirror_num = map->num_stripes + 1; 5255 } 5256 if (raid_map) { 5257 sort_parity_stripes(bbio, raid_map); 5258 *raid_map_ret = raid_map; 5259 } 5260 out: 5261 if (dev_replace_is_ongoing) 5262 btrfs_dev_replace_unlock(dev_replace); 5263 free_extent_map(em); 5264 return ret; 5265 } 5266 5267 int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, 5268 u64 logical, u64 *length, 5269 struct btrfs_bio **bbio_ret, int mirror_num) 5270 { 5271 return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret, 5272 mirror_num, NULL); 5273 } 5274 5275 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, 5276 u64 chunk_start, u64 physical, u64 devid, 5277 u64 **logical, int *naddrs, int *stripe_len) 5278 { 5279 struct extent_map_tree *em_tree = &map_tree->map_tree; 5280 struct extent_map *em; 5281 struct map_lookup *map; 5282 u64 *buf; 5283 u64 bytenr; 5284 u64 length; 5285 u64 stripe_nr; 5286 u64 rmap_len; 5287 int i, j, nr = 0; 5288 5289 read_lock(&em_tree->lock); 5290 em = lookup_extent_mapping(em_tree, chunk_start, 1); 5291 read_unlock(&em_tree->lock); 5292 5293 if (!em) { 5294 printk(KERN_ERR "BTRFS: couldn't find em for chunk %Lu\n", 5295 chunk_start); 5296 return -EIO; 5297 } 5298 5299 if (em->start != chunk_start) { 5300 printk(KERN_ERR "BTRFS: bad chunk start, em=%Lu, wanted=%Lu\n", 5301 em->start, chunk_start); 5302 free_extent_map(em); 5303 return -EIO; 5304 } 5305 map = (struct map_lookup *)em->bdev; 5306 5307 length = em->len; 5308 rmap_len = map->stripe_len; 5309 5310 if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5311 do_div(length, map->num_stripes / map->sub_stripes); 5312 else if (map->type & BTRFS_BLOCK_GROUP_RAID0) 5313 do_div(length, map->num_stripes); 5314 else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | 5315 BTRFS_BLOCK_GROUP_RAID6)) { 5316 do_div(length, nr_data_stripes(map)); 5317 rmap_len = map->stripe_len * nr_data_stripes(map); 5318 } 5319 5320 buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS); 5321 BUG_ON(!buf); /* -ENOMEM */ 5322 5323 for (i = 0; i < map->num_stripes; i++) { 5324 if (devid && map->stripes[i].dev->devid != devid) 5325 continue; 5326 if (map->stripes[i].physical > 
physical || 5327 map->stripes[i].physical + length <= physical) 5328 continue; 5329 5330 stripe_nr = physical - map->stripes[i].physical; 5331 do_div(stripe_nr, map->stripe_len); 5332 5333 if (map->type & BTRFS_BLOCK_GROUP_RAID10) { 5334 stripe_nr = stripe_nr * map->num_stripes + i; 5335 do_div(stripe_nr, map->sub_stripes); 5336 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) { 5337 stripe_nr = stripe_nr * map->num_stripes + i; 5338 } /* else if RAID[56], multiply by nr_data_stripes(). 5339 * Alternatively, just use rmap_len below instead of 5340 * map->stripe_len */ 5341 5342 bytenr = chunk_start + stripe_nr * rmap_len; 5343 WARN_ON(nr >= map->num_stripes); 5344 for (j = 0; j < nr; j++) { 5345 if (buf[j] == bytenr) 5346 break; 5347 } 5348 if (j == nr) { 5349 WARN_ON(nr >= map->num_stripes); 5350 buf[nr++] = bytenr; 5351 } 5352 } 5353 5354 *logical = buf; 5355 *naddrs = nr; 5356 *stripe_len = rmap_len; 5357 5358 free_extent_map(em); 5359 return 0; 5360 } 5361 5362 static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio, int err) 5363 { 5364 if (likely(bbio->flags & BTRFS_BIO_ORIG_BIO_SUBMITTED)) 5365 bio_endio_nodec(bio, err); 5366 else 5367 bio_endio(bio, err); 5368 kfree(bbio); 5369 } 5370 5371 static void btrfs_end_bio(struct bio *bio, int err) 5372 { 5373 struct btrfs_bio *bbio = bio->bi_private; 5374 struct btrfs_device *dev = bbio->stripes[0].dev; 5375 int is_orig_bio = 0; 5376 5377 if (err) { 5378 atomic_inc(&bbio->error); 5379 if (err == -EIO || err == -EREMOTEIO) { 5380 unsigned int stripe_index = 5381 btrfs_io_bio(bio)->stripe_index; 5382 5383 BUG_ON(stripe_index >= bbio->num_stripes); 5384 dev = bbio->stripes[stripe_index].dev; 5385 if (dev->bdev) { 5386 if (bio->bi_rw & WRITE) 5387 btrfs_dev_stat_inc(dev, 5388 BTRFS_DEV_STAT_WRITE_ERRS); 5389 else 5390 btrfs_dev_stat_inc(dev, 5391 BTRFS_DEV_STAT_READ_ERRS); 5392 if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH) 5393 btrfs_dev_stat_inc(dev, 5394 BTRFS_DEV_STAT_FLUSH_ERRS); 5395 btrfs_dev_stat_print_on_error(dev); 5396 } 5397 } 5398 } 5399 5400 if (bio == bbio->orig_bio) 5401 is_orig_bio = 1; 5402 5403 btrfs_bio_counter_dec(bbio->fs_info); 5404 5405 if (atomic_dec_and_test(&bbio->stripes_pending)) { 5406 if (!is_orig_bio) { 5407 bio_put(bio); 5408 bio = bbio->orig_bio; 5409 } 5410 5411 bio->bi_private = bbio->private; 5412 bio->bi_end_io = bbio->end_io; 5413 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; 5414 /* only send an error to the higher layers if it is 5415 * beyond the tolerance of the btrfs bio 5416 */ 5417 if (atomic_read(&bbio->error) > bbio->max_errors) { 5418 err = -EIO; 5419 } else { 5420 /* 5421 * this bio is actually up to date, we didn't 5422 * go over the max number of errors 5423 */ 5424 set_bit(BIO_UPTODATE, &bio->bi_flags); 5425 err = 0; 5426 } 5427 5428 btrfs_end_bbio(bbio, bio, err); 5429 } else if (!is_orig_bio) { 5430 bio_put(bio); 5431 } 5432 } 5433 5434 /* 5435 * see run_scheduled_bios for a description of why bios are collected for 5436 * async submit. 5437 * 5438 * This will add one bio to the pending list for a device and make sure 5439 * the work struct is scheduled. 
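*
* (Summary of the queueing below: reads are submitted synchronously;
* writes are appended to device->pending_bios, or to
* device->pending_sync_bios for REQ_SYNC bios, and the submit worker
* is kicked unless it is already running.)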
5440 */ 5441 static noinline void btrfs_schedule_bio(struct btrfs_root *root, 5442 struct btrfs_device *device, 5443 int rw, struct bio *bio) 5444 { 5445 int should_queue = 1; 5446 struct btrfs_pending_bios *pending_bios; 5447 5448 if (device->missing || !device->bdev) { 5449 bio_endio(bio, -EIO); 5450 return; 5451 } 5452 5453 /* don't bother with additional async steps for reads, right now */ 5454 if (!(rw & REQ_WRITE)) { 5455 bio_get(bio); 5456 btrfsic_submit_bio(rw, bio); 5457 bio_put(bio); 5458 return; 5459 } 5460 5461 /* 5462 * nr_async_bios allows us to reliably return congestion to the 5463 * higher layers. Otherwise, the async bio makes it appear we have 5464 * made progress against dirty pages when we've really just put it 5465 * on a queue for later 5466 */ 5467 atomic_inc(&root->fs_info->nr_async_bios); 5468 WARN_ON(bio->bi_next); 5469 bio->bi_next = NULL; 5470 bio->bi_rw |= rw; 5471 5472 spin_lock(&device->io_lock); 5473 if (bio->bi_rw & REQ_SYNC) 5474 pending_bios = &device->pending_sync_bios; 5475 else 5476 pending_bios = &device->pending_bios; 5477 5478 if (pending_bios->tail) 5479 pending_bios->tail->bi_next = bio; 5480 5481 pending_bios->tail = bio; 5482 if (!pending_bios->head) 5483 pending_bios->head = bio; 5484 if (device->running_pending) 5485 should_queue = 0; 5486 5487 spin_unlock(&device->io_lock); 5488 5489 if (should_queue) 5490 btrfs_queue_work(root->fs_info->submit_workers, 5491 &device->work); 5492 } 5493 5494 static int bio_size_ok(struct block_device *bdev, struct bio *bio, 5495 sector_t sector) 5496 { 5497 struct bio_vec *prev; 5498 struct request_queue *q = bdev_get_queue(bdev); 5499 unsigned int max_sectors = queue_max_sectors(q); 5500 struct bvec_merge_data bvm = { 5501 .bi_bdev = bdev, 5502 .bi_sector = sector, 5503 .bi_rw = bio->bi_rw, 5504 }; 5505 5506 if (WARN_ON(bio->bi_vcnt == 0)) 5507 return 1; 5508 5509 prev = &bio->bi_io_vec[bio->bi_vcnt - 1]; 5510 if (bio_sectors(bio) > max_sectors) 5511 return 0; 5512 5513 if (!q->merge_bvec_fn) 5514 return 1; 5515 5516 bvm.bi_size = bio->bi_iter.bi_size - prev->bv_len; 5517 if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len) 5518 return 0; 5519 return 1; 5520 } 5521 5522 static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio, 5523 struct bio *bio, u64 physical, int dev_nr, 5524 int rw, int async) 5525 { 5526 struct btrfs_device *dev = bbio->stripes[dev_nr].dev; 5527 5528 bio->bi_private = bbio; 5529 btrfs_io_bio(bio)->stripe_index = dev_nr; 5530 bio->bi_end_io = btrfs_end_bio; 5531 bio->bi_iter.bi_sector = physical >> 9; 5532 #ifdef DEBUG 5533 { 5534 struct rcu_string *name; 5535 5536 rcu_read_lock(); 5537 name = rcu_dereference(dev->name); 5538 pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu " 5539 "(%s id %llu), size=%u\n", rw, 5540 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev, 5541 name->str, dev->devid, bio->bi_size); 5542 rcu_read_unlock(); 5543 } 5544 #endif 5545 bio->bi_bdev = dev->bdev; 5546 5547 btrfs_bio_counter_inc_noblocked(root->fs_info); 5548 5549 if (async) 5550 btrfs_schedule_bio(root, dev, rw, bio); 5551 else 5552 btrfsic_submit_bio(rw, bio); 5553 } 5554 5555 static int breakup_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio, 5556 struct bio *first_bio, struct btrfs_device *dev, 5557 int dev_nr, int rw, int async) 5558 { 5559 struct bio_vec *bvec = first_bio->bi_io_vec; 5560 struct bio *bio; 5561 int nr_vecs = bio_get_nr_vecs(dev->bdev); 5562 u64 physical = bbio->stripes[dev_nr].physical; 5563 5564 again: 5565 bio = btrfs_bio_alloc(dev->bdev, physical 
>> 9, nr_vecs, GFP_NOFS);
5566 if (!bio)
5567 return -ENOMEM;
5568
5569 while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
5570 if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
5571 bvec->bv_offset) < bvec->bv_len) {
5572 u64 len = bio->bi_iter.bi_size;
5573
5574 atomic_inc(&bbio->stripes_pending);
5575 submit_stripe_bio(root, bbio, bio, physical, dev_nr,
5576 rw, async);
5577 physical += len;
5578 goto again;
5579 }
5580 bvec++;
5581 }
5582
5583 submit_stripe_bio(root, bbio, bio, physical, dev_nr, rw, async);
5584 return 0;
5585 }
5586
5587 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
5588 {
5589 atomic_inc(&bbio->error);
5590 if (atomic_dec_and_test(&bbio->stripes_pending)) {
5591 /* Should be the original bio. */
5592 WARN_ON(bio != bbio->orig_bio);
5593
5594 bio->bi_private = bbio->private;
5595 bio->bi_end_io = bbio->end_io;
5596 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
5597 bio->bi_iter.bi_sector = logical >> 9;
5598
5599 btrfs_end_bbio(bbio, bio, -EIO);
5600 }
5601 }
5602
5603 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
5604 int mirror_num, int async_submit)
5605 {
5606 struct btrfs_device *dev;
5607 struct bio *first_bio = bio;
5608 u64 logical = (u64)bio->bi_iter.bi_sector << 9;
5609 u64 length = 0;
5610 u64 map_length;
5611 u64 *raid_map = NULL;
5612 int ret;
5613 int dev_nr = 0;
5614 int total_devs = 1;
5615 struct btrfs_bio *bbio = NULL;
5616
5617 length = bio->bi_iter.bi_size;
5618 map_length = length;
5619
5620 btrfs_bio_counter_inc_blocked(root->fs_info);
5621 ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
5622 mirror_num, &raid_map);
5623 if (ret) {
5624 btrfs_bio_counter_dec(root->fs_info);
5625 return ret;
5626 }
5627
5628 total_devs = bbio->num_stripes;
5629 bbio->orig_bio = first_bio;
5630 bbio->private = first_bio->bi_private;
5631 bbio->end_io = first_bio->bi_end_io;
5632 bbio->fs_info = root->fs_info;
5633 atomic_set(&bbio->stripes_pending, bbio->num_stripes);
5634
5635 if (raid_map) {
5636 /* In this case, map_length has been set to the length of
5637 a single stripe, not the whole write */
5638 if (rw & WRITE) {
5639 ret = raid56_parity_write(root, bio, bbio,
5640 raid_map, map_length);
5641 } else {
5642 ret = raid56_parity_recover(root, bio, bbio,
5643 raid_map, map_length,
5644 mirror_num);
5645 }
5646 /*
5647 * FIXME: replace doesn't support raid56 yet, please fix
5648 * it in the future.
5649 */
5650 btrfs_bio_counter_dec(root->fs_info);
5651 return ret;
5652 }
5653
5654 if (map_length < length) {
5655 btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu",
5656 logical, length, map_length);
5657 BUG();
5658 }
5659
5660 while (dev_nr < total_devs) {
5661 dev = bbio->stripes[dev_nr].dev;
5662 if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
5663 bbio_error(bbio, first_bio, logical);
5664 dev_nr++;
5665 continue;
5666 }
5667
5668 /*
5669 * Check and see if we're ok with this bio based on its size
5670 * and offset with the given device.
5671 */ 5672 if (!bio_size_ok(dev->bdev, first_bio, 5673 bbio->stripes[dev_nr].physical >> 9)) { 5674 ret = breakup_stripe_bio(root, bbio, first_bio, dev, 5675 dev_nr, rw, async_submit); 5676 BUG_ON(ret); 5677 dev_nr++; 5678 continue; 5679 } 5680 5681 if (dev_nr < total_devs - 1) { 5682 bio = btrfs_bio_clone(first_bio, GFP_NOFS); 5683 BUG_ON(!bio); /* -ENOMEM */ 5684 } else { 5685 bio = first_bio; 5686 bbio->flags |= BTRFS_BIO_ORIG_BIO_SUBMITTED; 5687 } 5688 5689 submit_stripe_bio(root, bbio, bio, 5690 bbio->stripes[dev_nr].physical, dev_nr, rw, 5691 async_submit); 5692 dev_nr++; 5693 } 5694 btrfs_bio_counter_dec(root->fs_info); 5695 return 0; 5696 } 5697 5698 struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid, 5699 u8 *uuid, u8 *fsid) 5700 { 5701 struct btrfs_device *device; 5702 struct btrfs_fs_devices *cur_devices; 5703 5704 cur_devices = fs_info->fs_devices; 5705 while (cur_devices) { 5706 if (!fsid || 5707 !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) { 5708 device = __find_device(&cur_devices->devices, 5709 devid, uuid); 5710 if (device) 5711 return device; 5712 } 5713 cur_devices = cur_devices->seed; 5714 } 5715 return NULL; 5716 } 5717 5718 static struct btrfs_device *add_missing_dev(struct btrfs_root *root, 5719 u64 devid, u8 *dev_uuid) 5720 { 5721 struct btrfs_device *device; 5722 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; 5723 5724 device = btrfs_alloc_device(NULL, &devid, dev_uuid); 5725 if (IS_ERR(device)) 5726 return NULL; 5727 5728 list_add(&device->dev_list, &fs_devices->devices); 5729 device->fs_devices = fs_devices; 5730 fs_devices->num_devices++; 5731 5732 device->missing = 1; 5733 fs_devices->missing_devices++; 5734 5735 return device; 5736 } 5737 5738 /** 5739 * btrfs_alloc_device - allocate struct btrfs_device 5740 * @fs_info: used only for generating a new devid, can be NULL if 5741 * devid is provided (i.e. @devid != NULL). 5742 * @devid: a pointer to devid for this device. If NULL a new devid 5743 * is generated. 5744 * @uuid: a pointer to UUID for this device. If NULL a new UUID 5745 * is generated. 5746 * 5747 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR() 5748 * on error. Returned struct is not linked onto any lists and can be 5749 * destroyed with kfree() right away. 
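*
* A minimal usage sketch (hedged, error handling abbreviated; this is
* the transient-device pattern used by add_missing_dev() above):
*
*	struct btrfs_device *dev;
*
*	dev = btrfs_alloc_device(NULL, &devid, dev_uuid);
*	if (IS_ERR(dev))
*		return PTR_ERR(dev);
*	list_add(&dev->dev_list, &fs_devices->devices);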
5750 */ 5751 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, 5752 const u64 *devid, 5753 const u8 *uuid) 5754 { 5755 struct btrfs_device *dev; 5756 u64 tmp; 5757 5758 if (WARN_ON(!devid && !fs_info)) 5759 return ERR_PTR(-EINVAL); 5760 5761 dev = __alloc_device(); 5762 if (IS_ERR(dev)) 5763 return dev; 5764 5765 if (devid) 5766 tmp = *devid; 5767 else { 5768 int ret; 5769 5770 ret = find_next_devid(fs_info, &tmp); 5771 if (ret) { 5772 kfree(dev); 5773 return ERR_PTR(ret); 5774 } 5775 } 5776 dev->devid = tmp; 5777 5778 if (uuid) 5779 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE); 5780 else 5781 generate_random_uuid(dev->uuid); 5782 5783 btrfs_init_work(&dev->work, pending_bios_fn, NULL, NULL); 5784 5785 return dev; 5786 } 5787 5788 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key, 5789 struct extent_buffer *leaf, 5790 struct btrfs_chunk *chunk) 5791 { 5792 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree; 5793 struct map_lookup *map; 5794 struct extent_map *em; 5795 u64 logical; 5796 u64 length; 5797 u64 devid; 5798 u8 uuid[BTRFS_UUID_SIZE]; 5799 int num_stripes; 5800 int ret; 5801 int i; 5802 5803 logical = key->offset; 5804 length = btrfs_chunk_length(leaf, chunk); 5805 5806 read_lock(&map_tree->map_tree.lock); 5807 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1); 5808 read_unlock(&map_tree->map_tree.lock); 5809 5810 /* already mapped? */ 5811 if (em && em->start <= logical && em->start + em->len > logical) { 5812 free_extent_map(em); 5813 return 0; 5814 } else if (em) { 5815 free_extent_map(em); 5816 } 5817 5818 em = alloc_extent_map(); 5819 if (!em) 5820 return -ENOMEM; 5821 num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 5822 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); 5823 if (!map) { 5824 free_extent_map(em); 5825 return -ENOMEM; 5826 } 5827 5828 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 5829 em->bdev = (struct block_device *)map; 5830 em->start = logical; 5831 em->len = length; 5832 em->orig_start = 0; 5833 em->block_start = 0; 5834 em->block_len = em->len; 5835 5836 map->num_stripes = num_stripes; 5837 map->io_width = btrfs_chunk_io_width(leaf, chunk); 5838 map->io_align = btrfs_chunk_io_align(leaf, chunk); 5839 map->sector_size = btrfs_chunk_sector_size(leaf, chunk); 5840 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk); 5841 map->type = btrfs_chunk_type(leaf, chunk); 5842 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); 5843 for (i = 0; i < num_stripes; i++) { 5844 map->stripes[i].physical = 5845 btrfs_stripe_offset_nr(leaf, chunk, i); 5846 devid = btrfs_stripe_devid_nr(leaf, chunk, i); 5847 read_extent_buffer(leaf, uuid, (unsigned long) 5848 btrfs_stripe_dev_uuid_nr(chunk, i), 5849 BTRFS_UUID_SIZE); 5850 map->stripes[i].dev = btrfs_find_device(root->fs_info, devid, 5851 uuid, NULL); 5852 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) { 5853 free_extent_map(em); 5854 return -EIO; 5855 } 5856 if (!map->stripes[i].dev) { 5857 map->stripes[i].dev = 5858 add_missing_dev(root, devid, uuid); 5859 if (!map->stripes[i].dev) { 5860 free_extent_map(em); 5861 return -EIO; 5862 } 5863 } 5864 map->stripes[i].dev->in_fs_metadata = 1; 5865 } 5866 5867 write_lock(&map_tree->map_tree.lock); 5868 ret = add_extent_mapping(&map_tree->map_tree, em, 0); 5869 write_unlock(&map_tree->map_tree.lock); 5870 BUG_ON(ret); /* Tree corruption */ 5871 free_extent_map(em); 5872 5873 return 0; 5874 } 5875 5876 static void fill_device_from_item(struct extent_buffer *leaf, 5877 struct btrfs_dev_item 
*dev_item,
5878 struct btrfs_device *device)
5879 {
5880 unsigned long ptr;
5881
5882 device->devid = btrfs_device_id(leaf, dev_item);
5883 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
5884 device->total_bytes = device->disk_total_bytes;
5885 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
5886 device->type = btrfs_device_type(leaf, dev_item);
5887 device->io_align = btrfs_device_io_align(leaf, dev_item);
5888 device->io_width = btrfs_device_io_width(leaf, dev_item);
5889 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
5890 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
5891 device->is_tgtdev_for_dev_replace = 0;
5892
5893 ptr = btrfs_device_uuid(dev_item);
5894 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
5895 }
5896
5897 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
5898 {
5899 struct btrfs_fs_devices *fs_devices;
5900 int ret;
5901
5902 BUG_ON(!mutex_is_locked(&uuid_mutex));
5903
5904 fs_devices = root->fs_info->fs_devices->seed;
5905 while (fs_devices) {
5906 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
5907 ret = 0;
5908 goto out;
5909 }
5910 fs_devices = fs_devices->seed;
5911 }
5912
5913 fs_devices = find_fsid(fsid);
5914 if (!fs_devices) {
5915 ret = -ENOENT;
5916 goto out;
5917 }
5918
5919 fs_devices = clone_fs_devices(fs_devices);
5920 if (IS_ERR(fs_devices)) {
5921 ret = PTR_ERR(fs_devices);
5922 goto out;
5923 }
5924
5925 ret = __btrfs_open_devices(fs_devices, FMODE_READ,
5926 root->fs_info->bdev_holder);
5927 if (ret) {
5928 free_fs_devices(fs_devices);
5929 goto out;
5930 }
5931
5932 if (!fs_devices->seeding) {
5933 __btrfs_close_devices(fs_devices);
5934 free_fs_devices(fs_devices);
5935 ret = -EINVAL;
5936 goto out;
5937 }
5938
5939 fs_devices->seed = root->fs_info->fs_devices->seed;
5940 root->fs_info->fs_devices->seed = fs_devices;
5941 out:
5942 return ret;
5943 }
5944
5945 static int read_one_dev(struct btrfs_root *root,
5946 struct extent_buffer *leaf,
5947 struct btrfs_dev_item *dev_item)
5948 {
5949 struct btrfs_device *device;
5950 u64 devid;
5951 int ret;
5952 u8 fs_uuid[BTRFS_UUID_SIZE];
5953 u8 dev_uuid[BTRFS_UUID_SIZE];
5954
5955 devid = btrfs_device_id(leaf, dev_item);
5956 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
5957 BTRFS_UUID_SIZE);
5958 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
5959 BTRFS_UUID_SIZE);
5960
5961 if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
5962 ret = open_seed_devices(root, fs_uuid);
5963 if (ret && !btrfs_test_opt(root, DEGRADED))
5964 return ret;
5965 }
5966
5967 device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
5968 if (!device || !device->bdev) {
5969 if (!btrfs_test_opt(root, DEGRADED))
5970 return -EIO;
5971
5972 if (!device) {
5973 btrfs_warn(root->fs_info, "devid %llu missing", devid);
5974 device = add_missing_dev(root, devid, dev_uuid);
5975 if (!device)
5976 return -ENOMEM;
5977 } else if (!device->missing) {
5978 /*
5979 * this happens when a device that was properly set up
5980 * in the device info lists suddenly goes bad.
5981 * device->bdev is NULL, and so we have to set
5982 * device->missing to one here
5983 */
5984 root->fs_info->fs_devices->missing_devices++;
5985 device->missing = 1;
5986 }
5987 }
5988
5989 if (device->fs_devices != root->fs_info->fs_devices) {
5990 BUG_ON(device->writeable);
5991 if (device->generation !=
5992 btrfs_device_generation(leaf, dev_item))
5993 return -EINVAL;
5994 }
5995
5996 fill_device_from_item(leaf, dev_item, device);
5997 device->in_fs_metadata = 1;
5998 if (device->writeable && !device->is_tgtdev_for_dev_replace) {
5999 device->fs_devices->total_rw_bytes += device->total_bytes;
6000 spin_lock(&root->fs_info->free_chunk_lock);
6001 root->fs_info->free_chunk_space += device->total_bytes -
6002 device->bytes_used;
6003 spin_unlock(&root->fs_info->free_chunk_lock);
6004 }
6005 ret = 0;
6006 return ret;
6007 }
6008
6009 int btrfs_read_sys_array(struct btrfs_root *root)
6010 {
6011 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
6012 struct extent_buffer *sb;
6013 struct btrfs_disk_key *disk_key;
6014 struct btrfs_chunk *chunk;
6015 u8 *ptr;
6016 unsigned long sb_ptr;
6017 int ret = 0;
6018 u32 num_stripes;
6019 u32 array_size;
6020 u32 len = 0;
6021 u32 cur;
6022 struct btrfs_key key;
6023
6024 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
6025 BTRFS_SUPER_INFO_SIZE);
6026 if (!sb)
6027 return -ENOMEM;
6028 btrfs_set_buffer_uptodate(sb);
6029 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
6030 /*
6031 * The sb extent buffer is artificial and just used to read the system array.
6032 * The btrfs_set_buffer_uptodate() call does not properly mark all its
6033 * pages up-to-date when the page is larger: extent does not cover the
6034 * whole page and consequently check_page_uptodate does not find all
6035 * the page's extents up-to-date (the hole beyond sb),
6036 * write_extent_buffer then triggers a WARN_ON.
6037 *
6038 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
6039 * but sb spans only this function. Add an explicit SetPageUptodate call
6040 * to silence the warning e.g. on PowerPC 64.
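*
* (Concrete case: with 64KiB pages and the 4KiB BTRFS_SUPER_INFO_SIZE,
* the sb extent covers only the first 4KiB of the page, hence the
* PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE test below.)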
6041 */ 6042 if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE) 6043 SetPageUptodate(sb->pages[0]); 6044 6045 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); 6046 array_size = btrfs_super_sys_array_size(super_copy); 6047 6048 ptr = super_copy->sys_chunk_array; 6049 sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array); 6050 cur = 0; 6051 6052 while (cur < array_size) { 6053 disk_key = (struct btrfs_disk_key *)ptr; 6054 btrfs_disk_key_to_cpu(&key, disk_key); 6055 6056 len = sizeof(*disk_key); ptr += len; 6057 sb_ptr += len; 6058 cur += len; 6059 6060 if (key.type == BTRFS_CHUNK_ITEM_KEY) { 6061 chunk = (struct btrfs_chunk *)sb_ptr; 6062 ret = read_one_chunk(root, &key, sb, chunk); 6063 if (ret) 6064 break; 6065 num_stripes = btrfs_chunk_num_stripes(sb, chunk); 6066 len = btrfs_chunk_item_size(num_stripes); 6067 } else { 6068 ret = -EIO; 6069 break; 6070 } 6071 ptr += len; 6072 sb_ptr += len; 6073 cur += len; 6074 } 6075 free_extent_buffer(sb); 6076 return ret; 6077 } 6078 6079 int btrfs_read_chunk_tree(struct btrfs_root *root) 6080 { 6081 struct btrfs_path *path; 6082 struct extent_buffer *leaf; 6083 struct btrfs_key key; 6084 struct btrfs_key found_key; 6085 int ret; 6086 int slot; 6087 6088 root = root->fs_info->chunk_root; 6089 6090 path = btrfs_alloc_path(); 6091 if (!path) 6092 return -ENOMEM; 6093 6094 mutex_lock(&uuid_mutex); 6095 lock_chunks(root); 6096 6097 /* 6098 * Read all device items, and then all the chunk items. All 6099 * device items are found before any chunk item (their object id 6100 * is smaller than the lowest possible object id for a chunk 6101 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID). 6102 */ 6103 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 6104 key.offset = 0; 6105 key.type = 0; 6106 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 6107 if (ret < 0) 6108 goto error; 6109 while (1) { 6110 leaf = path->nodes[0]; 6111 slot = path->slots[0]; 6112 if (slot >= btrfs_header_nritems(leaf)) { 6113 ret = btrfs_next_leaf(root, path); 6114 if (ret == 0) 6115 continue; 6116 if (ret < 0) 6117 goto error; 6118 break; 6119 } 6120 btrfs_item_key_to_cpu(leaf, &found_key, slot); 6121 if (found_key.type == BTRFS_DEV_ITEM_KEY) { 6122 struct btrfs_dev_item *dev_item; 6123 dev_item = btrfs_item_ptr(leaf, slot, 6124 struct btrfs_dev_item); 6125 ret = read_one_dev(root, leaf, dev_item); 6126 if (ret) 6127 goto error; 6128 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { 6129 struct btrfs_chunk *chunk; 6130 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 6131 ret = read_one_chunk(root, &found_key, leaf, chunk); 6132 if (ret) 6133 goto error; 6134 } 6135 path->slots[0]++; 6136 } 6137 ret = 0; 6138 error: 6139 unlock_chunks(root); 6140 mutex_unlock(&uuid_mutex); 6141 6142 btrfs_free_path(path); 6143 return ret; 6144 } 6145 6146 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info) 6147 { 6148 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 6149 struct btrfs_device *device; 6150 6151 while (fs_devices) { 6152 mutex_lock(&fs_devices->device_list_mutex); 6153 list_for_each_entry(device, &fs_devices->devices, dev_list) 6154 device->dev_root = fs_info->dev_root; 6155 mutex_unlock(&fs_devices->device_list_mutex); 6156 6157 fs_devices = fs_devices->seed; 6158 } 6159 } 6160 6161 static void __btrfs_reset_dev_stats(struct btrfs_device *dev) 6162 { 6163 int i; 6164 6165 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 6166 btrfs_dev_stat_reset(dev, i); 6167 } 6168 6169 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info) 6170 { 6171 struct btrfs_key 
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct extent_buffer *eb;
	int slot;
	int ret = 0;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int i;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		int item_size;
		struct btrfs_dev_stats_item *ptr;

		key.objectid = 0;
		key.type = BTRFS_DEV_STATS_KEY;
		key.offset = device->devid;
		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
		if (ret) {
			/* no usable dev_stats item, start with all counters at zero */
			__btrfs_reset_dev_stats(device);
			device->dev_stats_valid = 1;
			btrfs_release_path(path);
			continue;
		}
		slot = path->slots[0];
		eb = path->nodes[0];
		btrfs_item_key_to_cpu(eb, &found_key, slot);
		item_size = btrfs_item_size_nr(eb, slot);

		ptr = btrfs_item_ptr(eb, slot,
				     struct btrfs_dev_stats_item);

		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (item_size >= (1 + i) * sizeof(__le64))
				btrfs_dev_stat_set(device, i,
					btrfs_dev_stats_value(eb, ptr, i));
			else
				btrfs_dev_stat_reset(device, i);
		}

		device->dev_stats_valid = 1;
		btrfs_dev_stat_print_on_load(device);
		btrfs_release_path(path);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

out:
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}
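
/*
 * Persist the in-memory counters of one device as a dev_stats item keyed
 * (0, BTRFS_DEV_STATS_KEY, devid). If an existing item is smaller than
 * the current struct btrfs_dev_stats_item (written by an older kernel
 * that knew fewer counters), it is deleted and re-inserted at full size.
 */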
static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_root *dev_root,
				struct btrfs_device *device)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = 0;
	key.type = BTRFS_DEV_STATS_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		printk_in_rcu(KERN_WARNING "BTRFS: "
			"error %d while searching for dev_stats item for device %s!\n",
			ret, rcu_str_deref(device->name));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* need to delete old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			printk_in_rcu(KERN_WARNING "BTRFS: "
				"delete too small dev_stats item for device %s failed %d!\n",
				rcu_str_deref(device->name), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			printk_in_rcu(KERN_WARNING "BTRFS: "
				"insert dev_stats item for device %s failed %d!\n",
				rcu_str_deref(device->name), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Called from commit_transaction. Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (!device->dev_stats_valid || !device->dev_stats_dirty)
			continue;

		ret = update_dev_stat_item(trans, dev_root, device);
		if (!ret)
			device->dev_stats_dirty = 0;
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);
	btrfs_dev_stat_print_on_error(dev);
}

static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
	if (!dev->dev_stats_valid)
		return;
	printk_ratelimited_in_rcu(KERN_ERR "BTRFS: "
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
		rcu_str_deref(dev->name),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

	printk_in_rcu(KERN_INFO "BTRFS: "
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
		rcu_str_deref(dev->name),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}
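
/*
 * btrfs_get_dev_stats() below backs the BTRFS_IOC_GET_DEV_STATS ioctl.
 * A minimal userspace sketch, for illustration only and assuming the
 * UAPI definitions from <linux/btrfs.h> ("fd" is any open file descriptor
 * on the mounted filesystem, devid 1 is just an example, and setting
 * .flags to BTRFS_DEV_STATS_RESET would also clear the counters):
 *
 *	struct btrfs_ioctl_get_dev_stats gds = {
 *		.devid = 1,
 *		.nr_items = BTRFS_DEV_STAT_VALUES_MAX,
 *	};
 *
 *	if (ioctl(fd, BTRFS_IOC_GET_DEV_STATS, &gds) == 0)
 *		printf("wr errs: %llu\n",
 *		       (unsigned long long)gds.values[BTRFS_DEV_STAT_WRITE_ERRS]);
 */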
int btrfs_get_dev_stats(struct btrfs_root *root,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(root->fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(root->fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_reset(dev, i);
		}
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}

int btrfs_scratch_superblock(struct btrfs_device *device)
{
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;

	bh = btrfs_read_dev_super(device->bdev);
	if (!bh)
		return -EINVAL;
	disk_super = (struct btrfs_super_block *)bh->b_data;

	/* wipe the magic so the device is no longer recognized as btrfs */
	memset(&disk_super->magic, 0, sizeof(disk_super->magic));
	set_buffer_dirty(bh);
	sync_dirty_buffer(bh);
	brelse(bh);

	return 0;
}