1 /* 2 * Copyright (C) 2007 Oracle. All rights reserved. 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of the GNU General Public 6 * License v2 as published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope that it will be useful, 9 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * General Public License for more details. 12 * 13 * You should have received a copy of the GNU General Public 14 * License along with this program; if not, write to the 15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 16 * Boston, MA 021110-1307, USA. 17 */ 18 #include <linux/sched.h> 19 #include <linux/bio.h> 20 #include <linux/slab.h> 21 #include <linux/buffer_head.h> 22 #include <linux/blkdev.h> 23 #include <linux/random.h> 24 #include <linux/iocontext.h> 25 #include <linux/capability.h> 26 #include <asm/div64.h> 27 #include "compat.h" 28 #include "ctree.h" 29 #include "extent_map.h" 30 #include "disk-io.h" 31 #include "transaction.h" 32 #include "print-tree.h" 33 #include "volumes.h" 34 #include "async-thread.h" 35 36 struct map_lookup { 37 u64 type; 38 int io_align; 39 int io_width; 40 int stripe_len; 41 int sector_size; 42 int num_stripes; 43 int sub_stripes; 44 struct btrfs_bio_stripe stripes[]; 45 }; 46 47 static int init_first_rw_device(struct btrfs_trans_handle *trans, 48 struct btrfs_root *root, 49 struct btrfs_device *device); 50 static int btrfs_relocate_sys_chunks(struct btrfs_root *root); 51 52 #define map_lookup_size(n) (sizeof(struct map_lookup) + \ 53 (sizeof(struct btrfs_bio_stripe) * (n))) 54 55 static DEFINE_MUTEX(uuid_mutex); 56 static LIST_HEAD(fs_uuids); 57 58 void btrfs_lock_volumes(void) 59 { 60 mutex_lock(&uuid_mutex); 61 } 62 63 void btrfs_unlock_volumes(void) 64 { 65 mutex_unlock(&uuid_mutex); 66 } 67 68 static void lock_chunks(struct btrfs_root *root) 69 { 70 mutex_lock(&root->fs_info->chunk_mutex); 71 } 72 73 static void unlock_chunks(struct btrfs_root *root) 74 { 75 mutex_unlock(&root->fs_info->chunk_mutex); 76 } 77 78 static void free_fs_devices(struct btrfs_fs_devices *fs_devices) 79 { 80 struct btrfs_device *device; 81 WARN_ON(fs_devices->opened); 82 while (!list_empty(&fs_devices->devices)) { 83 device = list_entry(fs_devices->devices.next, 84 struct btrfs_device, dev_list); 85 list_del(&device->dev_list); 86 kfree(device->name); 87 kfree(device); 88 } 89 kfree(fs_devices); 90 } 91 92 int btrfs_cleanup_fs_uuids(void) 93 { 94 struct btrfs_fs_devices *fs_devices; 95 96 while (!list_empty(&fs_uuids)) { 97 fs_devices = list_entry(fs_uuids.next, 98 struct btrfs_fs_devices, list); 99 list_del(&fs_devices->list); 100 free_fs_devices(fs_devices); 101 } 102 return 0; 103 } 104 105 static noinline struct btrfs_device *__find_device(struct list_head *head, 106 u64 devid, u8 *uuid) 107 { 108 struct btrfs_device *dev; 109 110 list_for_each_entry(dev, head, dev_list) { 111 if (dev->devid == devid && 112 (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) { 113 return dev; 114 } 115 } 116 return NULL; 117 } 118 119 static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid) 120 { 121 struct btrfs_fs_devices *fs_devices; 122 123 list_for_each_entry(fs_devices, &fs_uuids, list) { 124 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0) 125 return fs_devices; 126 } 127 return NULL; 128 } 129 130 static void requeue_list(struct btrfs_pending_bios *pending_bios, 131 struct bio *head, struct bio *tail) 
132 { 133 134 struct bio *old_head; 135 136 old_head = pending_bios->head; 137 pending_bios->head = head; 138 if (pending_bios->tail) 139 tail->bi_next = old_head; 140 else 141 pending_bios->tail = tail; 142 } 143 144 /* 145 * we try to collect pending bios for a device so we don't get a large 146 * number of procs sending bios down to the same device. This greatly 147 * improves the scheduler's ability to collect and merge the bios. 148 * 149 * But, it also turns into a long list of bios to process and that is sure 150 * to eventually make the worker thread block. The solution here is to 151 * make some progress and then put this work struct back at the end of 152 * the list if the block device is congested. This way, multiple devices 153 * can make progress from a single worker thread. 154 */ 155 static noinline int run_scheduled_bios(struct btrfs_device *device) 156 { 157 struct bio *pending; 158 struct backing_dev_info *bdi; 159 struct btrfs_fs_info *fs_info; 160 struct btrfs_pending_bios *pending_bios; 161 struct bio *tail; 162 struct bio *cur; 163 int again = 0; 164 unsigned long num_run; 165 unsigned long num_sync_run; 166 unsigned long batch_run = 0; 167 unsigned long limit; 168 unsigned long last_waited = 0; 169 int force_reg = 0; 170 171 bdi = blk_get_backing_dev_info(device->bdev); 172 fs_info = device->dev_root->fs_info; 173 limit = btrfs_async_submit_limit(fs_info); 174 limit = limit * 2 / 3; 175 176 /* we want to make sure that every time we switch from the sync 177 * list to the normal list, we unplug 178 */ 179 num_sync_run = 0; 180 181 loop: 182 spin_lock(&device->io_lock); 183 184 loop_lock: 185 num_run = 0; 186 187 /* take all the bios off the list at once and process them 188 * later on (without the lock held). But, remember the 189 * tail and other pointers so the bios can be properly reinserted 190 * into the list if we hit congestion 191 */ 192 if (!force_reg && device->pending_sync_bios.head) { 193 pending_bios = &device->pending_sync_bios; 194 force_reg = 1; 195 } else { 196 pending_bios = &device->pending_bios; 197 force_reg = 0; 198 } 199 200 pending = pending_bios->head; 201 tail = pending_bios->tail; 202 WARN_ON(pending && !tail); 203 204 /* 205 * if pending was null this time around, no bios need processing 206 * at all and we can stop. Otherwise it'll loop back up again 207 * and do an additional check so no bios are missed. 208 * 209 * device->running_pending is used to synchronize with the 210 * schedule_bio code.
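 * A non-zero running_pending tells the submission side that this worker
 * is still looping and will pick up newly queued bios on its own, so the
 * work item does not need to be requeued just to service them.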
211 */ 212 if (device->pending_sync_bios.head == NULL && 213 device->pending_bios.head == NULL) { 214 again = 0; 215 device->running_pending = 0; 216 } else { 217 again = 1; 218 device->running_pending = 1; 219 } 220 221 pending_bios->head = NULL; 222 pending_bios->tail = NULL; 223 224 spin_unlock(&device->io_lock); 225 226 /* 227 * if we're doing the regular priority list, make sure we unplug 228 * for any high prio bios we've sent down 229 */ 230 if (pending_bios == &device->pending_bios && num_sync_run > 0) { 231 num_sync_run = 0; 232 blk_run_backing_dev(bdi, NULL); 233 } 234 235 while (pending) { 236 237 rmb(); 238 /* we want to work on both lists, but do more bios on the 239 * sync list than the regular list 240 */ 241 if ((num_run > 32 && 242 pending_bios != &device->pending_sync_bios && 243 device->pending_sync_bios.head) || 244 (num_run > 64 && pending_bios == &device->pending_sync_bios && 245 device->pending_bios.head)) { 246 spin_lock(&device->io_lock); 247 requeue_list(pending_bios, pending, tail); 248 goto loop_lock; 249 } 250 251 cur = pending; 252 pending = pending->bi_next; 253 cur->bi_next = NULL; 254 atomic_dec(&fs_info->nr_async_bios); 255 256 if (atomic_read(&fs_info->nr_async_bios) < limit && 257 waitqueue_active(&fs_info->async_submit_wait)) 258 wake_up(&fs_info->async_submit_wait); 259 260 BUG_ON(atomic_read(&cur->bi_cnt) == 0); 261 262 if (cur->bi_rw & REQ_SYNC) 263 num_sync_run++; 264 265 submit_bio(cur->bi_rw, cur); 266 num_run++; 267 batch_run++; 268 if (need_resched()) { 269 if (num_sync_run) { 270 blk_run_backing_dev(bdi, NULL); 271 num_sync_run = 0; 272 } 273 cond_resched(); 274 } 275 276 /* 277 * we made progress, there is more work to do and the bdi 278 * is now congested. Back off and let other work structs 279 * run instead 280 */ 281 if (pending && bdi_write_congested(bdi) && batch_run > 8 && 282 fs_info->fs_devices->open_devices > 1) { 283 struct io_context *ioc; 284 285 ioc = current->io_context; 286 287 /* 288 * the main goal here is that we don't want to 289 * block if we're going to be able to submit 290 * more requests without blocking. 291 * 292 * This code does two great things, it pokes into 293 * the elevator code from a filesystem _and_ 294 * it makes assumptions about how batching works. 295 */ 296 if (ioc && ioc->nr_batch_requests > 0 && 297 time_before(jiffies, ioc->last_waited + HZ/50UL) && 298 (last_waited == 0 || 299 ioc->last_waited == last_waited)) { 300 /* 301 * we want to go through our batch of 302 * requests and stop. So, we copy out 303 * the ioc->last_waited time and test 304 * against it before looping 305 */ 306 last_waited = ioc->last_waited; 307 if (need_resched()) { 308 if (num_sync_run) { 309 blk_run_backing_dev(bdi, NULL); 310 num_sync_run = 0; 311 } 312 cond_resched(); 313 } 314 continue; 315 } 316 spin_lock(&device->io_lock); 317 requeue_list(pending_bios, pending, tail); 318 device->running_pending = 1; 319 320 spin_unlock(&device->io_lock); 321 btrfs_requeue_work(&device->work); 322 goto done; 323 } 324 } 325 326 if (num_sync_run) { 327 num_sync_run = 0; 328 blk_run_backing_dev(bdi, NULL); 329 } 330 /* 331 * IO has already been through a long path to get here. Checksumming, 332 * async helper threads, perhaps compression. We've done a pretty 333 * good job of collecting a batch of IO and should just unplug 334 * the device right away. 335 * 336 * This will help anyone who is waiting on the IO, they might have 337 * already unplugged, but managed to do so before the bio they 338 * cared about found its way down here. 
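 * Unplugging via blk_run_backing_dev() below pushes the batched bios down
 * to the driver right away instead of waiting for the block layer to
 * unplug on its own.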
339 */ 340 blk_run_backing_dev(bdi, NULL); 341 342 cond_resched(); 343 if (again) 344 goto loop; 345 346 spin_lock(&device->io_lock); 347 if (device->pending_bios.head || device->pending_sync_bios.head) 348 goto loop_lock; 349 spin_unlock(&device->io_lock); 350 351 done: 352 return 0; 353 } 354 355 static void pending_bios_fn(struct btrfs_work *work) 356 { 357 struct btrfs_device *device; 358 359 device = container_of(work, struct btrfs_device, work); 360 run_scheduled_bios(device); 361 } 362 363 static noinline int device_list_add(const char *path, 364 struct btrfs_super_block *disk_super, 365 u64 devid, struct btrfs_fs_devices **fs_devices_ret) 366 { 367 struct btrfs_device *device; 368 struct btrfs_fs_devices *fs_devices; 369 u64 found_transid = btrfs_super_generation(disk_super); 370 char *name; 371 372 fs_devices = find_fsid(disk_super->fsid); 373 if (!fs_devices) { 374 fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS); 375 if (!fs_devices) 376 return -ENOMEM; 377 INIT_LIST_HEAD(&fs_devices->devices); 378 INIT_LIST_HEAD(&fs_devices->alloc_list); 379 list_add(&fs_devices->list, &fs_uuids); 380 memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE); 381 fs_devices->latest_devid = devid; 382 fs_devices->latest_trans = found_transid; 383 mutex_init(&fs_devices->device_list_mutex); 384 device = NULL; 385 } else { 386 device = __find_device(&fs_devices->devices, devid, 387 disk_super->dev_item.uuid); 388 } 389 if (!device) { 390 if (fs_devices->opened) 391 return -EBUSY; 392 393 device = kzalloc(sizeof(*device), GFP_NOFS); 394 if (!device) { 395 /* we can safely leave the fs_devices entry around */ 396 return -ENOMEM; 397 } 398 device->devid = devid; 399 device->work.func = pending_bios_fn; 400 memcpy(device->uuid, disk_super->dev_item.uuid, 401 BTRFS_UUID_SIZE); 402 spin_lock_init(&device->io_lock); 403 device->name = kstrdup(path, GFP_NOFS); 404 if (!device->name) { 405 kfree(device); 406 return -ENOMEM; 407 } 408 INIT_LIST_HEAD(&device->dev_alloc_list); 409 410 mutex_lock(&fs_devices->device_list_mutex); 411 list_add(&device->dev_list, &fs_devices->devices); 412 mutex_unlock(&fs_devices->device_list_mutex); 413 414 device->fs_devices = fs_devices; 415 fs_devices->num_devices++; 416 } else if (!device->name || strcmp(device->name, path)) { 417 name = kstrdup(path, GFP_NOFS); 418 if (!name) 419 return -ENOMEM; 420 kfree(device->name); 421 device->name = name; 422 if (device->missing) { 423 fs_devices->missing_devices--; 424 device->missing = 0; 425 } 426 } 427 428 if (found_transid > fs_devices->latest_trans) { 429 fs_devices->latest_devid = devid; 430 fs_devices->latest_trans = found_transid; 431 } 432 *fs_devices_ret = fs_devices; 433 return 0; 434 } 435 436 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig) 437 { 438 struct btrfs_fs_devices *fs_devices; 439 struct btrfs_device *device; 440 struct btrfs_device *orig_dev; 441 442 fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS); 443 if (!fs_devices) 444 return ERR_PTR(-ENOMEM); 445 446 INIT_LIST_HEAD(&fs_devices->devices); 447 INIT_LIST_HEAD(&fs_devices->alloc_list); 448 INIT_LIST_HEAD(&fs_devices->list); 449 mutex_init(&fs_devices->device_list_mutex); 450 fs_devices->latest_devid = orig->latest_devid; 451 fs_devices->latest_trans = orig->latest_trans; 452 memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid)); 453 454 mutex_lock(&orig->device_list_mutex); 455 list_for_each_entry(orig_dev, &orig->devices, dev_list) { 456 device = kzalloc(sizeof(*device), GFP_NOFS); 457 if (!device) 458 goto 
error; 459 460 device->name = kstrdup(orig_dev->name, GFP_NOFS); 461 if (!device->name) { 462 kfree(device); 463 goto error; 464 } 465 466 device->devid = orig_dev->devid; 467 device->work.func = pending_bios_fn; 468 memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid)); 469 spin_lock_init(&device->io_lock); 470 INIT_LIST_HEAD(&device->dev_list); 471 INIT_LIST_HEAD(&device->dev_alloc_list); 472 473 list_add(&device->dev_list, &fs_devices->devices); 474 device->fs_devices = fs_devices; 475 fs_devices->num_devices++; 476 } 477 mutex_unlock(&orig->device_list_mutex); 478 return fs_devices; 479 error: 480 mutex_unlock(&orig->device_list_mutex); 481 free_fs_devices(fs_devices); 482 return ERR_PTR(-ENOMEM); 483 } 484 485 int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices) 486 { 487 struct btrfs_device *device, *next; 488 489 mutex_lock(&uuid_mutex); 490 again: 491 mutex_lock(&fs_devices->device_list_mutex); 492 list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) { 493 if (device->in_fs_metadata) 494 continue; 495 496 if (device->bdev) { 497 blkdev_put(device->bdev, device->mode); 498 device->bdev = NULL; 499 fs_devices->open_devices--; 500 } 501 if (device->writeable) { 502 list_del_init(&device->dev_alloc_list); 503 device->writeable = 0; 504 fs_devices->rw_devices--; 505 } 506 list_del_init(&device->dev_list); 507 fs_devices->num_devices--; 508 kfree(device->name); 509 kfree(device); 510 } 511 mutex_unlock(&fs_devices->device_list_mutex); 512 513 if (fs_devices->seed) { 514 fs_devices = fs_devices->seed; 515 goto again; 516 } 517 518 mutex_unlock(&uuid_mutex); 519 return 0; 520 } 521 522 static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) 523 { 524 struct btrfs_device *device; 525 526 if (--fs_devices->opened > 0) 527 return 0; 528 529 list_for_each_entry(device, &fs_devices->devices, dev_list) { 530 if (device->bdev) { 531 blkdev_put(device->bdev, device->mode); 532 fs_devices->open_devices--; 533 } 534 if (device->writeable) { 535 list_del_init(&device->dev_alloc_list); 536 fs_devices->rw_devices--; 537 } 538 539 device->bdev = NULL; 540 device->writeable = 0; 541 device->in_fs_metadata = 0; 542 } 543 WARN_ON(fs_devices->open_devices); 544 WARN_ON(fs_devices->rw_devices); 545 fs_devices->opened = 0; 546 fs_devices->seeding = 0; 547 548 return 0; 549 } 550 551 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices) 552 { 553 struct btrfs_fs_devices *seed_devices = NULL; 554 int ret; 555 556 mutex_lock(&uuid_mutex); 557 ret = __btrfs_close_devices(fs_devices); 558 if (!fs_devices->opened) { 559 seed_devices = fs_devices->seed; 560 fs_devices->seed = NULL; 561 } 562 mutex_unlock(&uuid_mutex); 563 564 while (seed_devices) { 565 fs_devices = seed_devices; 566 seed_devices = fs_devices->seed; 567 __btrfs_close_devices(fs_devices); 568 free_fs_devices(fs_devices); 569 } 570 return ret; 571 } 572 573 static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, 574 fmode_t flags, void *holder) 575 { 576 struct block_device *bdev; 577 struct list_head *head = &fs_devices->devices; 578 struct btrfs_device *device; 579 struct block_device *latest_bdev = NULL; 580 struct buffer_head *bh; 581 struct btrfs_super_block *disk_super; 582 u64 latest_devid = 0; 583 u64 latest_transid = 0; 584 u64 devid; 585 int seeding = 1; 586 int ret = 0; 587 588 flags |= FMODE_EXCL; 589 590 list_for_each_entry(device, head, dev_list) { 591 if (device->bdev) 592 continue; 593 if (!device->name) 594 continue; 595 596 bdev = blkdev_get_by_path(device->name, 
flags, holder); 597 if (IS_ERR(bdev)) { 598 printk(KERN_INFO "open %s failed\n", device->name); 599 goto error; 600 } 601 set_blocksize(bdev, 4096); 602 603 bh = btrfs_read_dev_super(bdev); 604 if (!bh) { 605 ret = -EINVAL; 606 goto error_close; 607 } 608 609 disk_super = (struct btrfs_super_block *)bh->b_data; 610 devid = btrfs_stack_device_id(&disk_super->dev_item); 611 if (devid != device->devid) 612 goto error_brelse; 613 614 if (memcmp(device->uuid, disk_super->dev_item.uuid, 615 BTRFS_UUID_SIZE)) 616 goto error_brelse; 617 618 device->generation = btrfs_super_generation(disk_super); 619 if (!latest_transid || device->generation > latest_transid) { 620 latest_devid = devid; 621 latest_transid = device->generation; 622 latest_bdev = bdev; 623 } 624 625 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) { 626 device->writeable = 0; 627 } else { 628 device->writeable = !bdev_read_only(bdev); 629 seeding = 0; 630 } 631 632 device->bdev = bdev; 633 device->in_fs_metadata = 0; 634 device->mode = flags; 635 636 if (!blk_queue_nonrot(bdev_get_queue(bdev))) 637 fs_devices->rotating = 1; 638 639 fs_devices->open_devices++; 640 if (device->writeable) { 641 fs_devices->rw_devices++; 642 list_add(&device->dev_alloc_list, 643 &fs_devices->alloc_list); 644 } 645 continue; 646 647 error_brelse: 648 brelse(bh); 649 error_close: 650 blkdev_put(bdev, flags); 651 error: 652 continue; 653 } 654 if (fs_devices->open_devices == 0) { 655 ret = -EIO; 656 goto out; 657 } 658 fs_devices->seeding = seeding; 659 fs_devices->opened = 1; 660 fs_devices->latest_bdev = latest_bdev; 661 fs_devices->latest_devid = latest_devid; 662 fs_devices->latest_trans = latest_transid; 663 fs_devices->total_rw_bytes = 0; 664 out: 665 return ret; 666 } 667 668 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, 669 fmode_t flags, void *holder) 670 { 671 int ret; 672 673 mutex_lock(&uuid_mutex); 674 if (fs_devices->opened) { 675 fs_devices->opened++; 676 ret = 0; 677 } else { 678 ret = __btrfs_open_devices(fs_devices, flags, holder); 679 } 680 mutex_unlock(&uuid_mutex); 681 return ret; 682 } 683 684 int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, 685 struct btrfs_fs_devices **fs_devices_ret) 686 { 687 struct btrfs_super_block *disk_super; 688 struct block_device *bdev; 689 struct buffer_head *bh; 690 int ret; 691 u64 devid; 692 u64 transid; 693 694 mutex_lock(&uuid_mutex); 695 696 flags |= FMODE_EXCL; 697 bdev = blkdev_get_by_path(path, flags, holder); 698 699 if (IS_ERR(bdev)) { 700 ret = PTR_ERR(bdev); 701 goto error; 702 } 703 704 ret = set_blocksize(bdev, 4096); 705 if (ret) 706 goto error_close; 707 bh = btrfs_read_dev_super(bdev); 708 if (!bh) { 709 ret = -EINVAL; 710 goto error_close; 711 } 712 disk_super = (struct btrfs_super_block *)bh->b_data; 713 devid = btrfs_stack_device_id(&disk_super->dev_item); 714 transid = btrfs_super_generation(disk_super); 715 if (disk_super->label[0]) 716 printk(KERN_INFO "device label %s ", disk_super->label); 717 else { 718 /* FIXME, make a real uuid parser */ 719 printk(KERN_INFO "device fsid %llx-%llx ", 720 *(unsigned long long *)disk_super->fsid, 721 *(unsigned long long *)(disk_super->fsid + 8)); 722 } 723 printk(KERN_CONT "devid %llu transid %llu %s\n", 724 (unsigned long long)devid, (unsigned long long)transid, path); 725 ret = device_list_add(path, disk_super, devid, fs_devices_ret); 726 727 brelse(bh); 728 error_close: 729 blkdev_put(bdev, flags); 730 error: 731 mutex_unlock(&uuid_mutex); 732 return ret; 733 } 734 735 /* helper to account
the used device space in the range */ 736 int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start, 737 u64 end, u64 *length) 738 { 739 struct btrfs_key key; 740 struct btrfs_root *root = device->dev_root; 741 struct btrfs_dev_extent *dev_extent; 742 struct btrfs_path *path; 743 u64 extent_end; 744 int ret; 745 int slot; 746 struct extent_buffer *l; 747 748 *length = 0; 749 750 if (start >= device->total_bytes) 751 return 0; 752 753 path = btrfs_alloc_path(); 754 if (!path) 755 return -ENOMEM; 756 path->reada = 2; 757 758 key.objectid = device->devid; 759 key.offset = start; 760 key.type = BTRFS_DEV_EXTENT_KEY; 761 762 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 763 if (ret < 0) 764 goto out; 765 if (ret > 0) { 766 ret = btrfs_previous_item(root, path, key.objectid, key.type); 767 if (ret < 0) 768 goto out; 769 } 770 771 while (1) { 772 l = path->nodes[0]; 773 slot = path->slots[0]; 774 if (slot >= btrfs_header_nritems(l)) { 775 ret = btrfs_next_leaf(root, path); 776 if (ret == 0) 777 continue; 778 if (ret < 0) 779 goto out; 780 781 break; 782 } 783 btrfs_item_key_to_cpu(l, &key, slot); 784 785 if (key.objectid < device->devid) 786 goto next; 787 788 if (key.objectid > device->devid) 789 break; 790 791 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) 792 goto next; 793 794 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 795 extent_end = key.offset + btrfs_dev_extent_length(l, 796 dev_extent); 797 if (key.offset <= start && extent_end > end) { 798 *length = end - start + 1; 799 break; 800 } else if (key.offset <= start && extent_end > start) 801 *length += extent_end - start; 802 else if (key.offset > start && extent_end <= end) 803 *length += extent_end - key.offset; 804 else if (key.offset > start && key.offset <= end) { 805 *length += end - key.offset + 1; 806 break; 807 } else if (key.offset > end) 808 break; 809 810 next: 811 path->slots[0]++; 812 } 813 ret = 0; 814 out: 815 btrfs_free_path(path); 816 return ret; 817 } 818 819 /* 820 * find_free_dev_extent - find free space in the specified device 821 * @trans: transaction handle 822 * @device: the device which we search the free space in 823 * @num_bytes: the size of the free space that we need 824 * @start: store the start of the free space. 825 * @len: the size of the free space that we find, or the size of the max 826 * free space if we don't find suitable free space 827 * 828 * this uses a pretty simple search, the expectation is that it is 829 * called very infrequently and that a given device has a small number 830 * of extents 831 * 832 * @start is used to store the start of the free space if we find it. But if we 833 * don't find suitable free space, it will be used to store the start position 834 * of the max free space. 835 * 836 * @len is used to store the size of the free space that we find. 837 * But if we don't find suitable free space, it is used to store the size of 838 * the max free space.
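 *
 * Returns 0 if a hole of at least @num_bytes was found, -ENOSPC if even
 * the largest hole on the device is too small, and a negative errno if
 * searching the dev extent tree failed.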
839 */ 840 int find_free_dev_extent(struct btrfs_trans_handle *trans, 841 struct btrfs_device *device, u64 num_bytes, 842 u64 *start, u64 *len) 843 { 844 struct btrfs_key key; 845 struct btrfs_root *root = device->dev_root; 846 struct btrfs_dev_extent *dev_extent; 847 struct btrfs_path *path; 848 u64 hole_size; 849 u64 max_hole_start; 850 u64 max_hole_size; 851 u64 extent_end; 852 u64 search_start; 853 u64 search_end = device->total_bytes; 854 int ret; 855 int slot; 856 struct extent_buffer *l; 857 858 /* FIXME use last free of some kind */ 859 860 /* we don't want to overwrite the superblock on the drive, 861 * so we make sure to start at an offset of at least 1MB 862 */ 863 search_start = 1024 * 1024; 864 865 if (root->fs_info->alloc_start + num_bytes <= search_end) 866 search_start = max(root->fs_info->alloc_start, search_start); 867 868 max_hole_start = search_start; 869 max_hole_size = 0; 870 871 if (search_start >= search_end) { 872 ret = -ENOSPC; 873 goto error; 874 } 875 876 path = btrfs_alloc_path(); 877 if (!path) { 878 ret = -ENOMEM; 879 goto error; 880 } 881 path->reada = 2; 882 883 key.objectid = device->devid; 884 key.offset = search_start; 885 key.type = BTRFS_DEV_EXTENT_KEY; 886 887 ret = btrfs_search_slot(trans, root, &key, path, 0, 0); 888 if (ret < 0) 889 goto out; 890 if (ret > 0) { 891 ret = btrfs_previous_item(root, path, key.objectid, key.type); 892 if (ret < 0) 893 goto out; 894 } 895 896 while (1) { 897 l = path->nodes[0]; 898 slot = path->slots[0]; 899 if (slot >= btrfs_header_nritems(l)) { 900 ret = btrfs_next_leaf(root, path); 901 if (ret == 0) 902 continue; 903 if (ret < 0) 904 goto out; 905 906 break; 907 } 908 btrfs_item_key_to_cpu(l, &key, slot); 909 910 if (key.objectid < device->devid) 911 goto next; 912 913 if (key.objectid > device->devid) 914 break; 915 916 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) 917 goto next; 918 919 if (key.offset > search_start) { 920 hole_size = key.offset - search_start; 921 922 if (hole_size > max_hole_size) { 923 max_hole_start = search_start; 924 max_hole_size = hole_size; 925 } 926 927 /* 928 * If this free space is greater than what we need, 929 * it must be the max free space that we have found 930 * until now, so max_hole_start must point to the start 931 * of this free space and the length of this free space 932 * is stored in max_hole_size. Thus, we return 933 * max_hole_start and max_hole_size and go back to the 934 * caller. 935 */ 936 if (hole_size >= num_bytes) { 937 ret = 0; 938 goto out; 939 } 940 } 941 942 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 943 extent_end = key.offset + btrfs_dev_extent_length(l, 944 dev_extent); 945 if (extent_end > search_start) 946 search_start = extent_end; 947 next: 948 path->slots[0]++; 949 cond_resched(); 950 } 951 952 hole_size = search_end - search_start; 953 if (hole_size > max_hole_size) { 954 max_hole_start = search_start; 955 max_hole_size = hole_size; 956 } 957 958 /* See above.
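 * If the trailing hole is also smaller than num_bytes then nothing on
 * the device was big enough (a large enough hole would have returned
 * early from the loop), so fail with -ENOSPC.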
*/ 959 if (hole_size < num_bytes) 960 ret = -ENOSPC; 961 else 962 ret = 0; 963 964 out: 965 btrfs_free_path(path); 966 error: 967 *start = max_hole_start; 968 if (len) 969 *len = max_hole_size; 970 return ret; 971 } 972 973 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans, 974 struct btrfs_device *device, 975 u64 start) 976 { 977 int ret; 978 struct btrfs_path *path; 979 struct btrfs_root *root = device->dev_root; 980 struct btrfs_key key; 981 struct btrfs_key found_key; 982 struct extent_buffer *leaf = NULL; 983 struct btrfs_dev_extent *extent = NULL; 984 985 path = btrfs_alloc_path(); 986 if (!path) 987 return -ENOMEM; 988 989 key.objectid = device->devid; 990 key.offset = start; 991 key.type = BTRFS_DEV_EXTENT_KEY; 992 993 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 994 if (ret > 0) { 995 ret = btrfs_previous_item(root, path, key.objectid, 996 BTRFS_DEV_EXTENT_KEY); 997 BUG_ON(ret); 998 leaf = path->nodes[0]; 999 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 1000 extent = btrfs_item_ptr(leaf, path->slots[0], 1001 struct btrfs_dev_extent); 1002 BUG_ON(found_key.offset > start || found_key.offset + 1003 btrfs_dev_extent_length(leaf, extent) < start); 1004 ret = 0; 1005 } else if (ret == 0) { 1006 leaf = path->nodes[0]; 1007 extent = btrfs_item_ptr(leaf, path->slots[0], 1008 struct btrfs_dev_extent); 1009 } 1010 BUG_ON(ret); 1011 1012 if (device->bytes_used > 0) 1013 device->bytes_used -= btrfs_dev_extent_length(leaf, extent); 1014 ret = btrfs_del_item(trans, root, path); 1015 BUG_ON(ret); 1016 1017 btrfs_free_path(path); 1018 return ret; 1019 } 1020 1021 int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans, 1022 struct btrfs_device *device, 1023 u64 chunk_tree, u64 chunk_objectid, 1024 u64 chunk_offset, u64 start, u64 num_bytes) 1025 { 1026 int ret; 1027 struct btrfs_path *path; 1028 struct btrfs_root *root = device->dev_root; 1029 struct btrfs_dev_extent *extent; 1030 struct extent_buffer *leaf; 1031 struct btrfs_key key; 1032 1033 WARN_ON(!device->in_fs_metadata); 1034 path = btrfs_alloc_path(); 1035 if (!path) 1036 return -ENOMEM; 1037 1038 key.objectid = device->devid; 1039 key.offset = start; 1040 key.type = BTRFS_DEV_EXTENT_KEY; 1041 ret = btrfs_insert_empty_item(trans, root, path, &key, 1042 sizeof(*extent)); 1043 BUG_ON(ret); 1044 1045 leaf = path->nodes[0]; 1046 extent = btrfs_item_ptr(leaf, path->slots[0], 1047 struct btrfs_dev_extent); 1048 btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree); 1049 btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid); 1050 btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset); 1051 1052 write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid, 1053 (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent), 1054 BTRFS_UUID_SIZE); 1055 1056 btrfs_set_dev_extent_length(leaf, extent, num_bytes); 1057 btrfs_mark_buffer_dirty(leaf); 1058 btrfs_free_path(path); 1059 return ret; 1060 } 1061 1062 static noinline int find_next_chunk(struct btrfs_root *root, 1063 u64 objectid, u64 *offset) 1064 { 1065 struct btrfs_path *path; 1066 int ret; 1067 struct btrfs_key key; 1068 struct btrfs_chunk *chunk; 1069 struct btrfs_key found_key; 1070 1071 path = btrfs_alloc_path(); 1072 BUG_ON(!path); 1073 1074 key.objectid = objectid; 1075 key.offset = (u64)-1; 1076 key.type = BTRFS_CHUNK_ITEM_KEY; 1077 1078 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 1079 if (ret < 0) 1080 goto error; 1081 1082 BUG_ON(ret == 0); 1083 1084 ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY); 
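	/*
	 * If there is no previous chunk item for this objectid the chunk
	 * tree is empty and the next chunk starts at offset 0; otherwise it
	 * starts right after the last chunk, e.g. a last key offset of 1G
	 * with a 256M chunk length yields *offset = 1G + 256M.
	 */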
1085 if (ret) { 1086 *offset = 0; 1087 } else { 1088 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 1089 path->slots[0]); 1090 if (found_key.objectid != objectid) 1091 *offset = 0; 1092 else { 1093 chunk = btrfs_item_ptr(path->nodes[0], path->slots[0], 1094 struct btrfs_chunk); 1095 *offset = found_key.offset + 1096 btrfs_chunk_length(path->nodes[0], chunk); 1097 } 1098 } 1099 ret = 0; 1100 error: 1101 btrfs_free_path(path); 1102 return ret; 1103 } 1104 1105 static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid) 1106 { 1107 int ret; 1108 struct btrfs_key key; 1109 struct btrfs_key found_key; 1110 struct btrfs_path *path; 1111 1112 root = root->fs_info->chunk_root; 1113 1114 path = btrfs_alloc_path(); 1115 if (!path) 1116 return -ENOMEM; 1117 1118 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1119 key.type = BTRFS_DEV_ITEM_KEY; 1120 key.offset = (u64)-1; 1121 1122 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 1123 if (ret < 0) 1124 goto error; 1125 1126 BUG_ON(ret == 0); 1127 1128 ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID, 1129 BTRFS_DEV_ITEM_KEY); 1130 if (ret) { 1131 *objectid = 1; 1132 } else { 1133 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 1134 path->slots[0]); 1135 *objectid = found_key.offset + 1; 1136 } 1137 ret = 0; 1138 error: 1139 btrfs_free_path(path); 1140 return ret; 1141 } 1142 1143 /* 1144 * the device information is stored in the chunk root 1145 * the btrfs_device struct should be fully filled in 1146 */ 1147 int btrfs_add_device(struct btrfs_trans_handle *trans, 1148 struct btrfs_root *root, 1149 struct btrfs_device *device) 1150 { 1151 int ret; 1152 struct btrfs_path *path; 1153 struct btrfs_dev_item *dev_item; 1154 struct extent_buffer *leaf; 1155 struct btrfs_key key; 1156 unsigned long ptr; 1157 1158 root = root->fs_info->chunk_root; 1159 1160 path = btrfs_alloc_path(); 1161 if (!path) 1162 return -ENOMEM; 1163 1164 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1165 key.type = BTRFS_DEV_ITEM_KEY; 1166 key.offset = device->devid; 1167 1168 ret = btrfs_insert_empty_item(trans, root, path, &key, 1169 sizeof(*dev_item)); 1170 if (ret) 1171 goto out; 1172 1173 leaf = path->nodes[0]; 1174 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 1175 1176 btrfs_set_device_id(leaf, dev_item, device->devid); 1177 btrfs_set_device_generation(leaf, dev_item, 0); 1178 btrfs_set_device_type(leaf, dev_item, device->type); 1179 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 1180 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 1181 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 1182 btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes); 1183 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used); 1184 btrfs_set_device_group(leaf, dev_item, 0); 1185 btrfs_set_device_seek_speed(leaf, dev_item, 0); 1186 btrfs_set_device_bandwidth(leaf, dev_item, 0); 1187 btrfs_set_device_start_offset(leaf, dev_item, 0); 1188 1189 ptr = (unsigned long)btrfs_device_uuid(dev_item); 1190 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 1191 ptr = (unsigned long)btrfs_device_fsid(dev_item); 1192 write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE); 1193 btrfs_mark_buffer_dirty(leaf); 1194 1195 ret = 0; 1196 out: 1197 btrfs_free_path(path); 1198 return ret; 1199 } 1200 1201 static int btrfs_rm_dev_item(struct btrfs_root *root, 1202 struct btrfs_device *device) 1203 { 1204 int ret; 1205 struct btrfs_path *path; 1206 struct btrfs_key key; 1207 struct 
btrfs_trans_handle *trans; 1208 1209 root = root->fs_info->chunk_root; 1210 1211 path = btrfs_alloc_path(); 1212 if (!path) 1213 return -ENOMEM; 1214 1215 trans = btrfs_start_transaction(root, 0); 1216 if (IS_ERR(trans)) { 1217 btrfs_free_path(path); 1218 return PTR_ERR(trans); 1219 } 1220 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1221 key.type = BTRFS_DEV_ITEM_KEY; 1222 key.offset = device->devid; 1223 lock_chunks(root); 1224 1225 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1226 if (ret < 0) 1227 goto out; 1228 1229 if (ret > 0) { 1230 ret = -ENOENT; 1231 goto out; 1232 } 1233 1234 ret = btrfs_del_item(trans, root, path); 1235 if (ret) 1236 goto out; 1237 out: 1238 btrfs_free_path(path); 1239 unlock_chunks(root); 1240 btrfs_commit_transaction(trans, root); 1241 return ret; 1242 } 1243 1244 int btrfs_rm_device(struct btrfs_root *root, char *device_path) 1245 { 1246 struct btrfs_device *device; 1247 struct btrfs_device *next_device; 1248 struct block_device *bdev; 1249 struct buffer_head *bh = NULL; 1250 struct btrfs_super_block *disk_super; 1251 u64 all_avail; 1252 u64 devid; 1253 u64 num_devices; 1254 u8 *dev_uuid; 1255 int ret = 0; 1256 1257 mutex_lock(&uuid_mutex); 1258 mutex_lock(&root->fs_info->volume_mutex); 1259 1260 all_avail = root->fs_info->avail_data_alloc_bits | 1261 root->fs_info->avail_system_alloc_bits | 1262 root->fs_info->avail_metadata_alloc_bits; 1263 1264 if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) && 1265 root->fs_info->fs_devices->num_devices <= 4) { 1266 printk(KERN_ERR "btrfs: unable to go below four devices " 1267 "on raid10\n"); 1268 ret = -EINVAL; 1269 goto out; 1270 } 1271 1272 if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && 1273 root->fs_info->fs_devices->num_devices <= 2) { 1274 printk(KERN_ERR "btrfs: unable to go below two " 1275 "devices on raid1\n"); 1276 ret = -EINVAL; 1277 goto out; 1278 } 1279 1280 if (strcmp(device_path, "missing") == 0) { 1281 struct list_head *devices; 1282 struct btrfs_device *tmp; 1283 1284 device = NULL; 1285 devices = &root->fs_info->fs_devices->devices; 1286 mutex_lock(&root->fs_info->fs_devices->device_list_mutex); 1287 list_for_each_entry(tmp, devices, dev_list) { 1288 if (tmp->in_fs_metadata && !tmp->bdev) { 1289 device = tmp; 1290 break; 1291 } 1292 } 1293 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); 1294 bdev = NULL; 1295 bh = NULL; 1296 disk_super = NULL; 1297 if (!device) { 1298 printk(KERN_ERR "btrfs: no missing devices found to " 1299 "remove\n"); 1300 goto out; 1301 } 1302 } else { 1303 bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL, 1304 root->fs_info->bdev_holder); 1305 if (IS_ERR(bdev)) { 1306 ret = PTR_ERR(bdev); 1307 goto out; 1308 } 1309 1310 set_blocksize(bdev, 4096); 1311 bh = btrfs_read_dev_super(bdev); 1312 if (!bh) { 1313 ret = -EINVAL; 1314 goto error_close; 1315 } 1316 disk_super = (struct btrfs_super_block *)bh->b_data; 1317 devid = btrfs_stack_device_id(&disk_super->dev_item); 1318 dev_uuid = disk_super->dev_item.uuid; 1319 device = btrfs_find_device(root, devid, dev_uuid, 1320 disk_super->fsid); 1321 if (!device) { 1322 ret = -ENOENT; 1323 goto error_brelse; 1324 } 1325 } 1326 1327 if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) { 1328 printk(KERN_ERR "btrfs: unable to remove the only writeable " 1329 "device\n"); 1330 ret = -EINVAL; 1331 goto error_brelse; 1332 } 1333 1334 if (device->writeable) { 1335 list_del_init(&device->dev_alloc_list); 1336 root->fs_info->fs_devices->rw_devices--; 1337 } 1338 1339 ret = btrfs_shrink_device(device, 0); 
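	/*
	 * Shrinking the device to size 0 relocates every chunk that has a
	 * stripe on it, so all of its dev extents are released before the
	 * device item is deleted from the chunk tree below.
	 */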
1340 if (ret) 1341 goto error_brelse; 1342 1343 ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device); 1344 if (ret) 1345 goto error_brelse; 1346 1347 device->in_fs_metadata = 0; 1348 1349 /* 1350 * the device list mutex makes sure that we don't change 1351 * the device list while someone else is writing out all 1352 * the device supers. 1353 */ 1354 mutex_lock(&root->fs_info->fs_devices->device_list_mutex); 1355 list_del_init(&device->dev_list); 1356 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); 1357 1358 device->fs_devices->num_devices--; 1359 1360 if (device->missing) 1361 root->fs_info->fs_devices->missing_devices--; 1362 1363 next_device = list_entry(root->fs_info->fs_devices->devices.next, 1364 struct btrfs_device, dev_list); 1365 if (device->bdev == root->fs_info->sb->s_bdev) 1366 root->fs_info->sb->s_bdev = next_device->bdev; 1367 if (device->bdev == root->fs_info->fs_devices->latest_bdev) 1368 root->fs_info->fs_devices->latest_bdev = next_device->bdev; 1369 1370 if (device->bdev) { 1371 blkdev_put(device->bdev, device->mode); 1372 device->bdev = NULL; 1373 device->fs_devices->open_devices--; 1374 } 1375 1376 num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1; 1377 btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices); 1378 1379 if (device->fs_devices->open_devices == 0) { 1380 struct btrfs_fs_devices *fs_devices; 1381 fs_devices = root->fs_info->fs_devices; 1382 while (fs_devices) { 1383 if (fs_devices->seed == device->fs_devices) 1384 break; 1385 fs_devices = fs_devices->seed; 1386 } 1387 fs_devices->seed = device->fs_devices->seed; 1388 device->fs_devices->seed = NULL; 1389 __btrfs_close_devices(device->fs_devices); 1390 free_fs_devices(device->fs_devices); 1391 } 1392 1393 /* 1394 * at this point, the device is zero sized. We want to 1395 * remove it from the devices list and zero out the old super 1396 */ 1397 if (device->writeable) { 1398 /* make sure this device isn't detected as part of 1399 * the FS anymore 1400 */ 1401 memset(&disk_super->magic, 0, sizeof(disk_super->magic)); 1402 set_buffer_dirty(bh); 1403 sync_dirty_buffer(bh); 1404 } 1405 1406 kfree(device->name); 1407 kfree(device); 1408 ret = 0; 1409 1410 error_brelse: 1411 brelse(bh); 1412 error_close: 1413 if (bdev) 1414 blkdev_put(bdev, FMODE_READ | FMODE_EXCL); 1415 out: 1416 mutex_unlock(&root->fs_info->volume_mutex); 1417 mutex_unlock(&uuid_mutex); 1418 return ret; 1419 } 1420 1421 /* 1422 * does all the dirty work required for changing file system's UUID. 
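 * The current devices are spliced onto a new read-only btrfs_fs_devices
 * hung off fs_devices->seed, and the mounted filesystem gets a freshly
 * generated fsid in both the in-memory and on-disk supers.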
1423 */ 1424 static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans, 1425 struct btrfs_root *root) 1426 { 1427 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; 1428 struct btrfs_fs_devices *old_devices; 1429 struct btrfs_fs_devices *seed_devices; 1430 struct btrfs_super_block *disk_super = &root->fs_info->super_copy; 1431 struct btrfs_device *device; 1432 u64 super_flags; 1433 1434 BUG_ON(!mutex_is_locked(&uuid_mutex)); 1435 if (!fs_devices->seeding) 1436 return -EINVAL; 1437 1438 seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS); 1439 if (!seed_devices) 1440 return -ENOMEM; 1441 1442 old_devices = clone_fs_devices(fs_devices); 1443 if (IS_ERR(old_devices)) { 1444 kfree(seed_devices); 1445 return PTR_ERR(old_devices); 1446 } 1447 1448 list_add(&old_devices->list, &fs_uuids); 1449 1450 memcpy(seed_devices, fs_devices, sizeof(*seed_devices)); 1451 seed_devices->opened = 1; 1452 INIT_LIST_HEAD(&seed_devices->devices); 1453 INIT_LIST_HEAD(&seed_devices->alloc_list); 1454 mutex_init(&seed_devices->device_list_mutex); 1455 list_splice_init(&fs_devices->devices, &seed_devices->devices); 1456 list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list); 1457 list_for_each_entry(device, &seed_devices->devices, dev_list) { 1458 device->fs_devices = seed_devices; 1459 } 1460 1461 fs_devices->seeding = 0; 1462 fs_devices->num_devices = 0; 1463 fs_devices->open_devices = 0; 1464 fs_devices->seed = seed_devices; 1465 1466 generate_random_uuid(fs_devices->fsid); 1467 memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); 1468 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); 1469 super_flags = btrfs_super_flags(disk_super) & 1470 ~BTRFS_SUPER_FLAG_SEEDING; 1471 btrfs_set_super_flags(disk_super, super_flags); 1472 1473 return 0; 1474 } 1475 1476 /* 1477 * store the expected generation for seed devices in device items.
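 * For every device that still belongs to a seeding fs_devices, the device
 * item in the sprouted filesystem's chunk tree is updated with the
 * generation that seed device currently carries.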
1478 */ 1479 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans, 1480 struct btrfs_root *root) 1481 { 1482 struct btrfs_path *path; 1483 struct extent_buffer *leaf; 1484 struct btrfs_dev_item *dev_item; 1485 struct btrfs_device *device; 1486 struct btrfs_key key; 1487 u8 fs_uuid[BTRFS_UUID_SIZE]; 1488 u8 dev_uuid[BTRFS_UUID_SIZE]; 1489 u64 devid; 1490 int ret; 1491 1492 path = btrfs_alloc_path(); 1493 if (!path) 1494 return -ENOMEM; 1495 1496 root = root->fs_info->chunk_root; 1497 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1498 key.offset = 0; 1499 key.type = BTRFS_DEV_ITEM_KEY; 1500 1501 while (1) { 1502 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 1503 if (ret < 0) 1504 goto error; 1505 1506 leaf = path->nodes[0]; 1507 next_slot: 1508 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 1509 ret = btrfs_next_leaf(root, path); 1510 if (ret > 0) 1511 break; 1512 if (ret < 0) 1513 goto error; 1514 leaf = path->nodes[0]; 1515 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 1516 btrfs_release_path(root, path); 1517 continue; 1518 } 1519 1520 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 1521 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID || 1522 key.type != BTRFS_DEV_ITEM_KEY) 1523 break; 1524 1525 dev_item = btrfs_item_ptr(leaf, path->slots[0], 1526 struct btrfs_dev_item); 1527 devid = btrfs_device_id(leaf, dev_item); 1528 read_extent_buffer(leaf, dev_uuid, 1529 (unsigned long)btrfs_device_uuid(dev_item), 1530 BTRFS_UUID_SIZE); 1531 read_extent_buffer(leaf, fs_uuid, 1532 (unsigned long)btrfs_device_fsid(dev_item), 1533 BTRFS_UUID_SIZE); 1534 device = btrfs_find_device(root, devid, dev_uuid, fs_uuid); 1535 BUG_ON(!device); 1536 1537 if (device->fs_devices->seeding) { 1538 btrfs_set_device_generation(leaf, dev_item, 1539 device->generation); 1540 btrfs_mark_buffer_dirty(leaf); 1541 } 1542 1543 path->slots[0]++; 1544 goto next_slot; 1545 } 1546 ret = 0; 1547 error: 1548 btrfs_free_path(path); 1549 return ret; 1550 } 1551 1552 int btrfs_init_new_device(struct btrfs_root *root, char *device_path) 1553 { 1554 struct btrfs_trans_handle *trans; 1555 struct btrfs_device *device; 1556 struct block_device *bdev; 1557 struct list_head *devices; 1558 struct super_block *sb = root->fs_info->sb; 1559 u64 total_bytes; 1560 int seeding_dev = 0; 1561 int ret = 0; 1562 1563 if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding) 1564 return -EINVAL; 1565 1566 bdev = blkdev_get_by_path(device_path, FMODE_EXCL, 1567 root->fs_info->bdev_holder); 1568 if (IS_ERR(bdev)) 1569 return PTR_ERR(bdev); 1570 1571 if (root->fs_info->fs_devices->seeding) { 1572 seeding_dev = 1; 1573 down_write(&sb->s_umount); 1574 mutex_lock(&uuid_mutex); 1575 } 1576 1577 filemap_write_and_wait(bdev->bd_inode->i_mapping); 1578 mutex_lock(&root->fs_info->volume_mutex); 1579 1580 devices = &root->fs_info->fs_devices->devices; 1581 /* 1582 * we have the volume lock, so we don't need the extra 1583 * device list mutex while reading the list here. 
1584 */ 1585 list_for_each_entry(device, devices, dev_list) { 1586 if (device->bdev == bdev) { 1587 ret = -EEXIST; 1588 goto error; 1589 } 1590 } 1591 1592 device = kzalloc(sizeof(*device), GFP_NOFS); 1593 if (!device) { 1594 /* we can safely leave the fs_devices entry around */ 1595 ret = -ENOMEM; 1596 goto error; 1597 } 1598 1599 device->name = kstrdup(device_path, GFP_NOFS); 1600 if (!device->name) { 1601 kfree(device); 1602 ret = -ENOMEM; 1603 goto error; 1604 } 1605 1606 ret = find_next_devid(root, &device->devid); 1607 if (ret) { 1608 kfree(device->name); 1609 kfree(device); 1610 goto error; 1611 } 1612 1613 trans = btrfs_start_transaction(root, 0); 1614 if (IS_ERR(trans)) { 1615 kfree(device->name); 1616 kfree(device); 1617 ret = PTR_ERR(trans); 1618 goto error; 1619 } 1620 1621 lock_chunks(root); 1622 1623 device->writeable = 1; 1624 device->work.func = pending_bios_fn; 1625 generate_random_uuid(device->uuid); 1626 spin_lock_init(&device->io_lock); 1627 device->generation = trans->transid; 1628 device->io_width = root->sectorsize; 1629 device->io_align = root->sectorsize; 1630 device->sector_size = root->sectorsize; 1631 device->total_bytes = i_size_read(bdev->bd_inode); 1632 device->disk_total_bytes = device->total_bytes; 1633 device->dev_root = root->fs_info->dev_root; 1634 device->bdev = bdev; 1635 device->in_fs_metadata = 1; 1636 device->mode = 0; 1637 set_blocksize(device->bdev, 4096); 1638 1639 if (seeding_dev) { 1640 sb->s_flags &= ~MS_RDONLY; 1641 ret = btrfs_prepare_sprout(trans, root); 1642 BUG_ON(ret); 1643 } 1644 1645 device->fs_devices = root->fs_info->fs_devices; 1646 1647 /* 1648 * we don't want write_supers to jump in here with our device 1649 * half setup 1650 */ 1651 mutex_lock(&root->fs_info->fs_devices->device_list_mutex); 1652 list_add(&device->dev_list, &root->fs_info->fs_devices->devices); 1653 list_add(&device->dev_alloc_list, 1654 &root->fs_info->fs_devices->alloc_list); 1655 root->fs_info->fs_devices->num_devices++; 1656 root->fs_info->fs_devices->open_devices++; 1657 root->fs_info->fs_devices->rw_devices++; 1658 root->fs_info->fs_devices->total_rw_bytes += device->total_bytes; 1659 1660 if (!blk_queue_nonrot(bdev_get_queue(bdev))) 1661 root->fs_info->fs_devices->rotating = 1; 1662 1663 total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy); 1664 btrfs_set_super_total_bytes(&root->fs_info->super_copy, 1665 total_bytes + device->total_bytes); 1666 1667 total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy); 1668 btrfs_set_super_num_devices(&root->fs_info->super_copy, 1669 total_bytes + 1); 1670 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); 1671 1672 if (seeding_dev) { 1673 ret = init_first_rw_device(trans, root, device); 1674 BUG_ON(ret); 1675 ret = btrfs_finish_sprout(trans, root); 1676 BUG_ON(ret); 1677 } else { 1678 ret = btrfs_add_device(trans, root, device); 1679 } 1680 1681 /* 1682 * we've got more storage, clear any full flags on the space 1683 * infos 1684 */ 1685 btrfs_clear_space_info_full(root->fs_info); 1686 1687 unlock_chunks(root); 1688 btrfs_commit_transaction(trans, root); 1689 1690 if (seeding_dev) { 1691 mutex_unlock(&uuid_mutex); 1692 up_write(&sb->s_umount); 1693 1694 ret = btrfs_relocate_sys_chunks(root); 1695 BUG_ON(ret); 1696 } 1697 out: 1698 mutex_unlock(&root->fs_info->volume_mutex); 1699 return ret; 1700 error: 1701 blkdev_put(bdev, FMODE_EXCL); 1702 if (seeding_dev) { 1703 mutex_unlock(&uuid_mutex); 1704 up_write(&sb->s_umount); 1705 } 1706 goto out; 1707 } 1708 1709 static noinline int 
btrfs_update_device(struct btrfs_trans_handle *trans, 1710 struct btrfs_device *device) 1711 { 1712 int ret; 1713 struct btrfs_path *path; 1714 struct btrfs_root *root; 1715 struct btrfs_dev_item *dev_item; 1716 struct extent_buffer *leaf; 1717 struct btrfs_key key; 1718 1719 root = device->dev_root->fs_info->chunk_root; 1720 1721 path = btrfs_alloc_path(); 1722 if (!path) 1723 return -ENOMEM; 1724 1725 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1726 key.type = BTRFS_DEV_ITEM_KEY; 1727 key.offset = device->devid; 1728 1729 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 1730 if (ret < 0) 1731 goto out; 1732 1733 if (ret > 0) { 1734 ret = -ENOENT; 1735 goto out; 1736 } 1737 1738 leaf = path->nodes[0]; 1739 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 1740 1741 btrfs_set_device_id(leaf, dev_item, device->devid); 1742 btrfs_set_device_type(leaf, dev_item, device->type); 1743 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 1744 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 1745 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 1746 btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes); 1747 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used); 1748 btrfs_mark_buffer_dirty(leaf); 1749 1750 out: 1751 btrfs_free_path(path); 1752 return ret; 1753 } 1754 1755 static int __btrfs_grow_device(struct btrfs_trans_handle *trans, 1756 struct btrfs_device *device, u64 new_size) 1757 { 1758 struct btrfs_super_block *super_copy = 1759 &device->dev_root->fs_info->super_copy; 1760 u64 old_total = btrfs_super_total_bytes(super_copy); 1761 u64 diff = new_size - device->total_bytes; 1762 1763 if (!device->writeable) 1764 return -EACCES; 1765 if (new_size <= device->total_bytes) 1766 return -EINVAL; 1767 1768 btrfs_set_super_total_bytes(super_copy, old_total + diff); 1769 device->fs_devices->total_rw_bytes += diff; 1770 1771 device->total_bytes = new_size; 1772 device->disk_total_bytes = new_size; 1773 btrfs_clear_space_info_full(device->dev_root->fs_info); 1774 1775 return btrfs_update_device(trans, device); 1776 } 1777 1778 int btrfs_grow_device(struct btrfs_trans_handle *trans, 1779 struct btrfs_device *device, u64 new_size) 1780 { 1781 int ret; 1782 lock_chunks(device->dev_root); 1783 ret = __btrfs_grow_device(trans, device, new_size); 1784 unlock_chunks(device->dev_root); 1785 return ret; 1786 } 1787 1788 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, 1789 struct btrfs_root *root, 1790 u64 chunk_tree, u64 chunk_objectid, 1791 u64 chunk_offset) 1792 { 1793 int ret; 1794 struct btrfs_path *path; 1795 struct btrfs_key key; 1796 1797 root = root->fs_info->chunk_root; 1798 path = btrfs_alloc_path(); 1799 if (!path) 1800 return -ENOMEM; 1801 1802 key.objectid = chunk_objectid; 1803 key.offset = chunk_offset; 1804 key.type = BTRFS_CHUNK_ITEM_KEY; 1805 1806 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1807 BUG_ON(ret); 1808 1809 ret = btrfs_del_item(trans, root, path); 1810 BUG_ON(ret); 1811 1812 btrfs_free_path(path); 1813 return 0; 1814 } 1815 1816 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64 1817 chunk_offset) 1818 { 1819 struct btrfs_super_block *super_copy = &root->fs_info->super_copy; 1820 struct btrfs_disk_key *disk_key; 1821 struct btrfs_chunk *chunk; 1822 u8 *ptr; 1823 int ret = 0; 1824 u32 num_stripes; 1825 u32 array_size; 1826 u32 len = 0; 1827 u32 cur; 1828 struct btrfs_key key; 1829 1830 array_size = btrfs_super_sys_array_size(super_copy); 
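	/*
	 * sys_chunk_array is a packed sequence of
	 * (struct btrfs_disk_key, struct btrfs_chunk + stripes) pairs.
	 * Walk it entry by entry and memmove the tail down over the entry
	 * whose key matches chunk_objectid/chunk_offset.
	 */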
1831 1832 ptr = super_copy->sys_chunk_array; 1833 cur = 0; 1834 1835 while (cur < array_size) { 1836 disk_key = (struct btrfs_disk_key *)ptr; 1837 btrfs_disk_key_to_cpu(&key, disk_key); 1838 1839 len = sizeof(*disk_key); 1840 1841 if (key.type == BTRFS_CHUNK_ITEM_KEY) { 1842 chunk = (struct btrfs_chunk *)(ptr + len); 1843 num_stripes = btrfs_stack_chunk_num_stripes(chunk); 1844 len += btrfs_chunk_item_size(num_stripes); 1845 } else { 1846 ret = -EIO; 1847 break; 1848 } 1849 if (key.objectid == chunk_objectid && 1850 key.offset == chunk_offset) { 1851 memmove(ptr, ptr + len, array_size - (cur + len)); 1852 array_size -= len; 1853 btrfs_set_super_sys_array_size(super_copy, array_size); 1854 } else { 1855 ptr += len; 1856 cur += len; 1857 } 1858 } 1859 return ret; 1860 } 1861 1862 static int btrfs_relocate_chunk(struct btrfs_root *root, 1863 u64 chunk_tree, u64 chunk_objectid, 1864 u64 chunk_offset) 1865 { 1866 struct extent_map_tree *em_tree; 1867 struct btrfs_root *extent_root; 1868 struct btrfs_trans_handle *trans; 1869 struct extent_map *em; 1870 struct map_lookup *map; 1871 int ret; 1872 int i; 1873 1874 root = root->fs_info->chunk_root; 1875 extent_root = root->fs_info->extent_root; 1876 em_tree = &root->fs_info->mapping_tree.map_tree; 1877 1878 ret = btrfs_can_relocate(extent_root, chunk_offset); 1879 if (ret) 1880 return -ENOSPC; 1881 1882 /* step one, relocate all the extents inside this chunk */ 1883 ret = btrfs_relocate_block_group(extent_root, chunk_offset); 1884 if (ret) 1885 return ret; 1886 1887 trans = btrfs_start_transaction(root, 0); 1888 BUG_ON(IS_ERR(trans)); 1889 1890 lock_chunks(root); 1891 1892 /* 1893 * step two, delete the device extents and the 1894 * chunk tree entries 1895 */ 1896 read_lock(&em_tree->lock); 1897 em = lookup_extent_mapping(em_tree, chunk_offset, 1); 1898 read_unlock(&em_tree->lock); 1899 1900 BUG_ON(em->start > chunk_offset || 1901 em->start + em->len < chunk_offset); 1902 map = (struct map_lookup *)em->bdev; 1903 1904 for (i = 0; i < map->num_stripes; i++) { 1905 ret = btrfs_free_dev_extent(trans, map->stripes[i].dev, 1906 map->stripes[i].physical); 1907 BUG_ON(ret); 1908 1909 if (map->stripes[i].dev) { 1910 ret = btrfs_update_device(trans, map->stripes[i].dev); 1911 BUG_ON(ret); 1912 } 1913 } 1914 ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid, 1915 chunk_offset); 1916 1917 BUG_ON(ret); 1918 1919 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 1920 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset); 1921 BUG_ON(ret); 1922 } 1923 1924 ret = btrfs_remove_block_group(trans, extent_root, chunk_offset); 1925 BUG_ON(ret); 1926 1927 write_lock(&em_tree->lock); 1928 remove_extent_mapping(em_tree, em); 1929 write_unlock(&em_tree->lock); 1930 1931 kfree(map); 1932 em->bdev = NULL; 1933 1934 /* once for the tree */ 1935 free_extent_map(em); 1936 /* once for us */ 1937 free_extent_map(em); 1938 1939 unlock_chunks(root); 1940 btrfs_end_transaction(trans, root); 1941 return 0; 1942 } 1943 1944 static int btrfs_relocate_sys_chunks(struct btrfs_root *root) 1945 { 1946 struct btrfs_root *chunk_root = root->fs_info->chunk_root; 1947 struct btrfs_path *path; 1948 struct extent_buffer *leaf; 1949 struct btrfs_chunk *chunk; 1950 struct btrfs_key key; 1951 struct btrfs_key found_key; 1952 u64 chunk_tree = chunk_root->root_key.objectid; 1953 u64 chunk_type; 1954 bool retried = false; 1955 int failed = 0; 1956 int ret; 1957 1958 path = btrfs_alloc_path(); 1959 if (!path) 1960 return -ENOMEM; 1961 1962 again: 1963 key.objectid = 
BTRFS_FIRST_CHUNK_TREE_OBJECTID; 1964 key.offset = (u64)-1; 1965 key.type = BTRFS_CHUNK_ITEM_KEY; 1966 1967 while (1) { 1968 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 1969 if (ret < 0) 1970 goto error; 1971 BUG_ON(ret == 0); 1972 1973 ret = btrfs_previous_item(chunk_root, path, key.objectid, 1974 key.type); 1975 if (ret < 0) 1976 goto error; 1977 if (ret > 0) 1978 break; 1979 1980 leaf = path->nodes[0]; 1981 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 1982 1983 chunk = btrfs_item_ptr(leaf, path->slots[0], 1984 struct btrfs_chunk); 1985 chunk_type = btrfs_chunk_type(leaf, chunk); 1986 btrfs_release_path(chunk_root, path); 1987 1988 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { 1989 ret = btrfs_relocate_chunk(chunk_root, chunk_tree, 1990 found_key.objectid, 1991 found_key.offset); 1992 if (ret == -ENOSPC) 1993 failed++; 1994 else if (ret) 1995 BUG(); 1996 } 1997 1998 if (found_key.offset == 0) 1999 break; 2000 key.offset = found_key.offset - 1; 2001 } 2002 ret = 0; 2003 if (failed && !retried) { 2004 failed = 0; 2005 retried = true; 2006 goto again; 2007 } else if (failed && retried) { 2008 WARN_ON(1); 2009 ret = -ENOSPC; 2010 } 2011 error: 2012 btrfs_free_path(path); 2013 return ret; 2014 } 2015 2016 static u64 div_factor(u64 num, int factor) 2017 { 2018 if (factor == 10) 2019 return num; 2020 num *= factor; 2021 do_div(num, 10); 2022 return num; 2023 } 2024 2025 int btrfs_balance(struct btrfs_root *dev_root) 2026 { 2027 int ret; 2028 struct list_head *devices = &dev_root->fs_info->fs_devices->devices; 2029 struct btrfs_device *device; 2030 u64 old_size; 2031 u64 size_to_free; 2032 struct btrfs_path *path; 2033 struct btrfs_key key; 2034 struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root; 2035 struct btrfs_trans_handle *trans; 2036 struct btrfs_key found_key; 2037 2038 if (dev_root->fs_info->sb->s_flags & MS_RDONLY) 2039 return -EROFS; 2040 2041 if (!capable(CAP_SYS_ADMIN)) 2042 return -EPERM; 2043 2044 mutex_lock(&dev_root->fs_info->volume_mutex); 2045 dev_root = dev_root->fs_info->dev_root; 2046 2047 /* step one make some room on all the devices */ 2048 list_for_each_entry(device, devices, dev_list) { 2049 old_size = device->total_bytes; 2050 size_to_free = div_factor(old_size, 1); 2051 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024); 2052 if (!device->writeable || 2053 device->total_bytes - device->bytes_used > size_to_free) 2054 continue; 2055 2056 ret = btrfs_shrink_device(device, old_size - size_to_free); 2057 if (ret == -ENOSPC) 2058 break; 2059 BUG_ON(ret); 2060 2061 trans = btrfs_start_transaction(dev_root, 0); 2062 BUG_ON(IS_ERR(trans)); 2063 2064 ret = btrfs_grow_device(trans, device, old_size); 2065 BUG_ON(ret); 2066 2067 btrfs_end_transaction(trans, dev_root); 2068 } 2069 2070 /* step two, relocate all the chunks */ 2071 path = btrfs_alloc_path(); 2072 BUG_ON(!path); 2073 2074 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2075 key.offset = (u64)-1; 2076 key.type = BTRFS_CHUNK_ITEM_KEY; 2077 2078 while (1) { 2079 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 2080 if (ret < 0) 2081 goto error; 2082 2083 /* 2084 * this shouldn't happen, it means the last relocate 2085 * failed 2086 */ 2087 if (ret == 0) 2088 break; 2089 2090 ret = btrfs_previous_item(chunk_root, path, 0, 2091 BTRFS_CHUNK_ITEM_KEY); 2092 if (ret) 2093 break; 2094 2095 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 2096 path->slots[0]); 2097 if (found_key.objectid != key.objectid) 2098 break; 2099 2100 /* chunk zero is special */ 2101 if 
(found_key.offset == 0) 2102 break; 2103 2104 btrfs_release_path(chunk_root, path); 2105 ret = btrfs_relocate_chunk(chunk_root, 2106 chunk_root->root_key.objectid, 2107 found_key.objectid, 2108 found_key.offset); 2109 BUG_ON(ret && ret != -ENOSPC); 2110 key.offset = found_key.offset - 1; 2111 } 2112 ret = 0; 2113 error: 2114 btrfs_free_path(path); 2115 mutex_unlock(&dev_root->fs_info->volume_mutex); 2116 return ret; 2117 } 2118 2119 /* 2120 * shrinking a device means finding all of the device extents past 2121 * the new size, and then following the back refs to the chunks. 2122 * The chunk relocation code actually frees the device extent 2123 */ 2124 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) 2125 { 2126 struct btrfs_trans_handle *trans; 2127 struct btrfs_root *root = device->dev_root; 2128 struct btrfs_dev_extent *dev_extent = NULL; 2129 struct btrfs_path *path; 2130 u64 length; 2131 u64 chunk_tree; 2132 u64 chunk_objectid; 2133 u64 chunk_offset; 2134 int ret; 2135 int slot; 2136 int failed = 0; 2137 bool retried = false; 2138 struct extent_buffer *l; 2139 struct btrfs_key key; 2140 struct btrfs_super_block *super_copy = &root->fs_info->super_copy; 2141 u64 old_total = btrfs_super_total_bytes(super_copy); 2142 u64 old_size = device->total_bytes; 2143 u64 diff = device->total_bytes - new_size; 2144 2145 if (new_size >= device->total_bytes) 2146 return -EINVAL; 2147 2148 path = btrfs_alloc_path(); 2149 if (!path) 2150 return -ENOMEM; 2151 2152 path->reada = 2; 2153 2154 lock_chunks(root); 2155 2156 device->total_bytes = new_size; 2157 if (device->writeable) 2158 device->fs_devices->total_rw_bytes -= diff; 2159 unlock_chunks(root); 2160 2161 again: 2162 key.objectid = device->devid; 2163 key.offset = (u64)-1; 2164 key.type = BTRFS_DEV_EXTENT_KEY; 2165 2166 while (1) { 2167 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2168 if (ret < 0) 2169 goto done; 2170 2171 ret = btrfs_previous_item(root, path, 0, key.type); 2172 if (ret < 0) 2173 goto done; 2174 if (ret) { 2175 ret = 0; 2176 btrfs_release_path(root, path); 2177 break; 2178 } 2179 2180 l = path->nodes[0]; 2181 slot = path->slots[0]; 2182 btrfs_item_key_to_cpu(l, &key, path->slots[0]); 2183 2184 if (key.objectid != device->devid) { 2185 btrfs_release_path(root, path); 2186 break; 2187 } 2188 2189 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 2190 length = btrfs_dev_extent_length(l, dev_extent); 2191 2192 if (key.offset + length <= new_size) { 2193 btrfs_release_path(root, path); 2194 break; 2195 } 2196 2197 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent); 2198 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent); 2199 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); 2200 btrfs_release_path(root, path); 2201 2202 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid, 2203 chunk_offset); 2204 if (ret && ret != -ENOSPC) 2205 goto done; 2206 if (ret == -ENOSPC) 2207 failed++; 2208 key.offset -= 1; 2209 } 2210 2211 if (failed && !retried) { 2212 failed = 0; 2213 retried = true; 2214 goto again; 2215 } else if (failed && retried) { 2216 ret = -ENOSPC; 2217 lock_chunks(root); 2218 2219 device->total_bytes = old_size; 2220 if (device->writeable) 2221 device->fs_devices->total_rw_bytes += diff; 2222 unlock_chunks(root); 2223 goto done; 2224 } 2225 2226 /* Shrinking succeeded, else we would be at "done". 
*/ 2227 trans = btrfs_start_transaction(root, 0); 2228 if (IS_ERR(trans)) { 2229 ret = PTR_ERR(trans); 2230 goto done; 2231 } 2232 2233 lock_chunks(root); 2234 2235 device->disk_total_bytes = new_size; 2236 /* Now btrfs_update_device() will change the on-disk size. */ 2237 ret = btrfs_update_device(trans, device); 2238 if (ret) { 2239 unlock_chunks(root); 2240 btrfs_end_transaction(trans, root); 2241 goto done; 2242 } 2243 WARN_ON(diff > old_total); 2244 btrfs_set_super_total_bytes(super_copy, old_total - diff); 2245 unlock_chunks(root); 2246 btrfs_end_transaction(trans, root); 2247 done: 2248 btrfs_free_path(path); 2249 return ret; 2250 } 2251 2252 static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans, 2253 struct btrfs_root *root, 2254 struct btrfs_key *key, 2255 struct btrfs_chunk *chunk, int item_size) 2256 { 2257 struct btrfs_super_block *super_copy = &root->fs_info->super_copy; 2258 struct btrfs_disk_key disk_key; 2259 u32 array_size; 2260 u8 *ptr; 2261 2262 array_size = btrfs_super_sys_array_size(super_copy); 2263 if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) 2264 return -EFBIG; 2265 2266 ptr = super_copy->sys_chunk_array + array_size; 2267 btrfs_cpu_key_to_disk(&disk_key, key); 2268 memcpy(ptr, &disk_key, sizeof(disk_key)); 2269 ptr += sizeof(disk_key); 2270 memcpy(ptr, chunk, item_size); 2271 item_size += sizeof(disk_key); 2272 btrfs_set_super_sys_array_size(super_copy, array_size + item_size); 2273 return 0; 2274 } 2275 2276 static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size, 2277 int num_stripes, int sub_stripes) 2278 { 2279 if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP)) 2280 return calc_size; 2281 else if (type & BTRFS_BLOCK_GROUP_RAID10) 2282 return calc_size * (num_stripes / sub_stripes); 2283 else 2284 return calc_size * num_stripes; 2285 } 2286 2287 /* Used to sort the devices by max_avail(descending sort) */ 2288 int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2) 2289 { 2290 if (((struct btrfs_device_info *)dev_info1)->max_avail > 2291 ((struct btrfs_device_info *)dev_info2)->max_avail) 2292 return -1; 2293 else if (((struct btrfs_device_info *)dev_info1)->max_avail < 2294 ((struct btrfs_device_info *)dev_info2)->max_avail) 2295 return 1; 2296 else 2297 return 0; 2298 } 2299 2300 static int __btrfs_calc_nstripes(struct btrfs_fs_devices *fs_devices, u64 type, 2301 int *num_stripes, int *min_stripes, 2302 int *sub_stripes) 2303 { 2304 *num_stripes = 1; 2305 *min_stripes = 1; 2306 *sub_stripes = 0; 2307 2308 if (type & (BTRFS_BLOCK_GROUP_RAID0)) { 2309 *num_stripes = fs_devices->rw_devices; 2310 *min_stripes = 2; 2311 } 2312 if (type & (BTRFS_BLOCK_GROUP_DUP)) { 2313 *num_stripes = 2; 2314 *min_stripes = 2; 2315 } 2316 if (type & (BTRFS_BLOCK_GROUP_RAID1)) { 2317 if (fs_devices->rw_devices < 2) 2318 return -ENOSPC; 2319 *num_stripes = 2; 2320 *min_stripes = 2; 2321 } 2322 if (type & (BTRFS_BLOCK_GROUP_RAID10)) { 2323 *num_stripes = fs_devices->rw_devices; 2324 if (*num_stripes < 4) 2325 return -ENOSPC; 2326 *num_stripes &= ~(u32)1; 2327 *sub_stripes = 2; 2328 *min_stripes = 4; 2329 } 2330 2331 return 0; 2332 } 2333 2334 static u64 __btrfs_calc_stripe_size(struct btrfs_fs_devices *fs_devices, 2335 u64 proposed_size, u64 type, 2336 int num_stripes, int small_stripe) 2337 { 2338 int min_stripe_size = 1 * 1024 * 1024; 2339 u64 calc_size = proposed_size; 2340 u64 max_chunk_size = calc_size; 2341 int ncopies = 1; 2342 2343 if (type & (BTRFS_BLOCK_GROUP_RAID1 | 2344 BTRFS_BLOCK_GROUP_DUP | 2345 
BTRFS_BLOCK_GROUP_RAID10)) 2346 ncopies = 2; 2347 2348 if (type & BTRFS_BLOCK_GROUP_DATA) { 2349 max_chunk_size = 10 * calc_size; 2350 min_stripe_size = 64 * 1024 * 1024; 2351 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 2352 max_chunk_size = 256 * 1024 * 1024; 2353 min_stripe_size = 32 * 1024 * 1024; 2354 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 2355 calc_size = 8 * 1024 * 1024; 2356 max_chunk_size = calc_size * 2; 2357 min_stripe_size = 1 * 1024 * 1024; 2358 } 2359 2360 /* we don't want a chunk larger than 10% of writeable space */ 2361 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1), 2362 max_chunk_size); 2363 2364 if (calc_size * num_stripes > max_chunk_size * ncopies) { 2365 calc_size = max_chunk_size * ncopies; 2366 do_div(calc_size, num_stripes); 2367 do_div(calc_size, BTRFS_STRIPE_LEN); 2368 calc_size *= BTRFS_STRIPE_LEN; 2369 } 2370 2371 /* we don't want tiny stripes */ 2372 if (!small_stripe) 2373 calc_size = max_t(u64, min_stripe_size, calc_size); 2374 2375 /* 2376 * we're about to do_div by the BTRFS_STRIPE_LEN so let's make sure 2377 * we end up with something bigger than a stripe 2378 */ 2379 calc_size = max_t(u64, calc_size, BTRFS_STRIPE_LEN); 2380 2381 do_div(calc_size, BTRFS_STRIPE_LEN); 2382 calc_size *= BTRFS_STRIPE_LEN; 2383 2384 return calc_size; 2385 } 2386 2387 static struct map_lookup *__shrink_map_lookup_stripes(struct map_lookup *map, 2388 int num_stripes) 2389 { 2390 struct map_lookup *new; 2391 size_t len = map_lookup_size(num_stripes); 2392 2393 BUG_ON(map->num_stripes < num_stripes); 2394 2395 if (map->num_stripes == num_stripes) 2396 return map; 2397 2398 new = kmalloc(len, GFP_NOFS); 2399 if (!new) { 2400 /* just change map->num_stripes */ 2401 map->num_stripes = num_stripes; 2402 return map; 2403 } 2404 2405 memcpy(new, map, len); 2406 new->num_stripes = num_stripes; 2407 kfree(map); 2408 return new; 2409 } 2410 2411 /* 2412 * helper to allocate device space from btrfs_device_info, in which we store 2413 * the max free space information of every device. It is used when we cannot 2414 * allocate chunks of the default size. 2415 * 2416 * With this helper, we can allocate a new chunk as large as possible. 2417 */ 2418 static int __btrfs_alloc_tiny_space(struct btrfs_trans_handle *trans, 2419 struct btrfs_fs_devices *fs_devices, 2420 struct btrfs_device_info *devices, 2421 int nr_device, u64 type, 2422 struct map_lookup **map_lookup, 2423 int min_stripes, u64 *stripe_size) 2424 { 2425 int i, index, sort_again = 0; 2426 int min_devices = min_stripes; 2427 u64 max_avail, min_free; 2428 struct map_lookup *map = *map_lookup; 2429 int ret; 2430 2431 if (nr_device < min_stripes) 2432 return -ENOSPC; 2433 2434 btrfs_descending_sort_devices(devices, nr_device); 2435 2436 max_avail = devices[0].max_avail; 2437 if (!max_avail) 2438 return -ENOSPC; 2439 2440 for (i = 0; i < nr_device; i++) { 2441 /* 2442 * if dev_offset is 0, it means the free space of this device 2443 * is less than what we need, and we didn't search max avail 2444 * extent on this device, so do it now.
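find_free_dev_extent() below fills in both dev_offset and max_avail for such a device and sets sort_again, so the list is re-sorted with the updated numbers before the stripe size is chosen.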
2445 */ 2446 if (!devices[i].dev_offset) { 2447 ret = find_free_dev_extent(trans, devices[i].dev, 2448 max_avail, 2449 &devices[i].dev_offset, 2450 &devices[i].max_avail); 2451 if (ret != 0 && ret != -ENOSPC) 2452 return ret; 2453 sort_again = 1; 2454 } 2455 } 2456 2457 /* we update the max avail free extent of each devices, sort again */ 2458 if (sort_again) 2459 btrfs_descending_sort_devices(devices, nr_device); 2460 2461 if (type & BTRFS_BLOCK_GROUP_DUP) 2462 min_devices = 1; 2463 2464 if (!devices[min_devices - 1].max_avail) 2465 return -ENOSPC; 2466 2467 max_avail = devices[min_devices - 1].max_avail; 2468 if (type & BTRFS_BLOCK_GROUP_DUP) 2469 do_div(max_avail, 2); 2470 2471 max_avail = __btrfs_calc_stripe_size(fs_devices, max_avail, type, 2472 min_stripes, 1); 2473 if (type & BTRFS_BLOCK_GROUP_DUP) 2474 min_free = max_avail * 2; 2475 else 2476 min_free = max_avail; 2477 2478 if (min_free > devices[min_devices - 1].max_avail) 2479 return -ENOSPC; 2480 2481 map = __shrink_map_lookup_stripes(map, min_stripes); 2482 *stripe_size = max_avail; 2483 2484 index = 0; 2485 for (i = 0; i < min_stripes; i++) { 2486 map->stripes[i].dev = devices[index].dev; 2487 map->stripes[i].physical = devices[index].dev_offset; 2488 if (type & BTRFS_BLOCK_GROUP_DUP) { 2489 i++; 2490 map->stripes[i].dev = devices[index].dev; 2491 map->stripes[i].physical = devices[index].dev_offset + 2492 max_avail; 2493 } 2494 index++; 2495 } 2496 *map_lookup = map; 2497 2498 return 0; 2499 } 2500 2501 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, 2502 struct btrfs_root *extent_root, 2503 struct map_lookup **map_ret, 2504 u64 *num_bytes, u64 *stripe_size, 2505 u64 start, u64 type) 2506 { 2507 struct btrfs_fs_info *info = extent_root->fs_info; 2508 struct btrfs_device *device = NULL; 2509 struct btrfs_fs_devices *fs_devices = info->fs_devices; 2510 struct list_head *cur; 2511 struct map_lookup *map; 2512 struct extent_map_tree *em_tree; 2513 struct extent_map *em; 2514 struct btrfs_device_info *devices_info; 2515 struct list_head private_devs; 2516 u64 calc_size = 1024 * 1024 * 1024; 2517 u64 min_free; 2518 u64 avail; 2519 u64 dev_offset; 2520 int num_stripes; 2521 int min_stripes; 2522 int sub_stripes; 2523 int min_devices; /* the min number of devices we need */ 2524 int i; 2525 int ret; 2526 int index; 2527 2528 if ((type & BTRFS_BLOCK_GROUP_RAID1) && 2529 (type & BTRFS_BLOCK_GROUP_DUP)) { 2530 WARN_ON(1); 2531 type &= ~BTRFS_BLOCK_GROUP_DUP; 2532 } 2533 if (list_empty(&fs_devices->alloc_list)) 2534 return -ENOSPC; 2535 2536 ret = __btrfs_calc_nstripes(fs_devices, type, &num_stripes, 2537 &min_stripes, &sub_stripes); 2538 if (ret) 2539 return ret; 2540 2541 devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices, 2542 GFP_NOFS); 2543 if (!devices_info) 2544 return -ENOMEM; 2545 2546 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); 2547 if (!map) { 2548 ret = -ENOMEM; 2549 goto error; 2550 } 2551 map->num_stripes = num_stripes; 2552 2553 cur = fs_devices->alloc_list.next; 2554 index = 0; 2555 i = 0; 2556 2557 calc_size = __btrfs_calc_stripe_size(fs_devices, calc_size, type, 2558 num_stripes, 0); 2559 2560 if (type & BTRFS_BLOCK_GROUP_DUP) { 2561 min_free = calc_size * 2; 2562 min_devices = 1; 2563 } else { 2564 min_free = calc_size; 2565 min_devices = min_stripes; 2566 } 2567 2568 INIT_LIST_HEAD(&private_devs); 2569 while (index < num_stripes) { 2570 device = list_entry(cur, struct btrfs_device, dev_alloc_list); 2571 BUG_ON(!device->writeable); 2572 if (device->total_bytes > 
device->bytes_used) 2573 avail = device->total_bytes - device->bytes_used; 2574 else 2575 avail = 0; 2576 cur = cur->next; 2577 2578 if (device->in_fs_metadata && avail >= min_free) { 2579 ret = find_free_dev_extent(trans, device, min_free, 2580 &devices_info[i].dev_offset, 2581 &devices_info[i].max_avail); 2582 if (ret == 0) { 2583 list_move_tail(&device->dev_alloc_list, 2584 &private_devs); 2585 map->stripes[index].dev = device; 2586 map->stripes[index].physical = 2587 devices_info[i].dev_offset; 2588 index++; 2589 if (type & BTRFS_BLOCK_GROUP_DUP) { 2590 map->stripes[index].dev = device; 2591 map->stripes[index].physical = 2592 devices_info[i].dev_offset + 2593 calc_size; 2594 index++; 2595 } 2596 } else if (ret != -ENOSPC) 2597 goto error; 2598 2599 devices_info[i].dev = device; 2600 i++; 2601 } else if (device->in_fs_metadata && 2602 avail >= BTRFS_STRIPE_LEN) { 2603 devices_info[i].dev = device; 2604 devices_info[i].max_avail = avail; 2605 i++; 2606 } 2607 2608 if (cur == &fs_devices->alloc_list) 2609 break; 2610 } 2611 2612 list_splice(&private_devs, &fs_devices->alloc_list); 2613 if (index < num_stripes) { 2614 if (index >= min_stripes) { 2615 num_stripes = index; 2616 if (type & (BTRFS_BLOCK_GROUP_RAID10)) { 2617 num_stripes /= sub_stripes; 2618 num_stripes *= sub_stripes; 2619 } 2620 2621 map = __shrink_map_lookup_stripes(map, num_stripes); 2622 } else if (i >= min_devices) { 2623 ret = __btrfs_alloc_tiny_space(trans, fs_devices, 2624 devices_info, i, type, 2625 &map, min_stripes, 2626 &calc_size); 2627 if (ret) 2628 goto error; 2629 } else { 2630 ret = -ENOSPC; 2631 goto error; 2632 } 2633 } 2634 map->sector_size = extent_root->sectorsize; 2635 map->stripe_len = BTRFS_STRIPE_LEN; 2636 map->io_align = BTRFS_STRIPE_LEN; 2637 map->io_width = BTRFS_STRIPE_LEN; 2638 map->type = type; 2639 map->sub_stripes = sub_stripes; 2640 2641 *map_ret = map; 2642 *stripe_size = calc_size; 2643 *num_bytes = chunk_bytes_by_type(type, calc_size, 2644 map->num_stripes, sub_stripes); 2645 2646 em = alloc_extent_map(GFP_NOFS); 2647 if (!em) { 2648 ret = -ENOMEM; 2649 goto error; 2650 } 2651 em->bdev = (struct block_device *)map; 2652 em->start = start; 2653 em->len = *num_bytes; 2654 em->block_start = 0; 2655 em->block_len = em->len; 2656 2657 em_tree = &extent_root->fs_info->mapping_tree.map_tree; 2658 write_lock(&em_tree->lock); 2659 ret = add_extent_mapping(em_tree, em); 2660 write_unlock(&em_tree->lock); 2661 BUG_ON(ret); 2662 free_extent_map(em); 2663 2664 ret = btrfs_make_block_group(trans, extent_root, 0, type, 2665 BTRFS_FIRST_CHUNK_TREE_OBJECTID, 2666 start, *num_bytes); 2667 BUG_ON(ret); 2668 2669 index = 0; 2670 while (index < map->num_stripes) { 2671 device = map->stripes[index].dev; 2672 dev_offset = map->stripes[index].physical; 2673 2674 ret = btrfs_alloc_dev_extent(trans, device, 2675 info->chunk_root->root_key.objectid, 2676 BTRFS_FIRST_CHUNK_TREE_OBJECTID, 2677 start, dev_offset, calc_size); 2678 BUG_ON(ret); 2679 index++; 2680 } 2681 2682 kfree(devices_info); 2683 return 0; 2684 2685 error: 2686 kfree(map); 2687 kfree(devices_info); 2688 return ret; 2689 } 2690 2691 static int __finish_chunk_alloc(struct btrfs_trans_handle *trans, 2692 struct btrfs_root *extent_root, 2693 struct map_lookup *map, u64 chunk_offset, 2694 u64 chunk_size, u64 stripe_size) 2695 { 2696 u64 dev_offset; 2697 struct btrfs_key key; 2698 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root; 2699 struct btrfs_device *device; 2700 struct btrfs_chunk *chunk; 2701 struct btrfs_stripe *stripe; 2702 size_t 
item_size = btrfs_chunk_item_size(map->num_stripes); 2703 int index = 0; 2704 int ret; 2705 2706 chunk = kzalloc(item_size, GFP_NOFS); 2707 if (!chunk) 2708 return -ENOMEM; 2709 2710 index = 0; 2711 while (index < map->num_stripes) { 2712 device = map->stripes[index].dev; 2713 device->bytes_used += stripe_size; 2714 ret = btrfs_update_device(trans, device); 2715 BUG_ON(ret); 2716 index++; 2717 } 2718 2719 index = 0; 2720 stripe = &chunk->stripe; 2721 while (index < map->num_stripes) { 2722 device = map->stripes[index].dev; 2723 dev_offset = map->stripes[index].physical; 2724 2725 btrfs_set_stack_stripe_devid(stripe, device->devid); 2726 btrfs_set_stack_stripe_offset(stripe, dev_offset); 2727 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE); 2728 stripe++; 2729 index++; 2730 } 2731 2732 btrfs_set_stack_chunk_length(chunk, chunk_size); 2733 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid); 2734 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len); 2735 btrfs_set_stack_chunk_type(chunk, map->type); 2736 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes); 2737 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len); 2738 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len); 2739 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize); 2740 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes); 2741 2742 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2743 key.type = BTRFS_CHUNK_ITEM_KEY; 2744 key.offset = chunk_offset; 2745 2746 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); 2747 BUG_ON(ret); 2748 2749 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 2750 ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk, 2751 item_size); 2752 BUG_ON(ret); 2753 } 2754 kfree(chunk); 2755 return 0; 2756 } 2757 2758 /* 2759 * Chunk allocation falls into two parts. The first part does the work 2760 * that makes the newly allocated chunk usable, but does not do any operation 2761 * that modifies the chunk tree. The second part does the work that 2762 * requires modifying the chunk tree. This division is important for the 2763 * bootstrap process of adding storage to a seed btrfs.
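Here, __btrfs_alloc_chunk() performs the first part and __finish_chunk_alloc() the second; btrfs_alloc_chunk() below simply calls them back to back.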
*/ 2765 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, 2766 struct btrfs_root *extent_root, u64 type) 2767 { 2768 u64 chunk_offset; 2769 u64 chunk_size; 2770 u64 stripe_size; 2771 struct map_lookup *map; 2772 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root; 2773 int ret; 2774 2775 ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID, 2776 &chunk_offset); 2777 if (ret) 2778 return ret; 2779 2780 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size, 2781 &stripe_size, chunk_offset, type); 2782 if (ret) 2783 return ret; 2784 2785 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset, 2786 chunk_size, stripe_size); 2787 BUG_ON(ret); 2788 return 0; 2789 } 2790 2791 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans, 2792 struct btrfs_root *root, 2793 struct btrfs_device *device) 2794 { 2795 u64 chunk_offset; 2796 u64 sys_chunk_offset; 2797 u64 chunk_size; 2798 u64 sys_chunk_size; 2799 u64 stripe_size; 2800 u64 sys_stripe_size; 2801 u64 alloc_profile; 2802 struct map_lookup *map; 2803 struct map_lookup *sys_map; 2804 struct btrfs_fs_info *fs_info = root->fs_info; 2805 struct btrfs_root *extent_root = fs_info->extent_root; 2806 int ret; 2807 2808 ret = find_next_chunk(fs_info->chunk_root, 2809 BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset); 2810 BUG_ON(ret); 2811 2812 alloc_profile = BTRFS_BLOCK_GROUP_METADATA | 2813 (fs_info->metadata_alloc_profile & 2814 fs_info->avail_metadata_alloc_bits); 2815 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile); 2816 2817 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size, 2818 &stripe_size, chunk_offset, alloc_profile); 2819 BUG_ON(ret); 2820 2821 sys_chunk_offset = chunk_offset + chunk_size; 2822 2823 alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM | 2824 (fs_info->system_alloc_profile & 2825 fs_info->avail_system_alloc_bits); 2826 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile); 2827 2828 ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map, 2829 &sys_chunk_size, &sys_stripe_size, 2830 sys_chunk_offset, alloc_profile); 2831 BUG_ON(ret); 2832 2833 ret = btrfs_add_device(trans, fs_info->chunk_root, device); 2834 BUG_ON(ret); 2835 2836 /* 2837 * Modifying the chunk tree requires allocating new blocks from both 2838 * the system block group and the metadata block group. So we can only 2839 * do operations that require modifying the chunk tree after both 2840 * block groups have been created.
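Accordingly, btrfs_add_device() above and the two __finish_chunk_alloc() calls below are issued only after both __btrfs_alloc_chunk() calls have set up the new block groups.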
2841 */ 2842 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset, 2843 chunk_size, stripe_size); 2844 BUG_ON(ret); 2845 2846 ret = __finish_chunk_alloc(trans, extent_root, sys_map, 2847 sys_chunk_offset, sys_chunk_size, 2848 sys_stripe_size); 2849 BUG_ON(ret); 2850 return 0; 2851 } 2852 2853 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset) 2854 { 2855 struct extent_map *em; 2856 struct map_lookup *map; 2857 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree; 2858 int readonly = 0; 2859 int i; 2860 2861 read_lock(&map_tree->map_tree.lock); 2862 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1); 2863 read_unlock(&map_tree->map_tree.lock); 2864 if (!em) 2865 return 1; 2866 2867 if (btrfs_test_opt(root, DEGRADED)) { 2868 free_extent_map(em); 2869 return 0; 2870 } 2871 2872 map = (struct map_lookup *)em->bdev; 2873 for (i = 0; i < map->num_stripes; i++) { 2874 if (!map->stripes[i].dev->writeable) { 2875 readonly = 1; 2876 break; 2877 } 2878 } 2879 free_extent_map(em); 2880 return readonly; 2881 } 2882 2883 void btrfs_mapping_init(struct btrfs_mapping_tree *tree) 2884 { 2885 extent_map_tree_init(&tree->map_tree, GFP_NOFS); 2886 } 2887 2888 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree) 2889 { 2890 struct extent_map *em; 2891 2892 while (1) { 2893 write_lock(&tree->map_tree.lock); 2894 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1); 2895 if (em) 2896 remove_extent_mapping(&tree->map_tree, em); 2897 write_unlock(&tree->map_tree.lock); 2898 if (!em) 2899 break; 2900 kfree(em->bdev); 2901 /* once for us */ 2902 free_extent_map(em); 2903 /* once for the tree */ 2904 free_extent_map(em); 2905 } 2906 } 2907 2908 int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len) 2909 { 2910 struct extent_map *em; 2911 struct map_lookup *map; 2912 struct extent_map_tree *em_tree = &map_tree->map_tree; 2913 int ret; 2914 2915 read_lock(&em_tree->lock); 2916 em = lookup_extent_mapping(em_tree, logical, len); 2917 read_unlock(&em_tree->lock); 2918 BUG_ON(!em); 2919 2920 BUG_ON(em->start > logical || em->start + em->len < logical); 2921 map = (struct map_lookup *)em->bdev; 2922 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1)) 2923 ret = map->num_stripes; 2924 else if (map->type & BTRFS_BLOCK_GROUP_RAID10) 2925 ret = map->sub_stripes; 2926 else 2927 ret = 1; 2928 free_extent_map(em); 2929 return ret; 2930 } 2931 2932 static int find_live_mirror(struct map_lookup *map, int first, int num, 2933 int optimal) 2934 { 2935 int i; 2936 if (map->stripes[optimal].dev->bdev) 2937 return optimal; 2938 for (i = first; i < first + num; i++) { 2939 if (map->stripes[i].dev->bdev) 2940 return i; 2941 } 2942 /* we couldn't find one that doesn't fail. 
Just return something 2943 * and the io error handling code will clean up eventually 2944 */ 2945 return optimal; 2946 } 2947 2948 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw, 2949 u64 logical, u64 *length, 2950 struct btrfs_multi_bio **multi_ret, 2951 int mirror_num, struct page *unplug_page) 2952 { 2953 struct extent_map *em; 2954 struct map_lookup *map; 2955 struct extent_map_tree *em_tree = &map_tree->map_tree; 2956 u64 offset; 2957 u64 stripe_offset; 2958 u64 stripe_nr; 2959 int stripes_allocated = 8; 2960 int stripes_required = 1; 2961 int stripe_index; 2962 int i; 2963 int num_stripes; 2964 int max_errors = 0; 2965 struct btrfs_multi_bio *multi = NULL; 2966 2967 if (multi_ret && !(rw & REQ_WRITE)) 2968 stripes_allocated = 1; 2969 again: 2970 if (multi_ret) { 2971 multi = kzalloc(btrfs_multi_bio_size(stripes_allocated), 2972 GFP_NOFS); 2973 if (!multi) 2974 return -ENOMEM; 2975 2976 atomic_set(&multi->error, 0); 2977 } 2978 2979 read_lock(&em_tree->lock); 2980 em = lookup_extent_mapping(em_tree, logical, *length); 2981 read_unlock(&em_tree->lock); 2982 2983 if (!em && unplug_page) { 2984 kfree(multi); 2985 return 0; 2986 } 2987 2988 if (!em) { 2989 printk(KERN_CRIT "unable to find logical %llu len %llu\n", 2990 (unsigned long long)logical, 2991 (unsigned long long)*length); 2992 BUG(); 2993 } 2994 2995 BUG_ON(em->start > logical || em->start + em->len < logical); 2996 map = (struct map_lookup *)em->bdev; 2997 offset = logical - em->start; 2998 2999 if (mirror_num > map->num_stripes) 3000 mirror_num = 0; 3001 3002 /* if our multi bio struct is too small, back off and try again */ 3003 if (rw & REQ_WRITE) { 3004 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 | 3005 BTRFS_BLOCK_GROUP_DUP)) { 3006 stripes_required = map->num_stripes; 3007 max_errors = 1; 3008 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { 3009 stripes_required = map->sub_stripes; 3010 max_errors = 1; 3011 } 3012 } 3013 if (multi_ret && (rw & REQ_WRITE) && 3014 stripes_allocated < stripes_required) { 3015 stripes_allocated = map->num_stripes; 3016 free_extent_map(em); 3017 kfree(multi); 3018 goto again; 3019 } 3020 stripe_nr = offset; 3021 /* 3022 * stripe_nr counts the total number of stripes we have to stride 3023 * to get to this block 3024 */ 3025 do_div(stripe_nr, map->stripe_len); 3026 3027 stripe_offset = stripe_nr * map->stripe_len; 3028 BUG_ON(offset < stripe_offset); 3029 3030 /* stripe_offset is the offset of this block in its stripe*/ 3031 stripe_offset = offset - stripe_offset; 3032 3033 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 | 3034 BTRFS_BLOCK_GROUP_RAID10 | 3035 BTRFS_BLOCK_GROUP_DUP)) { 3036 /* we limit the length of each bio to what fits in a stripe */ 3037 *length = min_t(u64, em->len - offset, 3038 map->stripe_len - stripe_offset); 3039 } else { 3040 *length = em->len - offset; 3041 } 3042 3043 if (!multi_ret && !unplug_page) 3044 goto out; 3045 3046 num_stripes = 1; 3047 stripe_index = 0; 3048 if (map->type & BTRFS_BLOCK_GROUP_RAID1) { 3049 if (unplug_page || (rw & REQ_WRITE)) 3050 num_stripes = map->num_stripes; 3051 else if (mirror_num) 3052 stripe_index = mirror_num - 1; 3053 else { 3054 stripe_index = find_live_mirror(map, 0, 3055 map->num_stripes, 3056 current->pid % map->num_stripes); 3057 } 3058 3059 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { 3060 if (rw & REQ_WRITE) 3061 num_stripes = map->num_stripes; 3062 else if (mirror_num) 3063 stripe_index = mirror_num - 1; 3064 3065 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { 3066 int 
factor = map->num_stripes / map->sub_stripes; 3067 3068 stripe_index = do_div(stripe_nr, factor); 3069 stripe_index *= map->sub_stripes; 3070 3071 if (unplug_page || (rw & REQ_WRITE)) 3072 num_stripes = map->sub_stripes; 3073 else if (mirror_num) 3074 stripe_index += mirror_num - 1; 3075 else { 3076 stripe_index = find_live_mirror(map, stripe_index, 3077 map->sub_stripes, stripe_index + 3078 current->pid % map->sub_stripes); 3079 } 3080 } else { 3081 /* 3082 * after this do_div call, stripe_nr is the number of stripes 3083 * on this device we have to walk to find the data, and 3084 * stripe_index is the number of our device in the stripe array 3085 */ 3086 stripe_index = do_div(stripe_nr, map->num_stripes); 3087 } 3088 BUG_ON(stripe_index >= map->num_stripes); 3089 3090 for (i = 0; i < num_stripes; i++) { 3091 if (unplug_page) { 3092 struct btrfs_device *device; 3093 struct backing_dev_info *bdi; 3094 3095 device = map->stripes[stripe_index].dev; 3096 if (device->bdev) { 3097 bdi = blk_get_backing_dev_info(device->bdev); 3098 if (bdi->unplug_io_fn) 3099 bdi->unplug_io_fn(bdi, unplug_page); 3100 } 3101 } else { 3102 multi->stripes[i].physical = 3103 map->stripes[stripe_index].physical + 3104 stripe_offset + stripe_nr * map->stripe_len; 3105 multi->stripes[i].dev = map->stripes[stripe_index].dev; 3106 } 3107 stripe_index++; 3108 } 3109 if (multi_ret) { 3110 *multi_ret = multi; 3111 multi->num_stripes = num_stripes; 3112 multi->max_errors = max_errors; 3113 } 3114 out: 3115 free_extent_map(em); 3116 return 0; 3117 } 3118 3119 int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw, 3120 u64 logical, u64 *length, 3121 struct btrfs_multi_bio **multi_ret, int mirror_num) 3122 { 3123 return __btrfs_map_block(map_tree, rw, logical, length, multi_ret, 3124 mirror_num, NULL); 3125 } 3126 3127 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, 3128 u64 chunk_start, u64 physical, u64 devid, 3129 u64 **logical, int *naddrs, int *stripe_len) 3130 { 3131 struct extent_map_tree *em_tree = &map_tree->map_tree; 3132 struct extent_map *em; 3133 struct map_lookup *map; 3134 u64 *buf; 3135 u64 bytenr; 3136 u64 length; 3137 u64 stripe_nr; 3138 int i, j, nr = 0; 3139 3140 read_lock(&em_tree->lock); 3141 em = lookup_extent_mapping(em_tree, chunk_start, 1); 3142 read_unlock(&em_tree->lock); 3143 3144 BUG_ON(!em || em->start != chunk_start); 3145 map = (struct map_lookup *)em->bdev; 3146 3147 length = em->len; 3148 if (map->type & BTRFS_BLOCK_GROUP_RAID10) 3149 do_div(length, map->num_stripes / map->sub_stripes); 3150 else if (map->type & BTRFS_BLOCK_GROUP_RAID0) 3151 do_div(length, map->num_stripes); 3152 3153 buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS); 3154 BUG_ON(!buf); 3155 3156 for (i = 0; i < map->num_stripes; i++) { 3157 if (devid && map->stripes[i].dev->devid != devid) 3158 continue; 3159 if (map->stripes[i].physical > physical || 3160 map->stripes[i].physical + length <= physical) 3161 continue; 3162 3163 stripe_nr = physical - map->stripes[i].physical; 3164 do_div(stripe_nr, map->stripe_len); 3165 3166 if (map->type & BTRFS_BLOCK_GROUP_RAID10) { 3167 stripe_nr = stripe_nr * map->num_stripes + i; 3168 do_div(stripe_nr, map->sub_stripes); 3169 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) { 3170 stripe_nr = stripe_nr * map->num_stripes + i; 3171 } 3172 bytenr = chunk_start + stripe_nr * map->stripe_len; 3173 WARN_ON(nr >= map->num_stripes); 3174 for (j = 0; j < nr; j++) { 3175 if (buf[j] == bytenr) 3176 break; 3177 } 3178 if (j == nr) { 3179 WARN_ON(nr >= map->num_stripes); 
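/* bytenr was not found in buf by the loop above, so record it */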
3180 buf[nr++] = bytenr; 3181 } 3182 } 3183 3184 *logical = buf; 3185 *naddrs = nr; 3186 *stripe_len = map->stripe_len; 3187 3188 free_extent_map(em); 3189 return 0; 3190 } 3191 3192 int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree, 3193 u64 logical, struct page *page) 3194 { 3195 u64 length = PAGE_CACHE_SIZE; 3196 return __btrfs_map_block(map_tree, READ, logical, &length, 3197 NULL, 0, page); 3198 } 3199 3200 static void end_bio_multi_stripe(struct bio *bio, int err) 3201 { 3202 struct btrfs_multi_bio *multi = bio->bi_private; 3203 int is_orig_bio = 0; 3204 3205 if (err) 3206 atomic_inc(&multi->error); 3207 3208 if (bio == multi->orig_bio) 3209 is_orig_bio = 1; 3210 3211 if (atomic_dec_and_test(&multi->stripes_pending)) { 3212 if (!is_orig_bio) { 3213 bio_put(bio); 3214 bio = multi->orig_bio; 3215 } 3216 bio->bi_private = multi->private; 3217 bio->bi_end_io = multi->end_io; 3218 /* only send an error to the higher layers if it is 3219 * beyond the tolerance of the multi-bio 3220 */ 3221 if (atomic_read(&multi->error) > multi->max_errors) { 3222 err = -EIO; 3223 } else if (err) { 3224 /* 3225 * this bio is actually up to date, we didn't 3226 * go over the max number of errors 3227 */ 3228 set_bit(BIO_UPTODATE, &bio->bi_flags); 3229 err = 0; 3230 } 3231 kfree(multi); 3232 3233 bio_endio(bio, err); 3234 } else if (!is_orig_bio) { 3235 bio_put(bio); 3236 } 3237 } 3238 3239 struct async_sched { 3240 struct bio *bio; 3241 int rw; 3242 struct btrfs_fs_info *info; 3243 struct btrfs_work work; 3244 }; 3245 3246 /* 3247 * see run_scheduled_bios for a description of why bios are collected for 3248 * async submit. 3249 * 3250 * This will add one bio to the pending list for a device and make sure 3251 * the work struct is scheduled. 3252 */ 3253 static noinline int schedule_bio(struct btrfs_root *root, 3254 struct btrfs_device *device, 3255 int rw, struct bio *bio) 3256 { 3257 int should_queue = 1; 3258 struct btrfs_pending_bios *pending_bios; 3259 3260 /* don't bother with additional async steps for reads, right now */ 3261 if (!(rw & REQ_WRITE)) { 3262 bio_get(bio); 3263 submit_bio(rw, bio); 3264 bio_put(bio); 3265 return 0; 3266 } 3267 3268 /* 3269 * nr_async_bios allows us to reliably return congestion to the 3270 * higher layers. 
Otherwise, the async bio makes it appear we have 3271 * made progress against dirty pages when we've really just put it 3272 * on a queue for later 3273 */ 3274 atomic_inc(&root->fs_info->nr_async_bios); 3275 WARN_ON(bio->bi_next); 3276 bio->bi_next = NULL; 3277 bio->bi_rw |= rw; 3278 3279 spin_lock(&device->io_lock); 3280 if (bio->bi_rw & REQ_SYNC) 3281 pending_bios = &device->pending_sync_bios; 3282 else 3283 pending_bios = &device->pending_bios; 3284 3285 if (pending_bios->tail) 3286 pending_bios->tail->bi_next = bio; 3287 3288 pending_bios->tail = bio; 3289 if (!pending_bios->head) 3290 pending_bios->head = bio; 3291 if (device->running_pending) 3292 should_queue = 0; 3293 3294 spin_unlock(&device->io_lock); 3295 3296 if (should_queue) 3297 btrfs_queue_worker(&root->fs_info->submit_workers, 3298 &device->work); 3299 return 0; 3300 } 3301 3302 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, 3303 int mirror_num, int async_submit) 3304 { 3305 struct btrfs_mapping_tree *map_tree; 3306 struct btrfs_device *dev; 3307 struct bio *first_bio = bio; 3308 u64 logical = (u64)bio->bi_sector << 9; 3309 u64 length = 0; 3310 u64 map_length; 3311 struct btrfs_multi_bio *multi = NULL; 3312 int ret; 3313 int dev_nr = 0; 3314 int total_devs = 1; 3315 3316 length = bio->bi_size; 3317 map_tree = &root->fs_info->mapping_tree; 3318 map_length = length; 3319 3320 ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi, 3321 mirror_num); 3322 BUG_ON(ret); 3323 3324 total_devs = multi->num_stripes; 3325 if (map_length < length) { 3326 printk(KERN_CRIT "mapping failed logical %llu bio len %llu " 3327 "len %llu\n", (unsigned long long)logical, 3328 (unsigned long long)length, 3329 (unsigned long long)map_length); 3330 BUG(); 3331 } 3332 multi->end_io = first_bio->bi_end_io; 3333 multi->private = first_bio->bi_private; 3334 multi->orig_bio = first_bio; 3335 atomic_set(&multi->stripes_pending, multi->num_stripes); 3336 3337 while (dev_nr < total_devs) { 3338 if (total_devs > 1) { 3339 if (dev_nr < total_devs - 1) { 3340 bio = bio_clone(first_bio, GFP_NOFS); 3341 BUG_ON(!bio); 3342 } else { 3343 bio = first_bio; 3344 } 3345 bio->bi_private = multi; 3346 bio->bi_end_io = end_bio_multi_stripe; 3347 } 3348 bio->bi_sector = multi->stripes[dev_nr].physical >> 9; 3349 dev = multi->stripes[dev_nr].dev; 3350 if (dev && dev->bdev && (rw != WRITE || dev->writeable)) { 3351 bio->bi_bdev = dev->bdev; 3352 if (async_submit) 3353 schedule_bio(root, dev, rw, bio); 3354 else 3355 submit_bio(rw, bio); 3356 } else { 3357 bio->bi_bdev = root->fs_info->fs_devices->latest_bdev; 3358 bio->bi_sector = logical >> 9; 3359 bio_endio(bio, -EIO); 3360 } 3361 dev_nr++; 3362 } 3363 if (total_devs == 1) 3364 kfree(multi); 3365 return 0; 3366 } 3367 3368 struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid, 3369 u8 *uuid, u8 *fsid) 3370 { 3371 struct btrfs_device *device; 3372 struct btrfs_fs_devices *cur_devices; 3373 3374 cur_devices = root->fs_info->fs_devices; 3375 while (cur_devices) { 3376 if (!fsid || 3377 !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) { 3378 device = __find_device(&cur_devices->devices, 3379 devid, uuid); 3380 if (device) 3381 return device; 3382 } 3383 cur_devices = cur_devices->seed; 3384 } 3385 return NULL; 3386 } 3387 3388 static struct btrfs_device *add_missing_dev(struct btrfs_root *root, 3389 u64 devid, u8 *dev_uuid) 3390 { 3391 struct btrfs_device *device; 3392 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; 3393 3394 device = 
kzalloc(sizeof(*device), GFP_NOFS); 3395 if (!device) 3396 return NULL; 3397 list_add(&device->dev_list, 3398 &fs_devices->devices); 3399 device->dev_root = root->fs_info->dev_root; 3400 device->devid = devid; 3401 device->work.func = pending_bios_fn; 3402 device->fs_devices = fs_devices; 3403 device->missing = 1; 3404 fs_devices->num_devices++; 3405 fs_devices->missing_devices++; 3406 spin_lock_init(&device->io_lock); 3407 INIT_LIST_HEAD(&device->dev_alloc_list); 3408 memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE); 3409 return device; 3410 } 3411 3412 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key, 3413 struct extent_buffer *leaf, 3414 struct btrfs_chunk *chunk) 3415 { 3416 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree; 3417 struct map_lookup *map; 3418 struct extent_map *em; 3419 u64 logical; 3420 u64 length; 3421 u64 devid; 3422 u8 uuid[BTRFS_UUID_SIZE]; 3423 int num_stripes; 3424 int ret; 3425 int i; 3426 3427 logical = key->offset; 3428 length = btrfs_chunk_length(leaf, chunk); 3429 3430 read_lock(&map_tree->map_tree.lock); 3431 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1); 3432 read_unlock(&map_tree->map_tree.lock); 3433 3434 /* already mapped? */ 3435 if (em && em->start <= logical && em->start + em->len > logical) { 3436 free_extent_map(em); 3437 return 0; 3438 } else if (em) { 3439 free_extent_map(em); 3440 } 3441 3442 em = alloc_extent_map(GFP_NOFS); 3443 if (!em) 3444 return -ENOMEM; 3445 num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3446 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); 3447 if (!map) { 3448 free_extent_map(em); 3449 return -ENOMEM; 3450 } 3451 3452 em->bdev = (struct block_device *)map; 3453 em->start = logical; 3454 em->len = length; 3455 em->block_start = 0; 3456 em->block_len = em->len; 3457 3458 map->num_stripes = num_stripes; 3459 map->io_width = btrfs_chunk_io_width(leaf, chunk); 3460 map->io_align = btrfs_chunk_io_align(leaf, chunk); 3461 map->sector_size = btrfs_chunk_sector_size(leaf, chunk); 3462 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk); 3463 map->type = btrfs_chunk_type(leaf, chunk); 3464 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); 3465 for (i = 0; i < num_stripes; i++) { 3466 map->stripes[i].physical = 3467 btrfs_stripe_offset_nr(leaf, chunk, i); 3468 devid = btrfs_stripe_devid_nr(leaf, chunk, i); 3469 read_extent_buffer(leaf, uuid, (unsigned long) 3470 btrfs_stripe_dev_uuid_nr(chunk, i), 3471 BTRFS_UUID_SIZE); 3472 map->stripes[i].dev = btrfs_find_device(root, devid, uuid, 3473 NULL); 3474 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) { 3475 kfree(map); 3476 free_extent_map(em); 3477 return -EIO; 3478 } 3479 if (!map->stripes[i].dev) { 3480 map->stripes[i].dev = 3481 add_missing_dev(root, devid, uuid); 3482 if (!map->stripes[i].dev) { 3483 kfree(map); 3484 free_extent_map(em); 3485 return -EIO; 3486 } 3487 } 3488 map->stripes[i].dev->in_fs_metadata = 1; 3489 } 3490 3491 write_lock(&map_tree->map_tree.lock); 3492 ret = add_extent_mapping(&map_tree->map_tree, em); 3493 write_unlock(&map_tree->map_tree.lock); 3494 BUG_ON(ret); 3495 free_extent_map(em); 3496 3497 return 0; 3498 } 3499 3500 static int fill_device_from_item(struct extent_buffer *leaf, 3501 struct btrfs_dev_item *dev_item, 3502 struct btrfs_device *device) 3503 { 3504 unsigned long ptr; 3505 3506 device->devid = btrfs_device_id(leaf, dev_item); 3507 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item); 3508 device->total_bytes = device->disk_total_bytes; 3509 
device->bytes_used = btrfs_device_bytes_used(leaf, dev_item); 3510 device->type = btrfs_device_type(leaf, dev_item); 3511 device->io_align = btrfs_device_io_align(leaf, dev_item); 3512 device->io_width = btrfs_device_io_width(leaf, dev_item); 3513 device->sector_size = btrfs_device_sector_size(leaf, dev_item); 3514 3515 ptr = (unsigned long)btrfs_device_uuid(dev_item); 3516 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 3517 3518 return 0; 3519 } 3520 3521 static int open_seed_devices(struct btrfs_root *root, u8 *fsid) 3522 { 3523 struct btrfs_fs_devices *fs_devices; 3524 int ret; 3525 3526 mutex_lock(&uuid_mutex); 3527 3528 fs_devices = root->fs_info->fs_devices->seed; 3529 while (fs_devices) { 3530 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) { 3531 ret = 0; 3532 goto out; 3533 } 3534 fs_devices = fs_devices->seed; 3535 } 3536 3537 fs_devices = find_fsid(fsid); 3538 if (!fs_devices) { 3539 ret = -ENOENT; 3540 goto out; 3541 } 3542 3543 fs_devices = clone_fs_devices(fs_devices); 3544 if (IS_ERR(fs_devices)) { 3545 ret = PTR_ERR(fs_devices); 3546 goto out; 3547 } 3548 3549 ret = __btrfs_open_devices(fs_devices, FMODE_READ, 3550 root->fs_info->bdev_holder); 3551 if (ret) 3552 goto out; 3553 3554 if (!fs_devices->seeding) { 3555 __btrfs_close_devices(fs_devices); 3556 free_fs_devices(fs_devices); 3557 ret = -EINVAL; 3558 goto out; 3559 } 3560 3561 fs_devices->seed = root->fs_info->fs_devices->seed; 3562 root->fs_info->fs_devices->seed = fs_devices; 3563 out: 3564 mutex_unlock(&uuid_mutex); 3565 return ret; 3566 } 3567 3568 static int read_one_dev(struct btrfs_root *root, 3569 struct extent_buffer *leaf, 3570 struct btrfs_dev_item *dev_item) 3571 { 3572 struct btrfs_device *device; 3573 u64 devid; 3574 int ret; 3575 u8 fs_uuid[BTRFS_UUID_SIZE]; 3576 u8 dev_uuid[BTRFS_UUID_SIZE]; 3577 3578 devid = btrfs_device_id(leaf, dev_item); 3579 read_extent_buffer(leaf, dev_uuid, 3580 (unsigned long)btrfs_device_uuid(dev_item), 3581 BTRFS_UUID_SIZE); 3582 read_extent_buffer(leaf, fs_uuid, 3583 (unsigned long)btrfs_device_fsid(dev_item), 3584 BTRFS_UUID_SIZE); 3585 3586 if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) { 3587 ret = open_seed_devices(root, fs_uuid); 3588 if (ret && !btrfs_test_opt(root, DEGRADED)) 3589 return ret; 3590 } 3591 3592 device = btrfs_find_device(root, devid, dev_uuid, fs_uuid); 3593 if (!device || !device->bdev) { 3594 if (!btrfs_test_opt(root, DEGRADED)) 3595 return -EIO; 3596 3597 if (!device) { 3598 printk(KERN_WARNING "warning devid %llu missing\n", 3599 (unsigned long long)devid); 3600 device = add_missing_dev(root, devid, dev_uuid); 3601 if (!device) 3602 return -ENOMEM; 3603 } else if (!device->missing) { 3604 /* 3605 * this happens when a device that was properly setup 3606 * in the device info lists suddenly goes bad. 
3607 * device->bdev is NULL, and so we have to set 3608 * device->missing to one here 3609 */ 3610 root->fs_info->fs_devices->missing_devices++; 3611 device->missing = 1; 3612 } 3613 } 3614 3615 if (device->fs_devices != root->fs_info->fs_devices) { 3616 BUG_ON(device->writeable); 3617 if (device->generation != 3618 btrfs_device_generation(leaf, dev_item)) 3619 return -EINVAL; 3620 } 3621 3622 fill_device_from_item(leaf, dev_item, device); 3623 device->dev_root = root->fs_info->dev_root; 3624 device->in_fs_metadata = 1; 3625 if (device->writeable) 3626 device->fs_devices->total_rw_bytes += device->total_bytes; 3627 ret = 0; 3628 return ret; 3629 } 3630 3631 int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf) 3632 { 3633 struct btrfs_dev_item *dev_item; 3634 3635 dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block, 3636 dev_item); 3637 return read_one_dev(root, buf, dev_item); 3638 } 3639 3640 int btrfs_read_sys_array(struct btrfs_root *root) 3641 { 3642 struct btrfs_super_block *super_copy = &root->fs_info->super_copy; 3643 struct extent_buffer *sb; 3644 struct btrfs_disk_key *disk_key; 3645 struct btrfs_chunk *chunk; 3646 u8 *ptr; 3647 unsigned long sb_ptr; 3648 int ret = 0; 3649 u32 num_stripes; 3650 u32 array_size; 3651 u32 len = 0; 3652 u32 cur; 3653 struct btrfs_key key; 3654 3655 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET, 3656 BTRFS_SUPER_INFO_SIZE); 3657 if (!sb) 3658 return -ENOMEM; 3659 btrfs_set_buffer_uptodate(sb); 3660 btrfs_set_buffer_lockdep_class(sb, 0); 3661 3662 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); 3663 array_size = btrfs_super_sys_array_size(super_copy); 3664 3665 ptr = super_copy->sys_chunk_array; 3666 sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array); 3667 cur = 0; 3668 3669 while (cur < array_size) { 3670 disk_key = (struct btrfs_disk_key *)ptr; 3671 btrfs_disk_key_to_cpu(&key, disk_key); 3672 3673 len = sizeof(*disk_key); ptr += len; 3674 sb_ptr += len; 3675 cur += len; 3676 3677 if (key.type == BTRFS_CHUNK_ITEM_KEY) { 3678 chunk = (struct btrfs_chunk *)sb_ptr; 3679 ret = read_one_chunk(root, &key, sb, chunk); 3680 if (ret) 3681 break; 3682 num_stripes = btrfs_chunk_num_stripes(sb, chunk); 3683 len = btrfs_chunk_item_size(num_stripes); 3684 } else { 3685 ret = -EIO; 3686 break; 3687 } 3688 ptr += len; 3689 sb_ptr += len; 3690 cur += len; 3691 } 3692 free_extent_buffer(sb); 3693 return ret; 3694 } 3695 3696 int btrfs_read_chunk_tree(struct btrfs_root *root) 3697 { 3698 struct btrfs_path *path; 3699 struct extent_buffer *leaf; 3700 struct btrfs_key key; 3701 struct btrfs_key found_key; 3702 int ret; 3703 int slot; 3704 3705 root = root->fs_info->chunk_root; 3706 3707 path = btrfs_alloc_path(); 3708 if (!path) 3709 return -ENOMEM; 3710 3711 /* first we search for all of the device items, and then we 3712 * read in all of the chunk items. 
This way we can create chunk 3713 mappings that reference all of the devices that are found 3714 */ 3715 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 3716 key.offset = 0; 3717 key.type = 0; 3718 again: 3719 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 3720 if (ret < 0) 3721 goto error; 3722 while (1) { 3723 leaf = path->nodes[0]; 3724 slot = path->slots[0]; 3725 if (slot >= btrfs_header_nritems(leaf)) { 3726 ret = btrfs_next_leaf(root, path); 3727 if (ret == 0) 3728 continue; 3729 if (ret < 0) 3730 goto error; 3731 break; 3732 } 3733 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3734 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) { 3735 if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID) 3736 break; 3737 if (found_key.type == BTRFS_DEV_ITEM_KEY) { 3738 struct btrfs_dev_item *dev_item; 3739 dev_item = btrfs_item_ptr(leaf, slot, 3740 struct btrfs_dev_item); 3741 ret = read_one_dev(root, leaf, dev_item); 3742 if (ret) 3743 goto error; 3744 } 3745 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { 3746 struct btrfs_chunk *chunk; 3747 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 3748 ret = read_one_chunk(root, &found_key, leaf, chunk); 3749 if (ret) 3750 goto error; 3751 } 3752 path->slots[0]++; 3753 } 3754 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) { 3755 key.objectid = 0; 3756 btrfs_release_path(root, path); 3757 goto again; 3758 } 3759 ret = 0; 3760 error: 3761 btrfs_free_path(path); 3762 return ret; 3763 } 3764
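/*
 * A worked example of the stripe arithmetic in __btrfs_map_block() above
 * (an illustrative sketch; BTRFS_STRIPE_LEN is assumed to be 64KiB here,
 * matching its definition in volumes.h).  For a RAID0 chunk with
 * num_stripes = 2 and a request at offset = 192KiB into the chunk:
 *
 *   stripe_nr     = offset / stripe_len              = 3
 *   stripe_offset = offset - stripe_nr * stripe_len  = 0
 *   stripe_index  = stripe_nr % num_stripes          = 1   (do_div remainder)
 *   stripe_nr     = stripe_nr / num_stripes          = 1   (do_div quotient)
 *   physical      = stripes[stripe_index].physical
 *                   + stripe_offset + stripe_nr * stripe_len
 *                 = stripes[1].physical + 64KiB
 *
 * The length returned through *length is clamped to
 * stripe_len - stripe_offset, so a single mapping never crosses a stripe
 * boundary for the striped profiles.
 */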