/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <asm/div64.h>
#include "compat.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"

struct map_lookup {
	u64 type;
	int io_align;
	int io_width;
	int stripe_len;
	int sector_size;
	int num_stripes;
	int sub_stripes;
	struct btrfs_bio_stripe stripes[];
};

static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

void btrfs_lock_volumes(void)
{
	mutex_lock(&uuid_mutex);
}

void btrfs_unlock_volumes(void)
{
	mutex_unlock(&uuid_mutex);
}

static void lock_chunks(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
	mutex_unlock(&root->fs_info->chunk_mutex);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		kfree(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}

int btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
	return 0;
}

static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}
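/*
 * put the bios from head..tail back on the front of the pending list,
 * ahead of anything that was added while we were running
 */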
static void requeue_list(struct btrfs_pending_bios *pending_bios,
			 struct bio *head, struct bio *tail)
{
	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device. This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block. The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested. This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline int run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long num_sync_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

	/* we want to make sure that every time we switch from the sync
	 * list to the normal list, we unplug
	 */
	num_sync_run = 0;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held). But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop. Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	/*
	 * if we're doing the regular priority list, make sure we unplug
	 * for any high prio bios we've sent down
	 */
	if (pending_bios == &device->pending_bios && num_sync_run > 0) {
		num_sync_run = 0;
		blk_run_backing_dev(bdi, NULL);
	}

	while (pending) {

		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		     pending_bios != &device->pending_sync_bios &&
		     device->pending_sync_bios.head) ||
		    (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		     device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;
		atomic_dec(&fs_info->nr_async_bios);

		if (atomic_read(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->bi_cnt) == 0);

		if (cur->bi_rw & REQ_SYNC)
			num_sync_run++;

		submit_bio(cur->bi_rw, cur);
		num_run++;
		batch_run++;
		if (need_resched()) {
			if (num_sync_run) {
				blk_run_backing_dev(bdi, NULL);
				num_sync_run = 0;
			}
			cond_resched();
		}

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested. Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop. So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				if (need_resched()) {
					if (num_sync_run) {
						blk_run_backing_dev(bdi, NULL);
						num_sync_run = 0;
					}
					cond_resched();
				}
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_requeue_work(&device->work);
			goto done;
		}
	}

	if (num_sync_run) {
		num_sync_run = 0;
		blk_run_backing_dev(bdi, NULL);
	}
	/*
	 * IO has already been through a long path to get here. Checksumming,
	 * async helper threads, perhaps compression. We've done a pretty
	 * good job of collecting a batch of IO and should just unplug
	 * the device right away.
	 *
	 * This will help anyone who is waiting on the IO, they might have
	 * already unplugged, but managed to do so before the bio they
	 * cared about found its way down here.
	 */
	blk_run_backing_dev(bdi, NULL);

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	return 0;
}

static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}
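/*
 * add a device found by a scan to the in-memory list for its fsid,
 * creating the btrfs_fs_devices entry the first time the fsid is seen.
 * A device that is already known just gets its path refreshed. The
 * highest generation seen decides latest_devid/latest_trans.
 */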
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);
	char *name;

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		mutex_init(&fs_devices->device_list_mutex);
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		spin_lock_init(&device->io_lock);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&device->dev_alloc_list);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&device->dev_list, &fs_devices->devices);
		mutex_unlock(&fs_devices->device_list_mutex);

		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	} else if (!device->name || strcmp(device->name, path)) {
		name = kstrdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		kfree(device->name);
		device->name = name;
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}
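/*
 * duplicate an fs_devices list, including a copy of every device on
 * it. btrfs_prepare_sprout uses this so the seed filesystem can keep
 * living under its old fsid.
 */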
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!fs_devices)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&fs_devices->devices);
	INIT_LIST_HEAD(&fs_devices->alloc_list);
	INIT_LIST_HEAD(&fs_devices->list);
	mutex_init(&fs_devices->device_list_mutex);
	fs_devices->latest_devid = orig->latest_devid;
	fs_devices->latest_trans = orig->latest_trans;
	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));

	mutex_lock(&orig->device_list_mutex);
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			goto error;

		device->name = kstrdup(orig_dev->name, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			goto error;
		}

		device->devid = orig_dev->devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
		spin_lock_init(&device->io_lock);
		INIT_LIST_HEAD(&device->dev_list);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	mutex_unlock(&orig->device_list_mutex);
	return fs_devices;
error:
	mutex_unlock(&orig->device_list_mutex);
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}

int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *next;

	mutex_lock(&uuid_mutex);
again:
	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata)
			continue;

		if (device->bdev) {
			close_bdev_exclusive(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		kfree(device->name);
		kfree(device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	mutex_unlock(&uuid_mutex);
	return 0;
}

static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	if (--fs_devices->opened > 0)
		return 0;

	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (device->bdev) {
			close_bdev_exclusive(device->bdev, device->mode);
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			fs_devices->rw_devices--;
		}

		device->bdev = NULL;
		device->writeable = 0;
		device->in_fs_metadata = 0;
	}
	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	return ret;
}
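/*
 * open all the devices on the list, read each super block and remember
 * which bdev holds the highest generation. Devices whose devid or uuid
 * doesn't match what is recorded in memory are skipped. Callers hold
 * uuid_mutex via btrfs_open_devices.
 */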
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		bdev = open_bdev_exclusive(device->name, flags, holder);
		if (IS_ERR(bdev)) {
			printk(KERN_INFO "open %s failed\n", device->name);
			goto error;
		}
		set_blocksize(bdev, 4096);

		bh = btrfs_read_dev_super(bdev);
		if (!bh)
			goto error_close;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_transid || device->generation > latest_transid) {
			latest_devid = devid;
			latest_transid = device->generation;
			latest_bdev = bdev;
		}

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		continue;

error_brelse:
		brelse(bh);
error_close:
		close_bdev_exclusive(bdev, FMODE_READ);
error:
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EIO;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}
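/*
 * look for a btrfs super block on the given device and register the
 * device via device_list_add if one is found. The block device is
 * only held open long enough to read the super block.
 */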
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;

	mutex_lock(&uuid_mutex);

	bdev = open_bdev_exclusive(path, flags, holder);

	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	ret = set_blocksize(bdev, 4096);
	if (ret)
		goto error_close;
	bh = btrfs_read_dev_super(bdev);
	if (!bh) {
		ret = -EIO;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	if (disk_super->label[0])
		printk(KERN_INFO "device label %s ", disk_super->label);
	else {
		/* FIXME, make a real uuid parser */
		printk(KERN_INFO "device fsid %llx-%llx ",
		       *(unsigned long long *)disk_super->fsid,
		       *(unsigned long long *)(disk_super->fsid + 8));
	}
	printk(KERN_CONT "devid %llu transid %llu %s\n",
	       (unsigned long long)devid, (unsigned long long)transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

	brelse(bh);
error_close:
	close_bdev_exclusive(bdev, flags);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}

/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 */
int find_free_dev_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *max_avail)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 hole_size = 0;
	u64 last_byte = 0;
	u64 search_start = 0;
	u64 search_end = device->total_bytes;
	int ret;
	int slot = 0;
	int start_found;
	struct extent_buffer *l;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 2;
	start_found = 0;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max((u64)1024 * 1024, search_start);

	if (root->fs_info->alloc_start + num_bytes <= device->total_bytes)
		search_start = max(root->fs_info->alloc_start, search_start);

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto error;
		if (ret > 0)
			start_found = 1;
	}
	l = path->nodes[0];
	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
no_more_items:
			if (!start_found) {
				if (search_start >= search_end) {
					ret = -ENOSPC;
					goto error;
				}
				*start = search_start;
				start_found = 1;
				goto check_pending;
			}
			*start = last_byte > search_start ?
				last_byte : search_start;
			if (search_end <= *start) {
				ret = -ENOSPC;
				goto error;
			}
			goto check_pending;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			goto no_more_items;

		if (key.offset >= search_start && key.offset > last_byte &&
		    start_found) {
			if (last_byte < search_start)
				last_byte = search_start;
			hole_size = key.offset - last_byte;

			if (hole_size > *max_avail)
				*max_avail = hole_size;

			if (key.offset > last_byte &&
			    hole_size >= num_bytes) {
				*start = last_byte;
				goto check_pending;
			}
		}
		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		start_found = 1;
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
next:
		path->slots[0]++;
		cond_resched();
	}
check_pending:
	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	BUG_ON(*start < search_start);

	if (*start + num_bytes > search_end) {
		ret = -ENOSPC;
		goto error;
	}
	/* check for pending inserts here */
	ret = 0;

error:
	btrfs_free_path(path);
	return ret;
}
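/*
 * remove the device extent item covering 'start' on this device and
 * subtract its length from device->bytes_used
 */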
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		BUG_ON(ret);
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		ret = 0;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	}
	BUG_ON(ret);

	if (device->bytes_used > 0)
		device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
	ret = btrfs_del_item(trans, root, path);
	BUG_ON(ret);

	btrfs_free_path(path);
	return ret;
}
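/*
 * insert a device extent item mapping [start, start + num_bytes) on
 * this device back to the owning chunk
 */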
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);
	return ret;
}
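/*
 * find the offset just past the end of the last chunk allocated for
 * a given objectid in the chunk tree. An empty tree yields offset 0.
 */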
static noinline int find_next_chunk(struct btrfs_root *root,
				    u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
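/*
 * delete the device item for this device from the chunk tree, inside
 * its own transaction
 */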
static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;
	lock_chunks(root);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);
	return ret;
}

int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct btrfs_device *next_device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	u64 all_avail;
	u64 devid;
	u64 num_devices;
	u8 *dev_uuid;
	int ret = 0;

	mutex_lock(&uuid_mutex);
	mutex_lock(&root->fs_info->volume_mutex);

	all_avail = root->fs_info->avail_data_alloc_bits |
		root->fs_info->avail_system_alloc_bits |
		root->fs_info->avail_metadata_alloc_bits;

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
	    root->fs_info->fs_devices->num_devices <= 4) {
		printk(KERN_ERR "btrfs: unable to go below four devices "
		       "on raid10\n");
		ret = -EINVAL;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
	    root->fs_info->fs_devices->num_devices <= 2) {
		printk(KERN_ERR "btrfs: unable to go below two "
		       "devices on raid1\n");
		ret = -EINVAL;
		goto out;
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		device = NULL;
		devices = &root->fs_info->fs_devices->devices;
		mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
				device = tmp;
				break;
			}
		}
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		bdev = NULL;
		bh = NULL;
		disk_super = NULL;
		if (!device) {
			printk(KERN_ERR "btrfs: no missing devices found to "
			       "remove\n");
			goto out;
		}
	} else {
		bdev = open_bdev_exclusive(device_path, FMODE_READ,
					   root->fs_info->bdev_holder);
		if (IS_ERR(bdev)) {
			ret = PTR_ERR(bdev);
			goto out;
		}

		set_blocksize(bdev, 4096);
		bh = btrfs_read_dev_super(bdev);
		if (!bh) {
			ret = -EIO;
			goto error_close;
		}
		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		dev_uuid = disk_super->dev_item.uuid;
		device = btrfs_find_device(root, devid, dev_uuid,
					   disk_super->fsid);
		if (!device) {
			ret = -ENOENT;
			goto error_brelse;
		}
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		printk(KERN_ERR "btrfs: unable to remove the only writeable "
		       "device\n");
		ret = -EINVAL;
		goto error_brelse;
	}

	if (device->writeable) {
		list_del_init(&device->dev_alloc_list);
		root->fs_info->fs_devices->rw_devices--;
	}

	ret = btrfs_shrink_device(device, 0);
	if (ret)
		goto error_brelse;

	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_brelse;

	device->in_fs_metadata = 0;

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers.
	 */
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_init(&device->dev_list);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	device->fs_devices->num_devices--;

	if (device->missing)
		root->fs_info->fs_devices->missing_devices--;

	next_device = list_entry(root->fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (device->bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_device->bdev;
	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
		root->fs_info->fs_devices->latest_bdev = next_device->bdev;

	if (device->bdev) {
		close_bdev_exclusive(device->bdev, device->mode);
		device->bdev = NULL;
		device->fs_devices->open_devices--;
	}

	num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices);

	if (device->fs_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == device->fs_devices)
				break;
			fs_devices = fs_devices->seed;
		}
		fs_devices->seed = device->fs_devices->seed;
		device->fs_devices->seed = NULL;
		__btrfs_close_devices(device->fs_devices);
		free_fs_devices(device->fs_devices);
	}

	/*
	 * at this point, the device is zero sized. We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (device->writeable) {
		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}

	kfree(device->name);
	kfree(device);
	ret = 0;

error_brelse:
	brelse(bh);
error_close:
	if (bdev)
		close_bdev_exclusive(bdev, FMODE_READ);
out:
	mutex_unlock(&root->fs_info->volume_mutex);
	mutex_unlock(&uuid_mutex);
	return ret;
}
/*
 * does all the dirty work required for changing the file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	BUG_ON(!mutex_is_locked(&uuid_mutex));
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!seed_devices)
		return -ENOMEM;

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);
	list_splice_init(&fs_devices->devices, &seed_devices->devices);
	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	list_for_each_entry(device, &seed_devices->devices, dev_list) {
		device->fs_devices = seed_devices;
	}

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}
/*
 * store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	root = root->fs_info->chunk_root;
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(root, path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid,
				   (unsigned long)btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid,
				   (unsigned long)btrfs_device_fsid(dev_item),
				   BTRFS_UUID_SIZE);
		device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
		BUG_ON(!device);

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
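/*
 * add a new device to a mounted filesystem: set up the in-memory
 * btrfs_device, splice it into the fs_devices lists and record it in
 * the chunk tree. Adding to a seed filesystem also triggers the
 * sprout handling in btrfs_prepare_sprout/btrfs_finish_sprout.
 */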
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *devices;
	struct super_block *sb = root->fs_info->sb;
	u64 total_bytes;
	int seeding_dev = 0;
	int ret = 0;

	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
		return -EINVAL;

	bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (root->fs_info->fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);
	mutex_lock(&root->fs_info->volume_mutex);

	devices = &root->fs_info->fs_devices->devices;
	/*
	 * we have the volume lock, so we don't need the extra
	 * device list mutex while reading the list here.
	 */
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			goto error;
		}
	}

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		/* we can safely leave the fs_devices entry around */
		ret = -ENOMEM;
		goto error;
	}

	device->name = kstrdup(device_path, GFP_NOFS);
	if (!device->name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}

	ret = find_next_devid(root, &device->devid);
	if (ret) {
		kfree(device);
		goto error;
	}

	trans = btrfs_start_transaction(root, 0);
	lock_chunks(root);

	device->writeable = 1;
	device->work.func = pending_bios_fn;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->generation = trans->transid;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->disk_total_bytes = device->total_bytes;
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->mode = 0;
	set_blocksize(device->bdev, 4096);

	if (seeding_dev) {
		sb->s_flags &= ~MS_RDONLY;
		ret = btrfs_prepare_sprout(trans, root);
		BUG_ON(ret);
	}

	device->fs_devices = root->fs_info->fs_devices;

	/*
	 * we don't want write_supers to jump in here with our device
	 * half setup
	 */
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
	root->fs_info->fs_devices->rw_devices++;
	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;

	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
		root->fs_info->fs_devices->rotating = 1;

	total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
	btrfs_set_super_total_bytes(&root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
	btrfs_set_super_num_devices(&root->fs_info->super_copy,
				    total_bytes + 1);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (seeding_dev) {
		ret = init_first_rw_device(trans, root, device);
		BUG_ON(ret);
		ret = btrfs_finish_sprout(trans, root);
		BUG_ON(ret);
	} else {
		ret = btrfs_add_device(trans, root, device);
	}

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(root->fs_info);

	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);

		ret = btrfs_relocate_sys_chunks(root);
		BUG_ON(ret);
	}
out:
	mutex_unlock(&root->fs_info->volume_mutex);
	return ret;
error:
	close_bdev_exclusive(bdev, 0);
	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	goto out;
}
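/*
 * write the in-memory state of 'device' back into its device item in
 * the chunk tree
 */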
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
			       struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		&device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	if (!device->writeable)
		return -EACCES;
	if (new_size <= device->total_bytes)
		return -EINVAL;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	device->fs_devices->total_rw_bytes += diff;

	device->total_bytes = new_size;
	device->disk_total_bytes = new_size;
	btrfs_clear_space_info_full(device->dev_root->fs_info);

	return btrfs_update_device(trans, device);
}

int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	int ret;
	lock_chunks(device->dev_root);
	ret = __btrfs_grow_device(trans, device, new_size);
	unlock_chunks(device->dev_root);
	return ret;
}

static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	BUG_ON(ret);

	ret = btrfs_del_item(trans, root, path);
	BUG_ON(ret);

	btrfs_free_path(path);
	return 0;
}
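/*
 * remove the chunk description matching chunk_objectid/chunk_offset
 * from the sys_chunk_array embedded in the super block copy
 */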
static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
			       chunk_offset)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	return ret;
}

static int btrfs_relocate_chunk(struct btrfs_root *root,
				u64 chunk_tree, u64 chunk_objectid,
				u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	struct extent_map *em;
	struct map_lookup *map;
	int ret;
	int i;

	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	ret = btrfs_can_relocate(extent_root, chunk_offset);
	if (ret)
		return -ENOSPC;

	/* step one, relocate all the extents inside this chunk */
	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	BUG_ON(!trans);

	lock_chunks(root);

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	BUG_ON(em->start > chunk_offset ||
	       em->start + em->len < chunk_offset);
	map = (struct map_lookup *)em->bdev;

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
					    map->stripes[i].physical);
		BUG_ON(ret);

		if (map->stripes[i].dev) {
			ret = btrfs_update_device(trans, map->stripes[i].dev);
			BUG_ON(ret);
		}
	}
	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
			       chunk_offset);

	BUG_ON(ret);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
		BUG_ON(ret);
	}

	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
	BUG_ON(ret);

	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	kfree(map);
	em->bdev = NULL;

	/* once for the tree */
	free_extent_map(em);
	/* once for us */
	free_extent_map(em);

	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	return 0;
}
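/*
 * walk the chunk tree backwards and relocate every system chunk.
 * Chunks that fail with -ENOSPC are retried once after the others have
 * moved; a second failure gives up with -ENOSPC.
 */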
static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
{
	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_tree = chunk_root->root_key.objectid;
	u64 chunk_type;
	bool retried = false;
	int failed = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;
		BUG_ON(ret == 0);

		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);
		if (ret < 0)
			goto error;
		if (ret > 0)
			break;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		btrfs_release_path(chunk_root, path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
						   found_key.objectid,
						   found_key.offset);
			if (ret == -ENOSPC)
				failed++;
			else if (ret)
				BUG();
		}

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		WARN_ON(1);
		ret = -ENOSPC;
	}
error:
	btrfs_free_path(path);
	return ret;
}

static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}
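/*
 * rebalance the chunks of the filesystem: step one shrinks and regrows
 * each device to free up a little room on it, step two walks the chunk
 * tree backwards and relocates every chunk so it gets laid out again
 * across the current set of devices
 */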
int btrfs_balance(struct btrfs_root *dev_root)
{
	int ret;
	struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
	struct btrfs_device *device;
	u64 old_size;
	u64 size_to_free;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_key found_key;

	if (dev_root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	mutex_lock(&dev_root->fs_info->volume_mutex);
	dev_root = dev_root->fs_info->dev_root;

	/* step one make some room on all the devices */
	list_for_each_entry(device, devices, dev_list) {
		old_size = device->total_bytes;
		size_to_free = div_factor(old_size, 1);
		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
		if (!device->writeable ||
		    device->total_bytes - device->bytes_used > size_to_free)
			continue;

		ret = btrfs_shrink_device(device, old_size - size_to_free);
		if (ret == -ENOSPC)
			break;
		BUG_ON(ret);

		trans = btrfs_start_transaction(dev_root, 0);
		BUG_ON(!trans);

		ret = btrfs_grow_device(trans, device, old_size);
		BUG_ON(ret);

		btrfs_end_transaction(trans, dev_root);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			break;

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret)
			break;

		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != key.objectid)
			break;

		/* chunk zero is special */
		if (found_key.offset == 0)
			break;

		btrfs_release_path(chunk_root, path);
		ret = btrfs_relocate_chunk(chunk_root,
					   chunk_root->root_key.objectid,
					   found_key.objectid,
					   found_key.offset);
		BUG_ON(ret && ret != -ENOSPC);
		key.offset = found_key.offset - 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	mutex_unlock(&dev_root->fs_info->volume_mutex);
	return ret;
}

/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	int failed = 0;
	bool retried = false;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 old_size = device->total_bytes;
	u64 diff = device->total_bytes - new_size;

	if (new_size >= device->total_bytes)
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 2;

	lock_chunks(root);

	device->total_bytes = new_size;
	if (device->writeable)
		device->fs_devices->total_rw_bytes -= diff;
	unlock_chunks(root);

again:
	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto done;

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
			btrfs_release_path(root, path);
			break;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid) {
			btrfs_release_path(root, path);
			break;
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size) {
			btrfs_release_path(root, path);
			break;
		}

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(root, path);

		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
					   chunk_offset);
		if (ret && ret != -ENOSPC)
			goto done;
		if (ret == -ENOSPC)
			failed++;
		key.offset -= 1;
	}

	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		ret = -ENOSPC;
		lock_chunks(root);

		device->total_bytes = old_size;
		if (device->writeable)
			device->fs_devices->total_rw_bytes += diff;
		unlock_chunks(root);
		goto done;
	}

	/* Shrinking succeeded, else we would be at "done". */
	trans = btrfs_start_transaction(root, 0);
	lock_chunks(root);

	device->disk_total_bytes = new_size;
	/* Now btrfs_update_device() will change the on-disk size. */
	ret = btrfs_update_device(trans, device);
	if (ret) {
		unlock_chunks(root);
		btrfs_end_transaction(trans, root);
		goto done;
	}
	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy, old_total - diff);
	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
done:
	btrfs_free_path(path);
	return ret;
}
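/*
 * append a key and chunk item to the sys_chunk_array in the super
 * block copy, or return -EFBIG if the array is out of room
 */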
static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_key *key,
				  struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}

static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size,
					int num_stripes, int sub_stripes)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
		return calc_size;
	else if (type & BTRFS_BLOCK_GROUP_RAID10)
		return calc_size * (num_stripes / sub_stripes);
	else
		return calc_size * num_stripes;
}
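
/*
 * Illustrative values for chunk_bytes_by_type() (hypothetical sizes):
 *
 *	RAID1/DUP: calc_size = 1GB, num_stripes = 2 -> 1GB of chunk space
 *	RAID0:     calc_size = 1GB, num_stripes = 4 -> 4GB
 *	RAID10:    calc_size = 1GB, num_stripes = 4, sub_stripes = 2
 *	           -> 1GB * (4 / 2) = 2GB
 *
 * mirrored profiles expose one stripe worth of bytes, striped profiles
 * expose every stripe, and RAID10 exposes one stripe per mirror pair.
 */
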
static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
			       struct btrfs_root *extent_root,
			       struct map_lookup **map_ret,
			       u64 *num_bytes, u64 *stripe_size,
			       u64 start, u64 type)
{
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_device *device = NULL;
	struct btrfs_fs_devices *fs_devices = info->fs_devices;
	struct list_head *cur;
	struct map_lookup *map = NULL;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct list_head private_devs;
	int min_stripe_size = 1 * 1024 * 1024;
	u64 calc_size = 1024 * 1024 * 1024;
	u64 max_chunk_size = calc_size;
	u64 min_free;
	u64 avail;
	u64 max_avail = 0;
	u64 dev_offset;
	int num_stripes = 1;
	int min_stripes = 1;
	int sub_stripes = 0;
	int looped = 0;
	int ret;
	int index;
	int stripe_len = 64 * 1024;

	if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
	    (type & BTRFS_BLOCK_GROUP_DUP)) {
		WARN_ON(1);
		type &= ~BTRFS_BLOCK_GROUP_DUP;
	}
	if (list_empty(&fs_devices->alloc_list))
		return -ENOSPC;

	if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
		num_stripes = fs_devices->rw_devices;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_DUP)) {
		num_stripes = 2;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
		if (fs_devices->rw_devices < 2)
			return -ENOSPC;
		num_stripes = 2;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
		num_stripes = fs_devices->rw_devices;
		if (num_stripes < 4)
			return -ENOSPC;
		num_stripes &= ~(u32)1;
		sub_stripes = 2;
		min_stripes = 4;
	}

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_chunk_size = 10 * calc_size;
		min_stripe_size = 64 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		max_chunk_size = 256 * 1024 * 1024;
		min_stripe_size = 32 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		calc_size = 8 * 1024 * 1024;
		max_chunk_size = calc_size * 2;
		min_stripe_size = 1 * 1024 * 1024;
	}

	/* we don't want a chunk larger than 10% of writeable space */
	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
			     max_chunk_size);

again:
	max_avail = 0;
	if (!map || map->num_stripes != num_stripes) {
		kfree(map);
		map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
		if (!map)
			return -ENOMEM;
		map->num_stripes = num_stripes;
	}

	if (calc_size * num_stripes > max_chunk_size) {
		calc_size = max_chunk_size;
		do_div(calc_size, num_stripes);
		do_div(calc_size, stripe_len);
		calc_size *= stripe_len;
	}

	/* we don't want tiny stripes */
	if (!looped)
		calc_size = max_t(u64, min_stripe_size, calc_size);

	/*
	 * we're about to do_div by the stripe_len so let's make sure
	 * we end up with something bigger than a stripe
	 */
	calc_size = max_t(u64, calc_size, stripe_len * 4);

	do_div(calc_size, stripe_len);
	calc_size *= stripe_len;

	cur = fs_devices->alloc_list.next;
	index = 0;

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_free = calc_size * 2;
	else
		min_free = calc_size;

	/*
	 * we add 1MB because we never use the first 1MB of the device;
	 * unless we've looped, in which case we are likely allocating
	 * the maximum amount of space left already
	 */
	if (!looped)
		min_free += 1024 * 1024;

	INIT_LIST_HEAD(&private_devs);
	while (index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_alloc_list);
		BUG_ON(!device->writeable);
		if (device->total_bytes > device->bytes_used)
			avail = device->total_bytes - device->bytes_used;
		else
			avail = 0;
		cur = cur->next;

		if (device->in_fs_metadata && avail >= min_free) {
			ret = find_free_dev_extent(trans, device,
						   min_free, &dev_offset,
						   &max_avail);
			if (ret == 0) {
				list_move_tail(&device->dev_alloc_list,
					       &private_devs);
				map->stripes[index].dev = device;
				map->stripes[index].physical = dev_offset;
				index++;
				if (type & BTRFS_BLOCK_GROUP_DUP) {
					map->stripes[index].dev = device;
					map->stripes[index].physical =
						dev_offset + calc_size;
					index++;
				}
			}
		} else if (device->in_fs_metadata && avail > max_avail)
			max_avail = avail;
		if (cur == &fs_devices->alloc_list)
			break;
	}
	list_splice(&private_devs, &fs_devices->alloc_list);
	if (index < num_stripes) {
		if (index >= min_stripes) {
			num_stripes = index;
			if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
				num_stripes /= sub_stripes;
				num_stripes *= sub_stripes;
			}
			looped = 1;
			goto again;
		}
		if (!looped && max_avail > 0) {
			looped = 1;
			calc_size = max_avail;
			goto again;
		}
		kfree(map);
		return -ENOSPC;
	}
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->type = type;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	*map_ret = map;
	*stripe_size = calc_size;
	*num_bytes = chunk_bytes_by_type(type, calc_size,
					 num_stripes, sub_stripes);

	em = alloc_extent_map(GFP_NOFS);
	if (!em) {
		kfree(map);
		return -ENOMEM;
	}
	em->bdev = (struct block_device *)map;
	em->start = start;
	em->len = *num_bytes;
	em->block_start = 0;
	em->block_len = em->len;

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);
	BUG_ON(ret);
	free_extent_map(em);

	ret = btrfs_make_block_group(trans, extent_root, 0, type,
				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
				     start, *num_bytes);
	BUG_ON(ret);

	index = 0;
	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		dev_offset = map->stripes[index].physical;

		ret = btrfs_alloc_dev_extent(trans, device,
					     info->chunk_root->root_key.objectid,
					     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
					     start, dev_offset, calc_size);
		BUG_ON(ret);
		index++;
	}

	return 0;
}
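
/*
 * Sketch of the calc_size rounding in __btrfs_alloc_chunk() (assumed
 * numbers): with max_chunk_size = 256MB spread over num_stripes = 3 and
 * stripe_len = 64K,
 *
 *	calc_size = 268435456 / 3 = 89478485
 *	do_div(calc_size, 65536); calc_size *= 65536;	-> 89456640
 *
 * i.e. each stripe is rounded down to a stripe_len multiple before device
 * extents are reserved.
 */
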
static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
				struct btrfs_root *extent_root,
				struct map_lookup *map, u64 chunk_offset,
				u64 chunk_size, u64 stripe_size)
{
	u64 dev_offset;
	struct btrfs_key key;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_device *device;
	struct btrfs_chunk *chunk;
	struct btrfs_stripe *stripe;
	size_t item_size = btrfs_chunk_item_size(map->num_stripes);
	int index = 0;
	int ret;

	chunk = kzalloc(item_size, GFP_NOFS);
	if (!chunk)
		return -ENOMEM;

	index = 0;
	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		device->bytes_used += stripe_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);
		index++;
	}

	index = 0;
	stripe = &chunk->stripe;
	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		dev_offset = map->stripes[index].physical;

		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		stripe++;
		index++;
	}

	btrfs_set_stack_chunk_length(chunk, chunk_size);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
	btrfs_set_stack_chunk_type(chunk, map->type);
	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_offset;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
	BUG_ON(ret);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk,
					     item_size);
		BUG_ON(ret);
	}
	kfree(chunk);
	return 0;
}
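
/*
 * On-disk shape of the item __finish_chunk_alloc() inserts (sketch): a
 * btrfs_chunk header whose embedded first stripe (&chunk->stripe above)
 * is followed by the remaining btrfs_stripe entries, which is why
 * item_size comes from btrfs_chunk_item_size(map->num_stripes). A
 * two-stripe DUP chunk, for example, serializes roughly as:
 *
 *	struct btrfs_chunk (length, owner, stripe_len, type, ...)
 *	  stripe 0: devid / offset / dev_uuid
 *	  stripe 1: devid / offset / dev_uuid
 */
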
/*
 * Chunk allocation falls into two parts. The first part does the work
 * that makes the newly allocated chunk usable, but does not do any
 * operation that modifies the chunk tree. The second part does the work
 * that requires modifying the chunk tree. This division is important for
 * the bootstrap process of adding storage to a seed btrfs.
 */
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 type)
{
	u64 chunk_offset;
	u64 chunk_size;
	u64 stripe_size;
	struct map_lookup *map;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	int ret;

	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &chunk_offset);
	if (ret)
		return ret;

	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
				  &stripe_size, chunk_offset, type);
	if (ret)
		return ret;

	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
				   chunk_size, stripe_size);
	BUG_ON(ret);
	return 0;
}

static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_device *device)
{
	u64 chunk_offset;
	u64 sys_chunk_offset;
	u64 chunk_size;
	u64 sys_chunk_size;
	u64 stripe_size;
	u64 sys_stripe_size;
	u64 alloc_profile;
	struct map_lookup *map;
	struct map_lookup *sys_map;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;

	ret = find_next_chunk(fs_info->chunk_root,
			      BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
	BUG_ON(ret);

	alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
			(fs_info->metadata_alloc_profile &
			 fs_info->avail_metadata_alloc_bits);
	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);

	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
				  &stripe_size, chunk_offset, alloc_profile);
	BUG_ON(ret);

	sys_chunk_offset = chunk_offset + chunk_size;

	alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
			(fs_info->system_alloc_profile &
			 fs_info->avail_system_alloc_bits);
	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);

	ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
				  &sys_chunk_size, &sys_stripe_size,
				  sys_chunk_offset, alloc_profile);
	BUG_ON(ret);

	ret = btrfs_add_device(trans, fs_info->chunk_root, device);
	BUG_ON(ret);

	/*
	 * Modifying the chunk tree requires allocating new blocks from
	 * both the system block group and the metadata block group, so we
	 * can only do operations that modify the chunk tree after both
	 * block groups have been created.
	 */
	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
				   chunk_size, stripe_size);
	BUG_ON(ret);

	ret = __finish_chunk_alloc(trans, extent_root, sys_map,
				   sys_chunk_offset, sys_chunk_size,
				   sys_stripe_size);
	BUG_ON(ret);
	return 0;
}

int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	int readonly = 0;
	int i;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);
	if (!em)
		return 1;

	if (btrfs_test_opt(root, DEGRADED)) {
		free_extent_map(em);
		return 0;
	}

	map = (struct map_lookup *)em->bdev;
	for (i = 0; i < map->num_stripes; i++) {
		if (!map->stripes[i].dev->writeable) {
			readonly = 1;
			break;
		}
	}
	free_extent_map(em);
	return readonly;
}

void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree, GFP_NOFS);
}

void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while (1) {
		write_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		write_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		kfree(em->bdev);
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}

int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	read_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else
		ret = 1;
	free_extent_map(em);
	return ret;
}
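
/*
 * Example return values for btrfs_num_copies() (illustrative): a RAID1 or
 * DUP chunk with num_stripes = 2 reports 2 copies, RAID10 reports
 * sub_stripes (2), and RAID0/single report 1. Read-retry code uses this
 * to bound the mirror_num it may pass back into btrfs_map_block().
 */
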
static int find_live_mirror(struct map_lookup *map, int first, int num,
			    int optimal)
{
	int i;
	if (map->stripes[optimal].dev->bdev)
		return optimal;
	for (i = first; i < first + num; i++) {
		if (map->stripes[i].dev->bdev)
			return i;
	}
	/* we couldn't find one that doesn't fail.  Just return something
	 * and the io error handling code will clean up eventually
	 */
	return optimal;
}
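
/*
 * Illustrative call (hypothetical device state): with a four-stripe
 * RAID10 map where stripe 2 has lost its bdev,
 *
 *	find_live_mirror(map, 2, map->sub_stripes, 2)
 *
 * scans stripes 2..3 and returns 3, the surviving mirror of that pair;
 * if the whole pair is dead it falls back to the optimal index and lets
 * the io error path deal with it.
 */
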
static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
			     u64 logical, u64 *length,
			     struct btrfs_multi_bio **multi_ret,
			     int mirror_num, struct page *unplug_page)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	int stripes_allocated = 8;
	int stripes_required = 1;
	int stripe_index;
	int i;
	int num_stripes;
	int max_errors = 0;
	struct btrfs_multi_bio *multi = NULL;

	if (multi_ret && !(rw & REQ_WRITE))
		stripes_allocated = 1;
again:
	if (multi_ret) {
		multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
				GFP_NOFS);
		if (!multi)
			return -ENOMEM;

		atomic_set(&multi->error, 0);
	}

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, *length);
	read_unlock(&em_tree->lock);

	if (!em && unplug_page) {
		kfree(multi);
		return 0;
	}

	if (!em) {
		printk(KERN_CRIT "unable to find logical %llu len %llu\n",
		       (unsigned long long)logical,
		       (unsigned long long)*length);
		BUG();
	}

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	offset = logical - em->start;

	if (mirror_num > map->num_stripes)
		mirror_num = 0;

	/* if our multi bio struct is too small, back off and try again */
	if (rw & REQ_WRITE) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_DUP)) {
			stripes_required = map->num_stripes;
			max_errors = 1;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripes_required = map->sub_stripes;
			max_errors = 1;
		}
	}
	if (multi_ret && (rw & REQ_WRITE) &&
	    stripes_allocated < stripes_required) {
		stripes_allocated = map->num_stripes;
		free_extent_map(em);
		kfree(multi);
		goto again;
	}
	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	do_div(stripe_nr, map->stripe_len);

	stripe_offset = stripe_nr * map->stripe_len;
	BUG_ON(offset < stripe_offset);

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_offset;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		/* we limit the length of each bio to what fits in a stripe */
		*length = min_t(u64, em->len - offset,
				map->stripe_len - stripe_offset);
	} else {
		*length = em->len - offset;
	}

	if (!multi_ret && !unplug_page)
		goto out;

	num_stripes = 1;
	stripe_index = 0;
	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (unplug_page || (rw & REQ_WRITE))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else {
			stripe_index = find_live_mirror(map, 0,
					    map->num_stripes,
					    current->pid % map->num_stripes);
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw & REQ_WRITE)
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;

		stripe_index = do_div(stripe_nr, factor);
		stripe_index *= map->sub_stripes;

		if (unplug_page || (rw & REQ_WRITE))
			num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else {
			stripe_index = find_live_mirror(map, stripe_index,
					      map->sub_stripes, stripe_index +
					      current->pid % map->sub_stripes);
		}
	} else {
		/*
		 * after this do_div call, stripe_nr is the number of stripes
		 * on this device we have to walk to find the data, and
		 * stripe_index is the number of our device in the stripe array
		 */
		stripe_index = do_div(stripe_nr, map->num_stripes);
	}
	BUG_ON(stripe_index >= map->num_stripes);

	for (i = 0; i < num_stripes; i++) {
		if (unplug_page) {
			struct btrfs_device *device;
			struct backing_dev_info *bdi;

			device = map->stripes[stripe_index].dev;
			if (device->bdev) {
				bdi = blk_get_backing_dev_info(device->bdev);
				if (bdi->unplug_io_fn)
					bdi->unplug_io_fn(bdi, unplug_page);
			}
		} else {
			multi->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset + stripe_nr * map->stripe_len;
			multi->stripes[i].dev = map->stripes[stripe_index].dev;
		}
		stripe_index++;
	}
	if (multi_ret) {
		*multi_ret = multi;
		multi->num_stripes = num_stripes;
		multi->max_errors = max_errors;
	}
out:
	free_extent_map(em);
	return 0;
}

int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_multi_bio **multi_ret, int mirror_num)
{
	return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
				 mirror_num, NULL);
}
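
/*
 * Worked example of the mapping math above (hypothetical RAID0 map with
 * stripe_len = 64K, num_stripes = 2, chunk starting at logical 0):
 * for logical = 200K,
 *
 *	stripe_nr = 200K / 64K = 3
 *	stripe_offset = 200K - 3 * 64K = 8K
 *	stripe_index = 3 % 2 = 1, stripe_nr = 3 / 2 = 1
 *	physical = stripes[1].physical + 1 * 64K + 8K
 *
 * and *length is clamped to 64K - 8K = 56K so no bio crosses a stripe
 * boundary.
 */
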
int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
		     u64 chunk_start, u64 physical, u64 devid,
		     u64 **logical, int *naddrs, int *stripe_len)
{
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	struct extent_map *em;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
	int i, j, nr = 0;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_start, 1);
	read_unlock(&em_tree->lock);

	BUG_ON(!em || em->start != chunk_start);
	map = (struct map_lookup *)em->bdev;

	length = em->len;
	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		do_div(length, map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		do_div(length, map->num_stripes);

	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
	BUG_ON(!buf);

	for (i = 0; i < map->num_stripes; i++) {
		if (devid && map->stripes[i].dev->devid != devid)
			continue;
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;

		stripe_nr = physical - map->stripes[i].physical;
		do_div(stripe_nr, map->stripe_len);

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = stripe_nr * map->num_stripes + i;
			do_div(stripe_nr, map->sub_stripes);
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		}
		bytenr = chunk_start + stripe_nr * map->stripe_len;
		WARN_ON(nr >= map->num_stripes);
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
		if (j == nr) {
			WARN_ON(nr >= map->num_stripes);
			buf[nr++] = bytenr;
		}
	}

	*logical = buf;
	*naddrs = nr;
	*stripe_len = map->stripe_len;

	free_extent_map(em);
	return 0;
}

int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
		      u64 logical, struct page *page)
{
	u64 length = PAGE_CACHE_SIZE;
	return __btrfs_map_block(map_tree, READ, logical, &length,
				 NULL, 0, page);
}

static void end_bio_multi_stripe(struct bio *bio, int err)
{
	struct btrfs_multi_bio *multi = bio->bi_private;
	int is_orig_bio = 0;

	if (err)
		atomic_inc(&multi->error);

	if (bio == multi->orig_bio)
		is_orig_bio = 1;

	if (atomic_dec_and_test(&multi->stripes_pending)) {
		if (!is_orig_bio) {
			bio_put(bio);
			bio = multi->orig_bio;
		}
		bio->bi_private = multi->private;
		bio->bi_end_io = multi->end_io;
		/* only send an error to the higher layers if it is
		 * beyond the tolerance of the multi-bio
		 */
		if (atomic_read(&multi->error) > multi->max_errors) {
			err = -EIO;
		} else if (err) {
			/*
			 * this bio is actually up to date, we didn't
			 * go over the max number of errors
			 */
			set_bit(BIO_UPTODATE, &bio->bi_flags);
			err = 0;
		}
		kfree(multi);

		bio_endio(bio, err);
	} else if (!is_orig_bio) {
		bio_put(bio);
	}
}

struct async_sched {
	struct bio *bio;
	int rw;
	struct btrfs_fs_info *info;
	struct btrfs_work work;
};
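
/*
 * Error-tolerance example for end_bio_multi_stripe() (illustrative): a
 * RAID1 write fans out to num_stripes = 2 bios with max_errors = 1. If
 * one mirror fails, the error count (1) is not above max_errors, so the
 * original bio completes with BIO_UPTODATE set; only when both mirrors
 * fail (2 > 1) does the completion report -EIO upward.
 */
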
/*
 * see run_scheduled_bios for a description of why bios are collected for
 * async submit.
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
static noinline int schedule_bio(struct btrfs_root *root,
				 struct btrfs_device *device,
				 int rw, struct bio *bio)
{
	int should_queue = 1;
	struct btrfs_pending_bios *pending_bios;

	/* don't bother with additional async steps for reads, right now */
	if (!(rw & REQ_WRITE)) {
		bio_get(bio);
		submit_bio(rw, bio);
		bio_put(bio);
		return 0;
	}

	/*
	 * nr_async_bios allows us to reliably return congestion to the
	 * higher layers.  Otherwise, the async bio makes it appear we have
	 * made progress against dirty pages when we've really just put it
	 * on a queue for later
	 */
	atomic_inc(&root->fs_info->nr_async_bios);
	WARN_ON(bio->bi_next);
	bio->bi_next = NULL;
	bio->bi_rw |= rw;

	spin_lock(&device->io_lock);
	if (bio->bi_rw & REQ_SYNC)
		pending_bios = &device->pending_sync_bios;
	else
		pending_bios = &device->pending_bios;

	if (pending_bios->tail)
		pending_bios->tail->bi_next = bio;

	pending_bios->tail = bio;
	if (!pending_bios->head)
		pending_bios->head = bio;
	if (device->running_pending)
		should_queue = 0;

	spin_unlock(&device->io_lock);

	if (should_queue)
		btrfs_queue_worker(&root->fs_info->submit_workers,
				   &device->work);
	return 0;
}

int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
		  int mirror_num, int async_submit)
{
	struct btrfs_mapping_tree *map_tree;
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	struct btrfs_multi_bio *multi = NULL;
	int ret;
	int dev_nr = 0;
	int total_devs = 1;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;

	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
			      mirror_num);
	BUG_ON(ret);

	total_devs = multi->num_stripes;
	if (map_length < length) {
		printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
		       "len %llu\n", (unsigned long long)logical,
		       (unsigned long long)length,
		       (unsigned long long)map_length);
		BUG();
	}
	multi->end_io = first_bio->bi_end_io;
	multi->private = first_bio->bi_private;
	multi->orig_bio = first_bio;
	atomic_set(&multi->stripes_pending, multi->num_stripes);

	while (dev_nr < total_devs) {
		if (total_devs > 1) {
			if (dev_nr < total_devs - 1) {
				bio = bio_clone(first_bio, GFP_NOFS);
				BUG_ON(!bio);
			} else {
				bio = first_bio;
			}
			bio->bi_private = multi;
			bio->bi_end_io = end_bio_multi_stripe;
		}
		bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
		dev = multi->stripes[dev_nr].dev;
		if (dev && dev->bdev && (rw != WRITE || dev->writeable)) {
			bio->bi_bdev = dev->bdev;
			if (async_submit)
				schedule_bio(root, dev, rw, bio);
			else
				submit_bio(rw, bio);
		} else {
			bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
			bio->bi_sector = logical >> 9;
			bio_endio(bio, -EIO);
		}
		dev_nr++;
	}
	if (total_devs == 1)
		kfree(multi);
	return 0;
}

struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
				       u8 *uuid, u8 *fsid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;

	cur_devices = root->fs_info->fs_devices;
	while (cur_devices) {
		if (!fsid ||
		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			device = __find_device(&cur_devices->devices,
					       devid, uuid);
			if (device)
				return device;
		}
		cur_devices = cur_devices->seed;
	}
	return NULL;
}
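
/*
 * Lookup-order note (illustrative call): btrfs_find_device() starts at
 * the mounted filesystem's own fs_devices and then walks the ->seed
 * chain, so
 *
 *	device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
 *
 * with fs_uuid naming a seed filesystem still resolves to the device on
 * the cloned seed fs_devices rather than failing on the sprout.
 */
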
static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device)
		return NULL;
	list_add(&device->dev_list,
		 &fs_devices->devices);
	device->dev_root = root->fs_info->dev_root;
	device->devid = devid;
	device->work.func = pending_bios_fn;
	device->fs_devices = fs_devices;
	device->missing = 1;
	fs_devices->num_devices++;
	fs_devices->missing_devices++;
	spin_lock_init(&device->io_lock);
	INIT_LIST_HEAD(&device->dev_alloc_list);
	memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
	return device;
}

static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	read_unlock(&map_tree->map_tree.lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	em->bdev = (struct block_device *)map;
	em->start = logical;
	em->len = length;
	em->block_start = 0;
	em->block_len = em->len;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
							NULL);
		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
			kfree(map);
			free_extent_map(em);
			return -EIO;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(root, devid, uuid);
			if (!map->stripes[i].dev) {
				kfree(map);
				free_extent_map(em);
				return -EIO;
			}
		}
		map->stripes[i].dev->in_fs_metadata = 1;
	}

	write_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em);
	write_unlock(&map_tree->map_tree.lock);
	BUG_ON(ret);
	free_extent_map(em);

	return 0;
}
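
/*
 * Mapping sketch for read_one_chunk() (hypothetical item): a chunk item
 * keyed (FIRST_CHUNK_TREE, CHUNK_ITEM, 1GB) with length 256MB becomes an
 * extent_map with em->start = 1GB and em->len = 256MB whose em->bdev
 * field smuggles the struct map_lookup pointer; __btrfs_map_block() later
 * looks it up by logical address and reads the stripes back out.
 */
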
static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->total_bytes = device->disk_total_bytes;
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

	return 0;
}

static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	mutex_lock(&uuid_mutex);

	fs_devices = root->fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			ret = 0;
			goto out;
		}
		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid);
	if (!fs_devices) {
		ret = -ENOENT;
		goto out;
	}

	fs_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(fs_devices)) {
		ret = PTR_ERR(fs_devices);
		goto out;
	}

	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
				   root->fs_info->bdev_holder);
	if (ret)
		goto out;

	if (!fs_devices->seeding) {
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
		ret = -EINVAL;
		goto out;
	}

	fs_devices->seed = root->fs_info->fs_devices->seed;
	root->fs_info->fs_devices->seed = fs_devices;
out:
	mutex_unlock(&uuid_mutex);
	return ret;
}
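
/*
 * Seed hookup sketch: on success open_seed_devices() splices the freshly
 * opened seed fs_devices onto the sprout's chain,
 *
 *	fs_devices->seed = root->fs_info->fs_devices->seed;
 *	root->fs_info->fs_devices->seed = fs_devices;
 *
 * so repeated sprouting builds a singly linked list of seed filesystems
 * that btrfs_find_device() can walk.
 */
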
static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid,
			   (unsigned long)btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);

	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
		ret = open_seed_devices(root, fs_uuid);
		if (ret && !btrfs_test_opt(root, DEGRADED))
			return ret;
	}

	device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
	if (!device || !device->bdev) {
		if (!btrfs_test_opt(root, DEGRADED))
			return -EIO;

		if (!device) {
			printk(KERN_WARNING "warning devid %llu missing\n",
			       (unsigned long long)devid);
			device = add_missing_dev(root, devid, dev_uuid);
			if (!device)
				return -ENOMEM;
		} else if (!device->missing) {
			/*
			 * this happens when a device that was properly set up
			 * in the device info lists suddenly goes bad.
			 * device->bdev is NULL, and so we have to set
			 * device->missing to one here
			 */
			root->fs_info->fs_devices->missing_devices++;
			device->missing = 1;
		}
	}

	if (device->fs_devices != root->fs_info->fs_devices) {
		BUG_ON(device->writeable);
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;
	device->in_fs_metadata = 1;
	if (device->writeable)
		device->fs_devices->total_rw_bytes += device->total_bytes;
	ret = 0;
	return ret;
}

int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
{
	struct btrfs_dev_item *dev_item;

	dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
						     dev_item);
	return read_one_dev(root, buf, dev_item);
}

int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	unsigned long sb_ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
					  BTRFS_SUPER_INFO_SIZE);
	if (!sb)
		return -ENOMEM;
	btrfs_set_buffer_uptodate(sb);
	btrfs_set_buffer_lockdep_class(sb, 0);

	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);
		ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			ret = read_one_chunk(root, &key, sb, chunk);
			if (ret)
				break;
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	free_extent_buffer(sb);
	return ret;
}
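
/*
 * Layout parsed by btrfs_read_sys_array() above: sys_chunk_array is a
 * packed sequence of (btrfs_disk_key, btrfs_chunk + stripes) pairs,
 *
 *	| disk_key | chunk hdr + N stripes | disk_key | chunk ... |
 *
 * cur advances by sizeof(*disk_key) and then by
 * btrfs_chunk_item_size(num_stripes) per entry until array_size is
 * consumed; any key that is not a CHUNK_ITEM is treated as corruption
 * and fails with -EIO.
 */
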
int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* first we search for all of the device items, and then we
	 * read in all of the chunk items.  This way we can create chunk
	 * mappings that reference all of the devices that are found
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
again:
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
				break;
			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
				struct btrfs_dev_item *dev_item;
				dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
				ret = read_one_dev(root, leaf, dev_item);
				if (ret)
					goto error;
			}
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		key.objectid = 0;
		btrfs_release_path(root, path);
		goto again;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
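
/*
 * Two-pass note for btrfs_read_chunk_tree() above: the first pass keys on
 * BTRFS_DEV_ITEMS_OBJECTID and consumes only DEV_ITEMs; it then resets
 * key.objectid to 0 and rescans so CHUNK_ITEMs are read after every
 * device is known, which is what lets read_one_chunk() resolve each
 * stripe's devid to a live (or explicitly "missing") btrfs_device.
 */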