/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"

struct map_lookup {
	u64 type;
	int io_align;
	int io_width;
	int stripe_len;
	int sector_size;
	int num_stripes;
	struct btrfs_bio_stripe stripes[];
};

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

int btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *dev;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		while (!list_empty(&fs_devices->devices)) {
			dev = list_entry(fs_devices->devices.next,
					 struct btrfs_device, dev_list);
			printk("uuid cleanup finds %s\n", dev->name);
			if (dev->bdev) {
				printk("closing\n");
				close_bdev_excl(dev->bdev);
			}
			list_del(&dev->dev_list);
			kfree(dev->name);
			kfree(dev);
		}
		list_del(&fs_devices->list);
		kfree(fs_devices);
	}
	return 0;
}

static struct btrfs_device *__find_device(struct list_head *head, u64 devid)
{
	struct btrfs_device *dev;
	struct list_head *cur;

	list_for_each(cur, head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (dev->devid == devid)
			return dev;
	}
	return NULL;
}

static struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct list_head *cur;
	struct btrfs_fs_devices *fs_devices;

	list_for_each(cur, &fs_uuids) {
		fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}
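
/*
 * add a device found during scanning under the fs_devices entry that
 * matches its fsid, creating either struct as needed.  The copy with
 * the highest generation number becomes latest_devid and the smallest
 * devid becomes lowest_devid; both are resolved to open block devices
 * later in btrfs_open_devices.
 */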
static int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		fs_devices->lowest_devid = (u64)-1;
		fs_devices->num_devices = 0;
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid);
	}
	if (!device) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		list_add(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	if (fs_devices->lowest_devid > devid) {
		fs_devices->lowest_devid = devid;
		printk("lowest devid now %Lu\n", devid);
	}
	*fs_devices_ret = fs_devices;
	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;

	mutex_lock(&uuid_mutex);
	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (device->bdev) {
			close_bdev_excl(device->bdev);
			printk("close devices closes %s\n", device->name);
		}
		device->bdev = NULL;
	}
	mutex_unlock(&uuid_mutex);
	return 0;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       int flags, void *holder)
{
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;
	int ret;

	mutex_lock(&uuid_mutex);
	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		bdev = open_bdev_excl(device->name, flags, holder);
		printk("opening %s devid %Lu\n", device->name, device->devid);
		if (IS_ERR(bdev)) {
			printk("open %s failed\n", device->name);
			ret = PTR_ERR(bdev);
			goto fail;
		}
		if (device->devid == fs_devices->latest_devid)
			fs_devices->latest_bdev = bdev;
		if (device->devid == fs_devices->lowest_devid) {
			fs_devices->lowest_bdev = bdev;
			printk("lowest bdev %s\n", device->name);
		}
		device->bdev = bdev;
	}
	mutex_unlock(&uuid_mutex);
	return 0;
fail:
	mutex_unlock(&uuid_mutex);
	btrfs_close_devices(fs_devices);
	return ret;
}
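
/*
 * read the super block off of one device and, if the btrfs magic is
 * present, register the device in the global uuid list with
 * device_list_add.  The block device is only held open long enough to
 * read the super; btrfs_open_devices does the long term open.
 */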
int btrfs_scan_one_device(const char *path, int flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;

	mutex_lock(&uuid_mutex);

	printk("scan one opens %s\n", path);
	bdev = open_bdev_excl(path, flags, holder);

	if (IS_ERR(bdev)) {
		printk("open failed\n");
		ret = PTR_ERR(bdev);
		goto error;
	}

	ret = set_blocksize(bdev, 4096);
	if (ret)
		goto error_close;
	bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
	if (!bh) {
		ret = -EIO;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
		    sizeof(disk_super->magic))) {
		printk("no btrfs found on %s\n", path);
		ret = -EINVAL;
		goto error_brelse;
	}
	devid = le64_to_cpu(disk_super->dev_item.devid);
	printk("found device %Lu on %s\n", devid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

error_brelse:
	brelse(bh);
error_close:
	close_bdev_excl(bdev);
	printk("scan one closes bdev %s\n", path);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}

/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 */
static int find_free_dev_extent(struct btrfs_trans_handle *trans,
				struct btrfs_device *device,
				struct btrfs_path *path,
				u64 num_bytes, u64 *start)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	u64 hole_size = 0;
	u64 last_byte = 0;
	u64 search_start = 0;
	u64 search_end = device->total_bytes;
	int ret;
	int slot = 0;
	int start_found;
	struct extent_buffer *l;

	start_found = 0;
	path->reada = 2;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max((u64)1024 * 1024, search_start);
	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	ret = btrfs_previous_item(root, path, 0, key.type);
	if (ret < 0)
		goto error;
	l = path->nodes[0];
	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
no_more_items:
			if (!start_found) {
				if (search_start >= search_end) {
					ret = -ENOSPC;
					goto error;
				}
				*start = search_start;
				start_found = 1;
				goto check_pending;
			}
			*start = last_byte > search_start ?
				last_byte : search_start;
			if (search_end <= *start) {
				ret = -ENOSPC;
				goto error;
			}
			goto check_pending;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			goto no_more_items;

		if (key.offset >= search_start && key.offset > last_byte &&
		    start_found) {
			if (last_byte < search_start)
				last_byte = search_start;
			hole_size = key.offset - last_byte;
			if (key.offset > last_byte &&
			    hole_size >= num_bytes) {
				*start = last_byte;
				goto check_pending;
			}
		}
		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		start_found = 1;
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
next:
		path->slots[0]++;
		cond_resched();
	}
check_pending:
	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	btrfs_release_path(root, path);
	BUG_ON(*start < search_start);

	if (*start + num_bytes > search_end) {
		ret = -ENOSPC;
		goto error;
	}
	/* check for pending inserts here */
	return 0;

error:
	btrfs_release_path(root, path);
	return ret;
}
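
/*
 * reserve num_bytes of free space on the device and record the
 * reservation with a dev extent item keyed by (devid, start).  The
 * owner stored in the item is whatever the caller passes in;
 * btrfs_alloc_chunk uses the logical start of the new chunk.
 */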
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 owner, u64 num_bytes, u64 *start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_free_dev_extent(trans, device, path, num_bytes, start);
	if (ret)
		goto err;

	key.objectid = device->devid;
	key.offset = *start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_owner(leaf, extent, owner);
	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
err:
	btrfs_free_path(path);
	return ret;
}

static int find_next_chunk(struct btrfs_root *root, u64 *objectid)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = (u64)-1;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*objectid = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.objectid + found_key.offset;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
			   u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_release_path(root, path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;
	u64 free_devid;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_next_devid(root, path, &free_devid);
	if (ret)
		goto out;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = free_devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	device->devid = free_devid;
	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_DEV_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}
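
/*
 * write the in-memory fields of a btrfs_device back over its dev item
 * in the chunk root.  Used when bytes_used or the sizing fields
 * change, e.g. after a new dev extent is allocated on the device.
 */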
int btrfs_update_device(struct btrfs_trans_handle *trans,
			struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	/* the disk key is copied in along with the chunk, so account
	 * for both when checking against the array limit
	 */
	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size + sizeof(disk_key) >
	    BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}
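
/*
 * allocate a new chunk: pick devices with at least min_free bytes
 * unused, reserve a dev extent on each one, insert a chunk item
 * describing the resulting stripes into the chunk root, and cache a
 * matching map_lookup in the mapping tree so btrfs_map_block can
 * translate the new logical range without touching the tree again.
 * The chunk's logical start comes back in *start and its size in
 * *num_bytes.
 */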
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 *start,
		      u64 *num_bytes, u64 type)
{
	u64 dev_offset;
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head private_devs;
	struct list_head *dev_list = &extent_root->fs_info->fs_devices->devices;
	struct list_head *cur;
	struct extent_map_tree *em_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 physical;
	u64 calc_size = 1024 * 1024 * 1024;
	u64 min_free = calc_size;
	u64 avail;
	u64 max_avail = 0;
	int num_stripes = 1;
	int looped = 0;
	int ret;
	int index;
	int stripe_len = 64 * 1024;
	struct btrfs_key key;

	if (list_empty(dev_list))
		return -ENOSPC;

	if (type & (BTRFS_BLOCK_GROUP_RAID0))
		num_stripes = btrfs_super_num_devices(&info->super_copy);
	if (type & (BTRFS_BLOCK_GROUP_DUP))
		num_stripes = 2;
	if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
		num_stripes = min_t(u64, 2,
				    btrfs_super_num_devices(&info->super_copy));
	}
again:
	INIT_LIST_HEAD(&private_devs);
	cur = dev_list->next;
	index = 0;

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_free = calc_size * 2;

	/* build a private list of devices we will allocate from */
	while (index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_list);

		avail = device->total_bytes - device->bytes_used;
		cur = cur->next;
		if (avail > max_avail)
			max_avail = avail;
		if (avail >= min_free) {
			list_move_tail(&device->dev_list, &private_devs);
			index++;
			if (type & BTRFS_BLOCK_GROUP_DUP)
				index++;
		}
		if (cur == dev_list)
			break;
	}
	if (index < num_stripes) {
		list_splice(&private_devs, dev_list);
		if (!looped && max_avail > 0) {
			looped = 1;
			calc_size = max_avail;
			goto again;
		}
		return -ENOSPC;
	}

	ret = find_next_chunk(chunk_root, &key.objectid);
	if (ret)
		return ret;

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	if (!chunk)
		return -ENOMEM;

	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		kfree(chunk);
		return -ENOMEM;
	}

	stripes = &chunk->stripe;

	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
		*num_bytes = calc_size;
	else
		*num_bytes = calc_size * num_stripes;

	index = 0;
	printk("new chunk type %Lu start %Lu size %Lu\n",
	       type, key.objectid, *num_bytes);
	while (index < num_stripes) {
		BUG_ON(list_empty(&private_devs));
		cur = private_devs.next;
		device = list_entry(cur, struct btrfs_device, dev_list);

		/* loop over this device again if we're doing a dup group */
		if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
		    (index == num_stripes - 1))
			list_move_tail(&device->dev_list, dev_list);

		ret = btrfs_alloc_dev_extent(trans, device,
					     key.objectid,
					     calc_size, &dev_offset);
		BUG_ON(ret);
		printk("alloc chunk start %Lu size %Lu from dev %Lu type %Lu\n",
		       key.objectid, calc_size, device->devid, type);
		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);

		map->stripes[index].dev = device;
		map->stripes[index].physical = dev_offset;
		btrfs_set_stack_stripe_devid(stripes + index, device->devid);
		btrfs_set_stack_stripe_offset(stripes + index, dev_offset);
		physical = dev_offset;
		index++;
	}
	BUG_ON(!list_empty(&private_devs));

	/* key.objectid was set above */
	key.offset = *num_bytes;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->type = type;
	map->num_stripes = num_stripes;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	BUG_ON(ret);
	*start = key.objectid;

	em = alloc_extent_map(GFP_NOFS);
	if (!em) {
		kfree(map);
		kfree(chunk);
		return -ENOMEM;
	}
	em->bdev = (struct block_device *)map;
	em->start = key.objectid;
	em->len = key.offset;
	em->block_start = 0;

	kfree(chunk);

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	spin_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	BUG_ON(ret);
	spin_unlock(&em_tree->lock);
	free_extent_map(em);
	return ret;
}

void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree, GFP_NOFS);
}

void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while (1) {
		spin_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		spin_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		kfree(em->bdev);
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}
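
/*
 * translate a logical address in the chunk space into the physical
 * stripe(s) that hold it.  stripe_nr is how many full stripes precede
 * the block and stripe_offset is the remainder inside its stripe.
 * For example, with a 64K stripe_len and a RAID0 chunk over two
 * devices, chunk offset 192K yields stripe_nr 3, stripe_offset 0;
 * the do_div below then leaves stripe_nr 1 with stripe_index 1, so
 * the block lives 64K into the second device's slice of the chunk.
 * RAID1/DUP writes return one stripe per copy; reads pick the single
 * device with the smallest total_ios count.
 */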
int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_multi_bio **multi_ret)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	int stripes_allocated = 8;
	int stripe_index;
	int i;
	struct btrfs_multi_bio *multi = NULL;

	if (multi_ret && !(rw & (1 << BIO_RW)))
		stripes_allocated = 1;
again:
	if (multi_ret) {
		multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
				GFP_NOFS);
		if (!multi)
			return -ENOMEM;
	}

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, *length);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	offset = logical - em->start;

	/* if our multi bio struct is too small, back off and try again */
	if (multi_ret && (rw & (1 << BIO_RW)) &&
	    stripes_allocated < map->num_stripes &&
	    ((map->type & BTRFS_BLOCK_GROUP_RAID1) ||
	     (map->type & BTRFS_BLOCK_GROUP_DUP))) {
		stripes_allocated = map->num_stripes;
		spin_unlock(&em_tree->lock);
		free_extent_map(em);
		kfree(multi);
		goto again;
	}
	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	do_div(stripe_nr, map->stripe_len);

	stripe_offset = stripe_nr * map->stripe_len;
	BUG_ON(offset < stripe_offset);

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_offset;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		/* we limit the length of each bio to what fits in a stripe */
		*length = min_t(u64, em->len - offset,
				map->stripe_len - stripe_offset);
	} else {
		*length = em->len - offset;
	}
	if (!multi_ret)
		goto out;

	multi->num_stripes = 1;
	stripe_index = 0;
	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (rw & (1 << BIO_RW)) {
			multi->num_stripes = map->num_stripes;
		} else {
			u64 least = (u64)-1;
			struct btrfs_device *cur;

			/* for reads, pick the mirror with the fewest ios */
			for (i = 0; i < map->num_stripes; i++) {
				cur = map->stripes[i].dev;
				spin_lock(&cur->io_lock);
				if (cur->total_ios < least) {
					least = cur->total_ios;
					stripe_index = i;
				}
				spin_unlock(&cur->io_lock);
			}
		}
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw & (1 << BIO_RW))
			multi->num_stripes = map->num_stripes;
	} else {
		/*
		 * after this do_div call, stripe_nr is the number of stripes
		 * on this device we have to walk to find the data, and
		 * stripe_index is the number of our device in the stripe array
		 */
		stripe_index = do_div(stripe_nr, map->num_stripes);
	}
	BUG_ON(stripe_index >= map->num_stripes);
	BUG_ON(stripe_index != 0 && multi->num_stripes > 1);

	for (i = 0; i < multi->num_stripes; i++) {
		multi->stripes[i].physical =
			map->stripes[stripe_index].physical + stripe_offset +
			stripe_nr * map->stripe_len;
		multi->stripes[i].dev = map->stripes[stripe_index].dev;
		stripe_index++;
	}
	*multi_ret = multi;
out:
	free_extent_map(em);
	spin_unlock(&em_tree->lock);
	return 0;
}
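
/*
 * completion handler for bios that were cloned across multiple
 * stripes.  The last stripe to finish restores the caller's private
 * and end_io fields and completes the original bio, reporting an
 * error if any single stripe failed.
 */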
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_multi_stripe(struct bio *bio, int err)
#else
static int end_bio_multi_stripe(struct bio *bio,
				unsigned int bytes_done, int err)
#endif
{
	struct btrfs_multi_bio *multi = bio->bi_private;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif
	if (err)
		multi->error = err;

	if (atomic_dec_and_test(&multi->stripes_pending)) {
		bio->bi_private = multi->private;
		bio->bi_end_io = multi->end_io;

		if (!err && multi->error)
			err = multi->error;
		kfree(multi);

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
		bio_endio(bio, err);
#else
		bio_endio(bio, bio->bi_size, err);
#endif
	} else {
		bio_put(bio);
	}
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio)
{
	struct btrfs_mapping_tree *map_tree;
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	struct bio_vec *bvec;
	struct btrfs_multi_bio *multi = NULL;
	int i;
	int ret;
	int dev_nr = 0;
	int total_devs = 1;

	bio_for_each_segment(bvec, bio, i) {
		length += bvec->bv_len;
	}

	map_tree = &root->fs_info->mapping_tree;
	map_length = length;

	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi);
	BUG_ON(ret);

	total_devs = multi->num_stripes;
	if (map_length < length) {
		printk("mapping failed logical %Lu bio len %Lu "
		       "len %Lu\n", logical, length, map_length);
		BUG();
	}
	multi->end_io = first_bio->bi_end_io;
	multi->private = first_bio->bi_private;
	atomic_set(&multi->stripes_pending, multi->num_stripes);

	while (dev_nr < total_devs) {
		if (total_devs > 1) {
			if (dev_nr < total_devs - 1) {
				bio = bio_clone(first_bio, GFP_NOFS);
				BUG_ON(!bio);
			} else {
				bio = first_bio;
			}
			bio->bi_private = multi;
			bio->bi_end_io = end_bio_multi_stripe;
		}
		bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
		dev = multi->stripes[dev_nr].dev;
		bio->bi_bdev = dev->bdev;
		spin_lock(&dev->io_lock);
		dev->total_ios++;
		spin_unlock(&dev->io_lock);
		submit_bio(rw, bio);
		dev_nr++;
	}
	if (total_devs == 1)
		kfree(multi);
	return 0;
}

struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid)
{
	struct list_head *head = &root->fs_info->fs_devices->devices;

	return __find_device(head, devid);
}
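
/*
 * build the in-memory extent_map / map_lookup pair for one chunk item
 * and add it to the mapping tree.  This runs both for the chunks
 * packed into the super block's sys_chunk_array and for the chunk
 * tree proper, so a chunk that is already mapped is skipped quietly.
 */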
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	int num_stripes;
	int ret;
	int i;

	logical = key->objectid;
	length = key->offset;
	spin_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		spin_unlock(&map_tree->map_tree.lock);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}
	spin_unlock(&map_tree->map_tree.lock);

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	em->bdev = (struct block_device *)map;
	em->start = logical;
	em->len = length;
	em->block_start = 0;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		map->stripes[i].dev = btrfs_find_device(root, devid);
		if (!map->stripes[i].dev) {
			kfree(map);
			free_extent_map(em);
			return -EIO;
		}
	}

	spin_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em);
	BUG_ON(ret);
	spin_unlock(&map_tree->map_tree.lock);
	free_extent_map(em);

	return 0;
}

static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_DEV_UUID_SIZE);

	return 0;
}
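
/*
 * fill in (or create) the in-memory btrfs_device matching a dev item
 * found on disk.  A devid that was not seen during the initial scan
 * still gets a struct so later chunk lookups can resolve it, hence
 * the warning instead of a hard failure.
 */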
static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret;

	devid = btrfs_device_id(leaf, dev_item);
	device = btrfs_find_device(root, devid);
	if (!device) {
		printk("warning devid %Lu not found already\n", devid);
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			return -ENOMEM;
		list_add(&device->dev_list,
			 &root->fs_info->fs_devices->devices);
		device->total_ios = 0;
		spin_lock_init(&device->io_lock);
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;
	ret = 0;
#if 0
	ret = btrfs_open_device(device);
	if (ret)
		kfree(device);
#endif
	return ret;
}

int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
{
	struct btrfs_dev_item *dev_item;

	dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
						     dev_item);
	return read_one_dev(root, buf, dev_item);
}
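
/*
 * the super block embeds copies of the chunk items for the system
 * chunks in sys_chunk_array, so the chunk tree itself can be located
 * and read.  Walk that array and prime the mapping tree with each
 * chunk in it.
 */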
int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct extent_buffer *sb = root->fs_info->sb_buffer;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u8 *ptr;
	unsigned long sb_ptr;
	u32 cur;
	int ret;

	array_size = btrfs_super_sys_array_size(super_copy);

	/*
	 * the array is packed as (disk key, chunk item) pairs; anything
	 * other than a chunk item here is a corruption
	 */
	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);
		ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			ret = read_one_chunk(root, &key, sb, chunk);
			BUG_ON(ret);
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			BUG();
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	return 0;
}

int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* first we search for all of the device items, and then we
	 * read in all of the chunk items.  This way we can create chunk
	 * mappings that reference all of the devices that are found
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
again:
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
				break;
			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
				struct btrfs_dev_item *dev_item;
				dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
				ret = read_one_dev(root, leaf, dev_item);
				BUG_ON(ret);
			}
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
			BUG_ON(ret);
		}
		path->slots[0]++;
	}
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		key.objectid = 0;
		btrfs_release_path(root, path);
		goto again;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}