1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2007,2008 Oracle. All rights reserved. 4 */ 5 6 #include <linux/sched.h> 7 #include <linux/slab.h> 8 #include <linux/rbtree.h> 9 #include <linux/mm.h> 10 #include <linux/error-injection.h> 11 #include "ctree.h" 12 #include "disk-io.h" 13 #include "transaction.h" 14 #include "print-tree.h" 15 #include "locking.h" 16 #include "volumes.h" 17 #include "qgroup.h" 18 #include "tree-mod-log.h" 19 #include "tree-checker.h" 20 21 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root 22 *root, struct btrfs_path *path, int level); 23 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root, 24 const struct btrfs_key *ins_key, struct btrfs_path *path, 25 int data_size, int extend); 26 static int push_node_left(struct btrfs_trans_handle *trans, 27 struct extent_buffer *dst, 28 struct extent_buffer *src, int empty); 29 static int balance_node_right(struct btrfs_trans_handle *trans, 30 struct extent_buffer *dst_buf, 31 struct extent_buffer *src_buf); 32 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path, 33 int level, int slot); 34 35 static const struct btrfs_csums { 36 u16 size; 37 const char name[10]; 38 const char driver[12]; 39 } btrfs_csums[] = { 40 [BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" }, 41 [BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" }, 42 [BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" }, 43 [BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b", 44 .driver = "blake2b-256" }, 45 }; 46 47 int btrfs_super_csum_size(const struct btrfs_super_block *s) 48 { 49 u16 t = btrfs_super_csum_type(s); 50 /* 51 * csum type is validated at mount time 52 */ 53 return btrfs_csums[t].size; 54 } 55 56 const char *btrfs_super_csum_name(u16 csum_type) 57 { 58 /* csum type is validated at mount time */ 59 return btrfs_csums[csum_type].name; 60 } 61 62 /* 63 * Return driver name if defined, otherwise the name that's also a valid driver 64 * name 65 */ 66 const char *btrfs_super_csum_driver(u16 csum_type) 67 { 68 /* csum type is validated at mount time */ 69 return btrfs_csums[csum_type].driver[0] ? 70 btrfs_csums[csum_type].driver : 71 btrfs_csums[csum_type].name; 72 } 73 74 size_t __attribute_const__ btrfs_get_num_csums(void) 75 { 76 return ARRAY_SIZE(btrfs_csums); 77 } 78 79 struct btrfs_path *btrfs_alloc_path(void) 80 { 81 return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS); 82 } 83 84 /* this also releases the path */ 85 void btrfs_free_path(struct btrfs_path *p) 86 { 87 if (!p) 88 return; 89 btrfs_release_path(p); 90 kmem_cache_free(btrfs_path_cachep, p); 91 } 92 93 /* 94 * path release drops references on the extent buffers in the path 95 * and it drops any locks held by this path 96 * 97 * It is safe to call this on paths that no locks or extent buffers held. 98 */ 99 noinline void btrfs_release_path(struct btrfs_path *p) 100 { 101 int i; 102 103 for (i = 0; i < BTRFS_MAX_LEVEL; i++) { 104 p->slots[i] = 0; 105 if (!p->nodes[i]) 106 continue; 107 if (p->locks[i]) { 108 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]); 109 p->locks[i] = 0; 110 } 111 free_extent_buffer(p->nodes[i]); 112 p->nodes[i] = NULL; 113 } 114 } 115 116 /* 117 * safely gets a reference on the root node of a tree. A lock 118 * is not taken, so a concurrent writer may put a different node 119 * at the root of the tree. See btrfs_lock_root_node for the 120 * looping required. 121 * 122 * The extent buffer returned by this has a reference taken, so 123 * it won't disappear. 
It may stop being the root of the tree 124 * at any time because there are no locks held. 125 */ 126 struct extent_buffer *btrfs_root_node(struct btrfs_root *root) 127 { 128 struct extent_buffer *eb; 129 130 while (1) { 131 rcu_read_lock(); 132 eb = rcu_dereference(root->node); 133 134 /* 135 * RCU really hurts here, we could free up the root node because 136 * it was COWed but we may not get the new root node yet so do 137 * the inc_not_zero dance and if it doesn't work then 138 * synchronize_rcu and try again. 139 */ 140 if (atomic_inc_not_zero(&eb->refs)) { 141 rcu_read_unlock(); 142 break; 143 } 144 rcu_read_unlock(); 145 synchronize_rcu(); 146 } 147 return eb; 148 } 149 150 /* 151 * Cowonly root (not-shareable trees, everything not subvolume or reloc roots), 152 * just get put onto a simple dirty list. Transaction walks this list to make 153 * sure they get properly updated on disk. 154 */ 155 static void add_root_to_dirty_list(struct btrfs_root *root) 156 { 157 struct btrfs_fs_info *fs_info = root->fs_info; 158 159 if (test_bit(BTRFS_ROOT_DIRTY, &root->state) || 160 !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state)) 161 return; 162 163 spin_lock(&fs_info->trans_lock); 164 if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) { 165 /* Want the extent tree to be the last on the list */ 166 if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID) 167 list_move_tail(&root->dirty_list, 168 &fs_info->dirty_cowonly_roots); 169 else 170 list_move(&root->dirty_list, 171 &fs_info->dirty_cowonly_roots); 172 } 173 spin_unlock(&fs_info->trans_lock); 174 } 175 176 /* 177 * used by snapshot creation to make a copy of a root for a tree with 178 * a given objectid. The buffer with the new root node is returned in 179 * cow_ret, and this func returns zero on success or a negative error code. 
180 */ 181 int btrfs_copy_root(struct btrfs_trans_handle *trans, 182 struct btrfs_root *root, 183 struct extent_buffer *buf, 184 struct extent_buffer **cow_ret, u64 new_root_objectid) 185 { 186 struct btrfs_fs_info *fs_info = root->fs_info; 187 struct extent_buffer *cow; 188 int ret = 0; 189 int level; 190 struct btrfs_disk_key disk_key; 191 192 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && 193 trans->transid != fs_info->running_transaction->transid); 194 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && 195 trans->transid != root->last_trans); 196 197 level = btrfs_header_level(buf); 198 if (level == 0) 199 btrfs_item_key(buf, &disk_key, 0); 200 else 201 btrfs_node_key(buf, &disk_key, 0); 202 203 cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid, 204 &disk_key, level, buf->start, 0, 205 BTRFS_NESTING_NEW_ROOT); 206 if (IS_ERR(cow)) 207 return PTR_ERR(cow); 208 209 copy_extent_buffer_full(cow, buf); 210 btrfs_set_header_bytenr(cow, cow->start); 211 btrfs_set_header_generation(cow, trans->transid); 212 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV); 213 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN | 214 BTRFS_HEADER_FLAG_RELOC); 215 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID) 216 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC); 217 else 218 btrfs_set_header_owner(cow, new_root_objectid); 219 220 write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid); 221 222 WARN_ON(btrfs_header_generation(buf) > trans->transid); 223 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID) 224 ret = btrfs_inc_ref(trans, root, cow, 1); 225 else 226 ret = btrfs_inc_ref(trans, root, cow, 0); 227 if (ret) { 228 btrfs_tree_unlock(cow); 229 free_extent_buffer(cow); 230 btrfs_abort_transaction(trans, ret); 231 return ret; 232 } 233 234 btrfs_mark_buffer_dirty(cow); 235 *cow_ret = cow; 236 return 0; 237 } 238 239 /* 240 * check if the tree block can be shared by multiple trees 241 */ 242 int btrfs_block_can_be_shared(struct btrfs_root *root, 243 struct extent_buffer *buf) 244 { 245 /* 246 * Tree blocks not in shareable trees and tree roots are never shared. 247 * If a block was allocated after the last snapshot and the block was 248 * not allocated by tree relocation, we know the block is not shared. 249 */ 250 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && 251 buf != root->node && buf != root->commit_root && 252 (btrfs_header_generation(buf) <= 253 btrfs_root_last_snapshot(&root->root_item) || 254 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))) 255 return 1; 256 257 return 0; 258 } 259 260 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans, 261 struct btrfs_root *root, 262 struct extent_buffer *buf, 263 struct extent_buffer *cow, 264 int *last_ref) 265 { 266 struct btrfs_fs_info *fs_info = root->fs_info; 267 u64 refs; 268 u64 owner; 269 u64 flags; 270 u64 new_flags = 0; 271 int ret; 272 273 /* 274 * Backrefs update rules: 275 * 276 * Always use full backrefs for extent pointers in tree block 277 * allocated by tree relocation. 278 * 279 * If a shared tree block is no longer referenced by its owner 280 * tree (btrfs_header_owner(buf) == root->root_key.objectid), 281 * use full backrefs for extent pointers in tree block. 282 * 283 * If a tree block is been relocating 284 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID), 285 * use full backrefs for extent pointers in tree block. 286 * The reason for this is some operations (such as drop tree) 287 * are only allowed for blocks use full backrefs. 
288 */ 289 290 if (btrfs_block_can_be_shared(root, buf)) { 291 ret = btrfs_lookup_extent_info(trans, fs_info, buf->start, 292 btrfs_header_level(buf), 1, 293 &refs, &flags); 294 if (ret) 295 return ret; 296 if (refs == 0) { 297 ret = -EROFS; 298 btrfs_handle_fs_error(fs_info, ret, NULL); 299 return ret; 300 } 301 } else { 302 refs = 1; 303 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID || 304 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV) 305 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF; 306 else 307 flags = 0; 308 } 309 310 owner = btrfs_header_owner(buf); 311 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID && 312 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)); 313 314 if (refs > 1) { 315 if ((owner == root->root_key.objectid || 316 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && 317 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) { 318 ret = btrfs_inc_ref(trans, root, buf, 1); 319 if (ret) 320 return ret; 321 322 if (root->root_key.objectid == 323 BTRFS_TREE_RELOC_OBJECTID) { 324 ret = btrfs_dec_ref(trans, root, buf, 0); 325 if (ret) 326 return ret; 327 ret = btrfs_inc_ref(trans, root, cow, 1); 328 if (ret) 329 return ret; 330 } 331 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF; 332 } else { 333 334 if (root->root_key.objectid == 335 BTRFS_TREE_RELOC_OBJECTID) 336 ret = btrfs_inc_ref(trans, root, cow, 1); 337 else 338 ret = btrfs_inc_ref(trans, root, cow, 0); 339 if (ret) 340 return ret; 341 } 342 if (new_flags != 0) { 343 int level = btrfs_header_level(buf); 344 345 ret = btrfs_set_disk_extent_flags(trans, buf, 346 new_flags, level); 347 if (ret) 348 return ret; 349 } 350 } else { 351 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) { 352 if (root->root_key.objectid == 353 BTRFS_TREE_RELOC_OBJECTID) 354 ret = btrfs_inc_ref(trans, root, cow, 1); 355 else 356 ret = btrfs_inc_ref(trans, root, cow, 0); 357 if (ret) 358 return ret; 359 ret = btrfs_dec_ref(trans, root, buf, 1); 360 if (ret) 361 return ret; 362 } 363 btrfs_clean_tree_block(buf); 364 *last_ref = 1; 365 } 366 return 0; 367 } 368 369 /* 370 * does the dirty work in cow of a single block. The parent block (if 371 * supplied) is updated to point to the new cow copy. The new buffer is marked 372 * dirty and returned locked. If you modify the block it needs to be marked 373 * dirty again. 374 * 375 * search_start -- an allocation hint for the new block 376 * 377 * empty_size -- a hint that you plan on doing more cow. This is the size in 378 * bytes the allocator should try to find free next to the block it returns. 379 * This is just a hint and may be ignored by the allocator. 
380 */ 381 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, 382 struct btrfs_root *root, 383 struct extent_buffer *buf, 384 struct extent_buffer *parent, int parent_slot, 385 struct extent_buffer **cow_ret, 386 u64 search_start, u64 empty_size, 387 enum btrfs_lock_nesting nest) 388 { 389 struct btrfs_fs_info *fs_info = root->fs_info; 390 struct btrfs_disk_key disk_key; 391 struct extent_buffer *cow; 392 int level, ret; 393 int last_ref = 0; 394 int unlock_orig = 0; 395 u64 parent_start = 0; 396 397 if (*cow_ret == buf) 398 unlock_orig = 1; 399 400 btrfs_assert_tree_write_locked(buf); 401 402 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && 403 trans->transid != fs_info->running_transaction->transid); 404 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && 405 trans->transid != root->last_trans); 406 407 level = btrfs_header_level(buf); 408 409 if (level == 0) 410 btrfs_item_key(buf, &disk_key, 0); 411 else 412 btrfs_node_key(buf, &disk_key, 0); 413 414 if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent) 415 parent_start = parent->start; 416 417 cow = btrfs_alloc_tree_block(trans, root, parent_start, 418 root->root_key.objectid, &disk_key, level, 419 search_start, empty_size, nest); 420 if (IS_ERR(cow)) 421 return PTR_ERR(cow); 422 423 /* cow is set to blocking by btrfs_init_new_buffer */ 424 425 copy_extent_buffer_full(cow, buf); 426 btrfs_set_header_bytenr(cow, cow->start); 427 btrfs_set_header_generation(cow, trans->transid); 428 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV); 429 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN | 430 BTRFS_HEADER_FLAG_RELOC); 431 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) 432 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC); 433 else 434 btrfs_set_header_owner(cow, root->root_key.objectid); 435 436 write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid); 437 438 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref); 439 if (ret) { 440 btrfs_tree_unlock(cow); 441 free_extent_buffer(cow); 442 btrfs_abort_transaction(trans, ret); 443 return ret; 444 } 445 446 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) { 447 ret = btrfs_reloc_cow_block(trans, root, buf, cow); 448 if (ret) { 449 btrfs_tree_unlock(cow); 450 free_extent_buffer(cow); 451 btrfs_abort_transaction(trans, ret); 452 return ret; 453 } 454 } 455 456 if (buf == root->node) { 457 WARN_ON(parent && parent != buf); 458 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID || 459 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV) 460 parent_start = buf->start; 461 462 atomic_inc(&cow->refs); 463 ret = btrfs_tree_mod_log_insert_root(root->node, cow, true); 464 BUG_ON(ret < 0); 465 rcu_assign_pointer(root->node, cow); 466 467 btrfs_free_tree_block(trans, btrfs_root_id(root), buf, 468 parent_start, last_ref); 469 free_extent_buffer(buf); 470 add_root_to_dirty_list(root); 471 } else { 472 WARN_ON(trans->transid != btrfs_header_generation(parent)); 473 btrfs_tree_mod_log_insert_key(parent, parent_slot, 474 BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS); 475 btrfs_set_node_blockptr(parent, parent_slot, 476 cow->start); 477 btrfs_set_node_ptr_generation(parent, parent_slot, 478 trans->transid); 479 btrfs_mark_buffer_dirty(parent); 480 if (last_ref) { 481 ret = btrfs_tree_mod_log_free_eb(buf); 482 if (ret) { 483 btrfs_tree_unlock(cow); 484 free_extent_buffer(cow); 485 btrfs_abort_transaction(trans, ret); 486 return ret; 487 } 488 } 489 btrfs_free_tree_block(trans, btrfs_root_id(root), buf, 490 parent_start, 
last_ref); 491 } 492 if (unlock_orig) 493 btrfs_tree_unlock(buf); 494 free_extent_buffer_stale(buf); 495 btrfs_mark_buffer_dirty(cow); 496 *cow_ret = cow; 497 return 0; 498 } 499 500 static inline int should_cow_block(struct btrfs_trans_handle *trans, 501 struct btrfs_root *root, 502 struct extent_buffer *buf) 503 { 504 if (btrfs_is_testing(root->fs_info)) 505 return 0; 506 507 /* Ensure we can see the FORCE_COW bit */ 508 smp_mb__before_atomic(); 509 510 /* 511 * We do not need to cow a block if 512 * 1) this block is not created or changed in this transaction; 513 * 2) this block does not belong to TREE_RELOC tree; 514 * 3) the root is not forced COW. 515 * 516 * What is forced COW: 517 * when we create snapshot during committing the transaction, 518 * after we've finished copying src root, we must COW the shared 519 * block to ensure the metadata consistency. 520 */ 521 if (btrfs_header_generation(buf) == trans->transid && 522 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) && 523 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID && 524 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) && 525 !test_bit(BTRFS_ROOT_FORCE_COW, &root->state)) 526 return 0; 527 return 1; 528 } 529 530 /* 531 * cows a single block, see __btrfs_cow_block for the real work. 532 * This version of it has extra checks so that a block isn't COWed more than 533 * once per transaction, as long as it hasn't been written yet 534 */ 535 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans, 536 struct btrfs_root *root, struct extent_buffer *buf, 537 struct extent_buffer *parent, int parent_slot, 538 struct extent_buffer **cow_ret, 539 enum btrfs_lock_nesting nest) 540 { 541 struct btrfs_fs_info *fs_info = root->fs_info; 542 u64 search_start; 543 int ret; 544 545 if (test_bit(BTRFS_ROOT_DELETING, &root->state)) 546 btrfs_err(fs_info, 547 "COW'ing blocks on a fs root that's being dropped"); 548 549 if (trans->transaction != fs_info->running_transaction) 550 WARN(1, KERN_CRIT "trans %llu running %llu\n", 551 trans->transid, 552 fs_info->running_transaction->transid); 553 554 if (trans->transid != fs_info->generation) 555 WARN(1, KERN_CRIT "trans %llu running %llu\n", 556 trans->transid, fs_info->generation); 557 558 if (!should_cow_block(trans, root, buf)) { 559 *cow_ret = buf; 560 return 0; 561 } 562 563 search_start = buf->start & ~((u64)SZ_1G - 1); 564 565 /* 566 * Before CoWing this block for later modification, check if it's 567 * the subtree root and do the delayed subtree trace if needed. 568 * 569 * Also We don't care about the error, as it's handled internally. 570 */ 571 btrfs_qgroup_trace_subtree_after_cow(trans, root, buf); 572 ret = __btrfs_cow_block(trans, root, buf, parent, 573 parent_slot, cow_ret, search_start, 0, nest); 574 575 trace_btrfs_cow_block(root, buf, *cow_ret); 576 577 return ret; 578 } 579 ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO); 580 581 /* 582 * helper function for defrag to decide if two blocks pointed to by a 583 * node are actually close by 584 */ 585 static int close_blocks(u64 blocknr, u64 other, u32 blocksize) 586 { 587 if (blocknr < other && other - (blocknr + blocksize) < 32768) 588 return 1; 589 if (blocknr > other && blocknr - (other + blocksize) < 32768) 590 return 1; 591 return 0; 592 } 593 594 #ifdef __LITTLE_ENDIAN 595 596 /* 597 * Compare two keys, on little-endian the disk order is same as CPU order and 598 * we can avoid the conversion. 
599 */ 600 static int comp_keys(const struct btrfs_disk_key *disk_key, 601 const struct btrfs_key *k2) 602 { 603 const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key; 604 605 return btrfs_comp_cpu_keys(k1, k2); 606 } 607 608 #else 609 610 /* 611 * compare two keys in a memcmp fashion 612 */ 613 static int comp_keys(const struct btrfs_disk_key *disk, 614 const struct btrfs_key *k2) 615 { 616 struct btrfs_key k1; 617 618 btrfs_disk_key_to_cpu(&k1, disk); 619 620 return btrfs_comp_cpu_keys(&k1, k2); 621 } 622 #endif 623 624 /* 625 * same as comp_keys only with two btrfs_key's 626 */ 627 int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2) 628 { 629 if (k1->objectid > k2->objectid) 630 return 1; 631 if (k1->objectid < k2->objectid) 632 return -1; 633 if (k1->type > k2->type) 634 return 1; 635 if (k1->type < k2->type) 636 return -1; 637 if (k1->offset > k2->offset) 638 return 1; 639 if (k1->offset < k2->offset) 640 return -1; 641 return 0; 642 } 643 644 /* 645 * this is used by the defrag code to go through all the 646 * leaves pointed to by a node and reallocate them so that 647 * disk order is close to key order 648 */ 649 int btrfs_realloc_node(struct btrfs_trans_handle *trans, 650 struct btrfs_root *root, struct extent_buffer *parent, 651 int start_slot, u64 *last_ret, 652 struct btrfs_key *progress) 653 { 654 struct btrfs_fs_info *fs_info = root->fs_info; 655 struct extent_buffer *cur; 656 u64 blocknr; 657 u64 search_start = *last_ret; 658 u64 last_block = 0; 659 u64 other; 660 u32 parent_nritems; 661 int end_slot; 662 int i; 663 int err = 0; 664 u32 blocksize; 665 int progress_passed = 0; 666 struct btrfs_disk_key disk_key; 667 668 WARN_ON(trans->transaction != fs_info->running_transaction); 669 WARN_ON(trans->transid != fs_info->generation); 670 671 parent_nritems = btrfs_header_nritems(parent); 672 blocksize = fs_info->nodesize; 673 end_slot = parent_nritems - 1; 674 675 if (parent_nritems <= 1) 676 return 0; 677 678 for (i = start_slot; i <= end_slot; i++) { 679 int close = 1; 680 681 btrfs_node_key(parent, &disk_key, i); 682 if (!progress_passed && comp_keys(&disk_key, progress) < 0) 683 continue; 684 685 progress_passed = 1; 686 blocknr = btrfs_node_blockptr(parent, i); 687 if (last_block == 0) 688 last_block = blocknr; 689 690 if (i > 0) { 691 other = btrfs_node_blockptr(parent, i - 1); 692 close = close_blocks(blocknr, other, blocksize); 693 } 694 if (!close && i < end_slot) { 695 other = btrfs_node_blockptr(parent, i + 1); 696 close = close_blocks(blocknr, other, blocksize); 697 } 698 if (close) { 699 last_block = blocknr; 700 continue; 701 } 702 703 cur = btrfs_read_node_slot(parent, i); 704 if (IS_ERR(cur)) 705 return PTR_ERR(cur); 706 if (search_start == 0) 707 search_start = last_block; 708 709 btrfs_tree_lock(cur); 710 err = __btrfs_cow_block(trans, root, cur, parent, i, 711 &cur, search_start, 712 min(16 * blocksize, 713 (end_slot - i) * blocksize), 714 BTRFS_NESTING_COW); 715 if (err) { 716 btrfs_tree_unlock(cur); 717 free_extent_buffer(cur); 718 break; 719 } 720 search_start = cur->start; 721 last_block = cur->start; 722 *last_ret = search_start; 723 btrfs_tree_unlock(cur); 724 free_extent_buffer(cur); 725 } 726 return err; 727 } 728 729 /* 730 * Search for a key in the given extent_buffer. 731 * 732 * The lower boundary for the search is specified by the slot number @low. Use a 733 * value of 0 to search over the whole extent buffer. 734 * 735 * The slot in the extent buffer is returned via @slot. 
If the key exists in the 736 * extent buffer, then @slot will point to the slot where the key is, otherwise 737 * it points to the slot where you would insert the key. 738 * 739 * Slot may point to the total number of items (i.e. one position beyond the last 740 * key) if the key is bigger than the last key in the extent buffer. 741 */ 742 static noinline int generic_bin_search(struct extent_buffer *eb, int low, 743 const struct btrfs_key *key, int *slot) 744 { 745 unsigned long p; 746 int item_size; 747 int high = btrfs_header_nritems(eb); 748 int ret; 749 const int key_size = sizeof(struct btrfs_disk_key); 750 751 if (low > high) { 752 btrfs_err(eb->fs_info, 753 "%s: low (%d) > high (%d) eb %llu owner %llu level %d", 754 __func__, low, high, eb->start, 755 btrfs_header_owner(eb), btrfs_header_level(eb)); 756 return -EINVAL; 757 } 758 759 if (btrfs_header_level(eb) == 0) { 760 p = offsetof(struct btrfs_leaf, items); 761 item_size = sizeof(struct btrfs_item); 762 } else { 763 p = offsetof(struct btrfs_node, ptrs); 764 item_size = sizeof(struct btrfs_key_ptr); 765 } 766 767 while (low < high) { 768 unsigned long oip; 769 unsigned long offset; 770 struct btrfs_disk_key *tmp; 771 struct btrfs_disk_key unaligned; 772 int mid; 773 774 mid = (low + high) / 2; 775 offset = p + mid * item_size; 776 oip = offset_in_page(offset); 777 778 if (oip + key_size <= PAGE_SIZE) { 779 const unsigned long idx = get_eb_page_index(offset); 780 char *kaddr = page_address(eb->pages[idx]); 781 782 oip = get_eb_offset_in_page(eb, offset); 783 tmp = (struct btrfs_disk_key *)(kaddr + oip); 784 } else { 785 read_extent_buffer(eb, &unaligned, offset, key_size); 786 tmp = &unaligned; 787 } 788 789 ret = comp_keys(tmp, key); 790 791 if (ret < 0) 792 low = mid + 1; 793 else if (ret > 0) 794 high = mid; 795 else { 796 *slot = mid; 797 return 0; 798 } 799 } 800 *slot = low; 801 return 1; 802 } 803 804 /* 805 * Simple binary search on an extent buffer. Works for both leaves and nodes, and 806 * always searches over the whole range of keys (slot 0 to slot 'nritems - 1'). 807 */ 808 int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key, 809 int *slot) 810 { 811 return generic_bin_search(eb, 0, key, slot); 812 } 813 814 static void root_add_used(struct btrfs_root *root, u32 size) 815 { 816 spin_lock(&root->accounting_lock); 817 btrfs_set_root_used(&root->root_item, 818 btrfs_root_used(&root->root_item) + size); 819 spin_unlock(&root->accounting_lock); 820 } 821 822 static void root_sub_used(struct btrfs_root *root, u32 size) 823 { 824 spin_lock(&root->accounting_lock); 825 btrfs_set_root_used(&root->root_item, 826 btrfs_root_used(&root->root_item) - size); 827 spin_unlock(&root->accounting_lock); 828 } 829 830 /* given a node and slot number, this reads the blocks it points to. The 831 * extent buffer is returned with a reference taken (but unlocked). 
832 */ 833 struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent, 834 int slot) 835 { 836 int level = btrfs_header_level(parent); 837 struct extent_buffer *eb; 838 struct btrfs_key first_key; 839 840 if (slot < 0 || slot >= btrfs_header_nritems(parent)) 841 return ERR_PTR(-ENOENT); 842 843 BUG_ON(level == 0); 844 845 btrfs_node_key_to_cpu(parent, &first_key, slot); 846 eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot), 847 btrfs_header_owner(parent), 848 btrfs_node_ptr_generation(parent, slot), 849 level - 1, &first_key); 850 if (IS_ERR(eb)) 851 return eb; 852 if (!extent_buffer_uptodate(eb)) { 853 free_extent_buffer(eb); 854 return ERR_PTR(-EIO); 855 } 856 857 return eb; 858 } 859 860 /* 861 * node level balancing, used to make sure nodes are in proper order for 862 * item deletion. We balance from the top down, so we have to make sure 863 * that a deletion won't leave an node completely empty later on. 864 */ 865 static noinline int balance_level(struct btrfs_trans_handle *trans, 866 struct btrfs_root *root, 867 struct btrfs_path *path, int level) 868 { 869 struct btrfs_fs_info *fs_info = root->fs_info; 870 struct extent_buffer *right = NULL; 871 struct extent_buffer *mid; 872 struct extent_buffer *left = NULL; 873 struct extent_buffer *parent = NULL; 874 int ret = 0; 875 int wret; 876 int pslot; 877 int orig_slot = path->slots[level]; 878 u64 orig_ptr; 879 880 ASSERT(level > 0); 881 882 mid = path->nodes[level]; 883 884 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK); 885 WARN_ON(btrfs_header_generation(mid) != trans->transid); 886 887 orig_ptr = btrfs_node_blockptr(mid, orig_slot); 888 889 if (level < BTRFS_MAX_LEVEL - 1) { 890 parent = path->nodes[level + 1]; 891 pslot = path->slots[level + 1]; 892 } 893 894 /* 895 * deal with the case where there is only one pointer in the root 896 * by promoting the node below to a root 897 */ 898 if (!parent) { 899 struct extent_buffer *child; 900 901 if (btrfs_header_nritems(mid) != 1) 902 return 0; 903 904 /* promote the child to a root */ 905 child = btrfs_read_node_slot(mid, 0); 906 if (IS_ERR(child)) { 907 ret = PTR_ERR(child); 908 btrfs_handle_fs_error(fs_info, ret, NULL); 909 goto enospc; 910 } 911 912 btrfs_tree_lock(child); 913 ret = btrfs_cow_block(trans, root, child, mid, 0, &child, 914 BTRFS_NESTING_COW); 915 if (ret) { 916 btrfs_tree_unlock(child); 917 free_extent_buffer(child); 918 goto enospc; 919 } 920 921 ret = btrfs_tree_mod_log_insert_root(root->node, child, true); 922 BUG_ON(ret < 0); 923 rcu_assign_pointer(root->node, child); 924 925 add_root_to_dirty_list(root); 926 btrfs_tree_unlock(child); 927 928 path->locks[level] = 0; 929 path->nodes[level] = NULL; 930 btrfs_clean_tree_block(mid); 931 btrfs_tree_unlock(mid); 932 /* once for the path */ 933 free_extent_buffer(mid); 934 935 root_sub_used(root, mid->len); 936 btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1); 937 /* once for the root ptr */ 938 free_extent_buffer_stale(mid); 939 return 0; 940 } 941 if (btrfs_header_nritems(mid) > 942 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4) 943 return 0; 944 945 left = btrfs_read_node_slot(parent, pslot - 1); 946 if (IS_ERR(left)) 947 left = NULL; 948 949 if (left) { 950 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT); 951 wret = btrfs_cow_block(trans, root, left, 952 parent, pslot - 1, &left, 953 BTRFS_NESTING_LEFT_COW); 954 if (wret) { 955 ret = wret; 956 goto enospc; 957 } 958 } 959 960 right = btrfs_read_node_slot(parent, pslot + 1); 961 if (IS_ERR(right)) 962 right = NULL; 963 964 if (right) 
{ 965 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT); 966 wret = btrfs_cow_block(trans, root, right, 967 parent, pslot + 1, &right, 968 BTRFS_NESTING_RIGHT_COW); 969 if (wret) { 970 ret = wret; 971 goto enospc; 972 } 973 } 974 975 /* first, try to make some room in the middle buffer */ 976 if (left) { 977 orig_slot += btrfs_header_nritems(left); 978 wret = push_node_left(trans, left, mid, 1); 979 if (wret < 0) 980 ret = wret; 981 } 982 983 /* 984 * then try to empty the right most buffer into the middle 985 */ 986 if (right) { 987 wret = push_node_left(trans, mid, right, 1); 988 if (wret < 0 && wret != -ENOSPC) 989 ret = wret; 990 if (btrfs_header_nritems(right) == 0) { 991 btrfs_clean_tree_block(right); 992 btrfs_tree_unlock(right); 993 del_ptr(root, path, level + 1, pslot + 1); 994 root_sub_used(root, right->len); 995 btrfs_free_tree_block(trans, btrfs_root_id(root), right, 996 0, 1); 997 free_extent_buffer_stale(right); 998 right = NULL; 999 } else { 1000 struct btrfs_disk_key right_key; 1001 btrfs_node_key(right, &right_key, 0); 1002 ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1, 1003 BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS); 1004 BUG_ON(ret < 0); 1005 btrfs_set_node_key(parent, &right_key, pslot + 1); 1006 btrfs_mark_buffer_dirty(parent); 1007 } 1008 } 1009 if (btrfs_header_nritems(mid) == 1) { 1010 /* 1011 * we're not allowed to leave a node with one item in the 1012 * tree during a delete. A deletion from lower in the tree 1013 * could try to delete the only pointer in this node. 1014 * So, pull some keys from the left. 1015 * There has to be a left pointer at this point because 1016 * otherwise we would have pulled some pointers from the 1017 * right 1018 */ 1019 if (!left) { 1020 ret = -EROFS; 1021 btrfs_handle_fs_error(fs_info, ret, NULL); 1022 goto enospc; 1023 } 1024 wret = balance_node_right(trans, mid, left); 1025 if (wret < 0) { 1026 ret = wret; 1027 goto enospc; 1028 } 1029 if (wret == 1) { 1030 wret = push_node_left(trans, left, mid, 1); 1031 if (wret < 0) 1032 ret = wret; 1033 } 1034 BUG_ON(wret == 1); 1035 } 1036 if (btrfs_header_nritems(mid) == 0) { 1037 btrfs_clean_tree_block(mid); 1038 btrfs_tree_unlock(mid); 1039 del_ptr(root, path, level + 1, pslot); 1040 root_sub_used(root, mid->len); 1041 btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1); 1042 free_extent_buffer_stale(mid); 1043 mid = NULL; 1044 } else { 1045 /* update the parent key to reflect our changes */ 1046 struct btrfs_disk_key mid_key; 1047 btrfs_node_key(mid, &mid_key, 0); 1048 ret = btrfs_tree_mod_log_insert_key(parent, pslot, 1049 BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS); 1050 BUG_ON(ret < 0); 1051 btrfs_set_node_key(parent, &mid_key, pslot); 1052 btrfs_mark_buffer_dirty(parent); 1053 } 1054 1055 /* update the path */ 1056 if (left) { 1057 if (btrfs_header_nritems(left) > orig_slot) { 1058 atomic_inc(&left->refs); 1059 /* left was locked after cow */ 1060 path->nodes[level] = left; 1061 path->slots[level + 1] -= 1; 1062 path->slots[level] = orig_slot; 1063 if (mid) { 1064 btrfs_tree_unlock(mid); 1065 free_extent_buffer(mid); 1066 } 1067 } else { 1068 orig_slot -= btrfs_header_nritems(left); 1069 path->slots[level] = orig_slot; 1070 } 1071 } 1072 /* double check we haven't messed things up */ 1073 if (orig_ptr != 1074 btrfs_node_blockptr(path->nodes[level], path->slots[level])) 1075 BUG(); 1076 enospc: 1077 if (right) { 1078 btrfs_tree_unlock(right); 1079 free_extent_buffer(right); 1080 } 1081 if (left) { 1082 if (path->nodes[level] != left) 1083 btrfs_tree_unlock(left); 1084 
free_extent_buffer(left); 1085 } 1086 return ret; 1087 } 1088 1089 /* Node balancing for insertion. Here we only split or push nodes around 1090 * when they are completely full. This is also done top down, so we 1091 * have to be pessimistic. 1092 */ 1093 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans, 1094 struct btrfs_root *root, 1095 struct btrfs_path *path, int level) 1096 { 1097 struct btrfs_fs_info *fs_info = root->fs_info; 1098 struct extent_buffer *right = NULL; 1099 struct extent_buffer *mid; 1100 struct extent_buffer *left = NULL; 1101 struct extent_buffer *parent = NULL; 1102 int ret = 0; 1103 int wret; 1104 int pslot; 1105 int orig_slot = path->slots[level]; 1106 1107 if (level == 0) 1108 return 1; 1109 1110 mid = path->nodes[level]; 1111 WARN_ON(btrfs_header_generation(mid) != trans->transid); 1112 1113 if (level < BTRFS_MAX_LEVEL - 1) { 1114 parent = path->nodes[level + 1]; 1115 pslot = path->slots[level + 1]; 1116 } 1117 1118 if (!parent) 1119 return 1; 1120 1121 left = btrfs_read_node_slot(parent, pslot - 1); 1122 if (IS_ERR(left)) 1123 left = NULL; 1124 1125 /* first, try to make some room in the middle buffer */ 1126 if (left) { 1127 u32 left_nr; 1128 1129 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT); 1130 1131 left_nr = btrfs_header_nritems(left); 1132 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) { 1133 wret = 1; 1134 } else { 1135 ret = btrfs_cow_block(trans, root, left, parent, 1136 pslot - 1, &left, 1137 BTRFS_NESTING_LEFT_COW); 1138 if (ret) 1139 wret = 1; 1140 else { 1141 wret = push_node_left(trans, left, mid, 0); 1142 } 1143 } 1144 if (wret < 0) 1145 ret = wret; 1146 if (wret == 0) { 1147 struct btrfs_disk_key disk_key; 1148 orig_slot += left_nr; 1149 btrfs_node_key(mid, &disk_key, 0); 1150 ret = btrfs_tree_mod_log_insert_key(parent, pslot, 1151 BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS); 1152 BUG_ON(ret < 0); 1153 btrfs_set_node_key(parent, &disk_key, pslot); 1154 btrfs_mark_buffer_dirty(parent); 1155 if (btrfs_header_nritems(left) > orig_slot) { 1156 path->nodes[level] = left; 1157 path->slots[level + 1] -= 1; 1158 path->slots[level] = orig_slot; 1159 btrfs_tree_unlock(mid); 1160 free_extent_buffer(mid); 1161 } else { 1162 orig_slot -= 1163 btrfs_header_nritems(left); 1164 path->slots[level] = orig_slot; 1165 btrfs_tree_unlock(left); 1166 free_extent_buffer(left); 1167 } 1168 return 0; 1169 } 1170 btrfs_tree_unlock(left); 1171 free_extent_buffer(left); 1172 } 1173 right = btrfs_read_node_slot(parent, pslot + 1); 1174 if (IS_ERR(right)) 1175 right = NULL; 1176 1177 /* 1178 * then try to empty the right most buffer into the middle 1179 */ 1180 if (right) { 1181 u32 right_nr; 1182 1183 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT); 1184 1185 right_nr = btrfs_header_nritems(right); 1186 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) { 1187 wret = 1; 1188 } else { 1189 ret = btrfs_cow_block(trans, root, right, 1190 parent, pslot + 1, 1191 &right, BTRFS_NESTING_RIGHT_COW); 1192 if (ret) 1193 wret = 1; 1194 else { 1195 wret = balance_node_right(trans, right, mid); 1196 } 1197 } 1198 if (wret < 0) 1199 ret = wret; 1200 if (wret == 0) { 1201 struct btrfs_disk_key disk_key; 1202 1203 btrfs_node_key(right, &disk_key, 0); 1204 ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1, 1205 BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS); 1206 BUG_ON(ret < 0); 1207 btrfs_set_node_key(parent, &disk_key, pslot + 1); 1208 btrfs_mark_buffer_dirty(parent); 1209 1210 if (btrfs_header_nritems(mid) <= orig_slot) { 1211 path->nodes[level] = right; 1212 
path->slots[level + 1] += 1; 1213 path->slots[level] = orig_slot - 1214 btrfs_header_nritems(mid); 1215 btrfs_tree_unlock(mid); 1216 free_extent_buffer(mid); 1217 } else { 1218 btrfs_tree_unlock(right); 1219 free_extent_buffer(right); 1220 } 1221 return 0; 1222 } 1223 btrfs_tree_unlock(right); 1224 free_extent_buffer(right); 1225 } 1226 return 1; 1227 } 1228 1229 /* 1230 * readahead one full node of leaves, finding things that are close 1231 * to the block in 'slot', and triggering ra on them. 1232 */ 1233 static void reada_for_search(struct btrfs_fs_info *fs_info, 1234 struct btrfs_path *path, 1235 int level, int slot, u64 objectid) 1236 { 1237 struct extent_buffer *node; 1238 struct btrfs_disk_key disk_key; 1239 u32 nritems; 1240 u64 search; 1241 u64 target; 1242 u64 nread = 0; 1243 u64 nread_max; 1244 u32 nr; 1245 u32 blocksize; 1246 u32 nscan = 0; 1247 1248 if (level != 1 && path->reada != READA_FORWARD_ALWAYS) 1249 return; 1250 1251 if (!path->nodes[level]) 1252 return; 1253 1254 node = path->nodes[level]; 1255 1256 /* 1257 * Since the time between visiting leaves is much shorter than the time 1258 * between visiting nodes, limit read ahead of nodes to 1, to avoid too 1259 * much IO at once (possibly random). 1260 */ 1261 if (path->reada == READA_FORWARD_ALWAYS) { 1262 if (level > 1) 1263 nread_max = node->fs_info->nodesize; 1264 else 1265 nread_max = SZ_128K; 1266 } else { 1267 nread_max = SZ_64K; 1268 } 1269 1270 search = btrfs_node_blockptr(node, slot); 1271 blocksize = fs_info->nodesize; 1272 if (path->reada != READA_FORWARD_ALWAYS) { 1273 struct extent_buffer *eb; 1274 1275 eb = find_extent_buffer(fs_info, search); 1276 if (eb) { 1277 free_extent_buffer(eb); 1278 return; 1279 } 1280 } 1281 1282 target = search; 1283 1284 nritems = btrfs_header_nritems(node); 1285 nr = slot; 1286 1287 while (1) { 1288 if (path->reada == READA_BACK) { 1289 if (nr == 0) 1290 break; 1291 nr--; 1292 } else if (path->reada == READA_FORWARD || 1293 path->reada == READA_FORWARD_ALWAYS) { 1294 nr++; 1295 if (nr >= nritems) 1296 break; 1297 } 1298 if (path->reada == READA_BACK && objectid) { 1299 btrfs_node_key(node, &disk_key, nr); 1300 if (btrfs_disk_key_objectid(&disk_key) != objectid) 1301 break; 1302 } 1303 search = btrfs_node_blockptr(node, nr); 1304 if (path->reada == READA_FORWARD_ALWAYS || 1305 (search <= target && target - search <= 65536) || 1306 (search > target && search - target <= 65536)) { 1307 btrfs_readahead_node_child(node, nr); 1308 nread += blocksize; 1309 } 1310 nscan++; 1311 if (nread > nread_max || nscan > 32) 1312 break; 1313 } 1314 } 1315 1316 static noinline void reada_for_balance(struct btrfs_path *path, int level) 1317 { 1318 struct extent_buffer *parent; 1319 int slot; 1320 int nritems; 1321 1322 parent = path->nodes[level + 1]; 1323 if (!parent) 1324 return; 1325 1326 nritems = btrfs_header_nritems(parent); 1327 slot = path->slots[level + 1]; 1328 1329 if (slot > 0) 1330 btrfs_readahead_node_child(parent, slot - 1); 1331 if (slot + 1 < nritems) 1332 btrfs_readahead_node_child(parent, slot + 1); 1333 } 1334 1335 1336 /* 1337 * when we walk down the tree, it is usually safe to unlock the higher layers 1338 * in the tree. The exceptions are when our path goes through slot 0, because 1339 * operations on the tree might require changing key pointers higher up in the 1340 * tree. 1341 * 1342 * callers might also have set path->keep_locks, which tells this code to keep 1343 * the lock if the path points to the last slot in the block. 
This is part of 1344 * walking through the tree, and selecting the next slot in the higher block. 1345 * 1346 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so 1347 * if lowest_unlock is 1, level 0 won't be unlocked 1348 */ 1349 static noinline void unlock_up(struct btrfs_path *path, int level, 1350 int lowest_unlock, int min_write_lock_level, 1351 int *write_lock_level) 1352 { 1353 int i; 1354 int skip_level = level; 1355 bool check_skip = true; 1356 1357 for (i = level; i < BTRFS_MAX_LEVEL; i++) { 1358 if (!path->nodes[i]) 1359 break; 1360 if (!path->locks[i]) 1361 break; 1362 1363 if (check_skip) { 1364 if (path->slots[i] == 0) { 1365 skip_level = i + 1; 1366 continue; 1367 } 1368 1369 if (path->keep_locks) { 1370 u32 nritems; 1371 1372 nritems = btrfs_header_nritems(path->nodes[i]); 1373 if (nritems < 1 || path->slots[i] >= nritems - 1) { 1374 skip_level = i + 1; 1375 continue; 1376 } 1377 } 1378 } 1379 1380 if (i >= lowest_unlock && i > skip_level) { 1381 check_skip = false; 1382 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]); 1383 path->locks[i] = 0; 1384 if (write_lock_level && 1385 i > min_write_lock_level && 1386 i <= *write_lock_level) { 1387 *write_lock_level = i - 1; 1388 } 1389 } 1390 } 1391 } 1392 1393 /* 1394 * Helper function for btrfs_search_slot() and other functions that do a search 1395 * on a btree. The goal is to find a tree block in the cache (the radix tree at 1396 * fs_info->buffer_radix), but if we can't find it, or it's not up to date, read 1397 * its pages from disk. 1398 * 1399 * Returns -EAGAIN, with the path unlocked, if the caller needs to repeat the 1400 * whole btree search, starting again from the current root node. 1401 */ 1402 static int 1403 read_block_for_search(struct btrfs_root *root, struct btrfs_path *p, 1404 struct extent_buffer **eb_ret, int level, int slot, 1405 const struct btrfs_key *key) 1406 { 1407 struct btrfs_fs_info *fs_info = root->fs_info; 1408 u64 blocknr; 1409 u64 gen; 1410 struct extent_buffer *tmp; 1411 struct btrfs_key first_key; 1412 int ret; 1413 int parent_level; 1414 bool unlock_up; 1415 1416 unlock_up = ((level + 1 < BTRFS_MAX_LEVEL) && p->locks[level + 1]); 1417 blocknr = btrfs_node_blockptr(*eb_ret, slot); 1418 gen = btrfs_node_ptr_generation(*eb_ret, slot); 1419 parent_level = btrfs_header_level(*eb_ret); 1420 btrfs_node_key_to_cpu(*eb_ret, &first_key, slot); 1421 1422 /* 1423 * If we need to read an extent buffer from disk and we are holding locks 1424 * on upper level nodes, we unlock all the upper nodes before reading the 1425 * extent buffer, and then return -EAGAIN to the caller as it needs to 1426 * restart the search. We don't release the lock on the current level 1427 * because we need to walk this node to figure out which blocks to read. 1428 */ 1429 tmp = find_extent_buffer(fs_info, blocknr); 1430 if (tmp) { 1431 if (p->reada == READA_FORWARD_ALWAYS) 1432 reada_for_search(fs_info, p, level, slot, key->objectid); 1433 1434 /* first we do an atomic uptodate check */ 1435 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) { 1436 /* 1437 * Do extra check for first_key, eb can be stale due to 1438 * being cached, read from scrub, or have multiple 1439 * parents (shared tree blocks). 
1440 */ 1441 if (btrfs_verify_level_key(tmp, 1442 parent_level - 1, &first_key, gen)) { 1443 free_extent_buffer(tmp); 1444 return -EUCLEAN; 1445 } 1446 *eb_ret = tmp; 1447 return 0; 1448 } 1449 1450 if (unlock_up) 1451 btrfs_unlock_up_safe(p, level + 1); 1452 1453 /* now we're allowed to do a blocking uptodate check */ 1454 ret = btrfs_read_extent_buffer(tmp, gen, parent_level - 1, &first_key); 1455 if (ret) { 1456 free_extent_buffer(tmp); 1457 btrfs_release_path(p); 1458 return -EIO; 1459 } 1460 if (btrfs_check_eb_owner(tmp, root->root_key.objectid)) { 1461 free_extent_buffer(tmp); 1462 btrfs_release_path(p); 1463 return -EUCLEAN; 1464 } 1465 1466 if (unlock_up) 1467 ret = -EAGAIN; 1468 1469 goto out; 1470 } 1471 1472 if (unlock_up) { 1473 btrfs_unlock_up_safe(p, level + 1); 1474 ret = -EAGAIN; 1475 } else { 1476 ret = 0; 1477 } 1478 1479 if (p->reada != READA_NONE) 1480 reada_for_search(fs_info, p, level, slot, key->objectid); 1481 1482 tmp = read_tree_block(fs_info, blocknr, root->root_key.objectid, 1483 gen, parent_level - 1, &first_key); 1484 if (IS_ERR(tmp)) { 1485 btrfs_release_path(p); 1486 return PTR_ERR(tmp); 1487 } 1488 /* 1489 * If the read above didn't mark this buffer up to date, 1490 * it will never end up being up to date. Set ret to EIO now 1491 * and give up so that our caller doesn't loop forever 1492 * on our EAGAINs. 1493 */ 1494 if (!extent_buffer_uptodate(tmp)) 1495 ret = -EIO; 1496 1497 out: 1498 if (ret == 0) { 1499 *eb_ret = tmp; 1500 } else { 1501 free_extent_buffer(tmp); 1502 btrfs_release_path(p); 1503 } 1504 1505 return ret; 1506 } 1507 1508 /* 1509 * helper function for btrfs_search_slot. This does all of the checks 1510 * for node-level blocks and does any balancing required based on 1511 * the ins_len. 1512 * 1513 * If no extra work was required, zero is returned. 
If we had to 1514 * drop the path, -EAGAIN is returned and btrfs_search_slot must 1515 * start over 1516 */ 1517 static int 1518 setup_nodes_for_search(struct btrfs_trans_handle *trans, 1519 struct btrfs_root *root, struct btrfs_path *p, 1520 struct extent_buffer *b, int level, int ins_len, 1521 int *write_lock_level) 1522 { 1523 struct btrfs_fs_info *fs_info = root->fs_info; 1524 int ret = 0; 1525 1526 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >= 1527 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) { 1528 1529 if (*write_lock_level < level + 1) { 1530 *write_lock_level = level + 1; 1531 btrfs_release_path(p); 1532 return -EAGAIN; 1533 } 1534 1535 reada_for_balance(p, level); 1536 ret = split_node(trans, root, p, level); 1537 1538 b = p->nodes[level]; 1539 } else if (ins_len < 0 && btrfs_header_nritems(b) < 1540 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) { 1541 1542 if (*write_lock_level < level + 1) { 1543 *write_lock_level = level + 1; 1544 btrfs_release_path(p); 1545 return -EAGAIN; 1546 } 1547 1548 reada_for_balance(p, level); 1549 ret = balance_level(trans, root, p, level); 1550 if (ret) 1551 return ret; 1552 1553 b = p->nodes[level]; 1554 if (!b) { 1555 btrfs_release_path(p); 1556 return -EAGAIN; 1557 } 1558 BUG_ON(btrfs_header_nritems(b) == 1); 1559 } 1560 return ret; 1561 } 1562 1563 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path, 1564 u64 iobjectid, u64 ioff, u8 key_type, 1565 struct btrfs_key *found_key) 1566 { 1567 int ret; 1568 struct btrfs_key key; 1569 struct extent_buffer *eb; 1570 1571 ASSERT(path); 1572 ASSERT(found_key); 1573 1574 key.type = key_type; 1575 key.objectid = iobjectid; 1576 key.offset = ioff; 1577 1578 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0); 1579 if (ret < 0) 1580 return ret; 1581 1582 eb = path->nodes[0]; 1583 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) { 1584 ret = btrfs_next_leaf(fs_root, path); 1585 if (ret) 1586 return ret; 1587 eb = path->nodes[0]; 1588 } 1589 1590 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]); 1591 if (found_key->type != key.type || 1592 found_key->objectid != key.objectid) 1593 return 1; 1594 1595 return 0; 1596 } 1597 1598 static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root, 1599 struct btrfs_path *p, 1600 int write_lock_level) 1601 { 1602 struct extent_buffer *b; 1603 int root_lock = 0; 1604 int level = 0; 1605 1606 if (p->search_commit_root) { 1607 b = root->commit_root; 1608 atomic_inc(&b->refs); 1609 level = btrfs_header_level(b); 1610 /* 1611 * Ensure that all callers have set skip_locking when 1612 * p->search_commit_root = 1. 1613 */ 1614 ASSERT(p->skip_locking == 1); 1615 1616 goto out; 1617 } 1618 1619 if (p->skip_locking) { 1620 b = btrfs_root_node(root); 1621 level = btrfs_header_level(b); 1622 goto out; 1623 } 1624 1625 /* We try very hard to do read locks on the root */ 1626 root_lock = BTRFS_READ_LOCK; 1627 1628 /* 1629 * If the level is set to maximum, we can skip trying to get the read 1630 * lock. 
1631 */ 1632 if (write_lock_level < BTRFS_MAX_LEVEL) { 1633 /* 1634 * We don't know the level of the root node until we actually 1635 * have it read locked 1636 */ 1637 b = btrfs_read_lock_root_node(root); 1638 level = btrfs_header_level(b); 1639 if (level > write_lock_level) 1640 goto out; 1641 1642 /* Whoops, must trade for write lock */ 1643 btrfs_tree_read_unlock(b); 1644 free_extent_buffer(b); 1645 } 1646 1647 b = btrfs_lock_root_node(root); 1648 root_lock = BTRFS_WRITE_LOCK; 1649 1650 /* The level might have changed, check again */ 1651 level = btrfs_header_level(b); 1652 1653 out: 1654 /* 1655 * The root may have failed to write out at some point, and thus is no 1656 * longer valid, return an error in this case. 1657 */ 1658 if (!extent_buffer_uptodate(b)) { 1659 if (root_lock) 1660 btrfs_tree_unlock_rw(b, root_lock); 1661 free_extent_buffer(b); 1662 return ERR_PTR(-EIO); 1663 } 1664 1665 p->nodes[level] = b; 1666 if (!p->skip_locking) 1667 p->locks[level] = root_lock; 1668 /* 1669 * Callers are responsible for dropping b's references. 1670 */ 1671 return b; 1672 } 1673 1674 /* 1675 * Replace the extent buffer at the lowest level of the path with a cloned 1676 * version. The purpose is to be able to use it safely, after releasing the 1677 * commit root semaphore, even if relocation is happening in parallel, the 1678 * transaction used for relocation is committed and the extent buffer is 1679 * reallocated in the next transaction. 1680 * 1681 * This is used in a context where the caller does not prevent transaction 1682 * commits from happening, either by holding a transaction handle or holding 1683 * some lock, while it's doing searches through a commit root. 1684 * At the moment it's only used for send operations. 1685 */ 1686 static int finish_need_commit_sem_search(struct btrfs_path *path) 1687 { 1688 const int i = path->lowest_level; 1689 const int slot = path->slots[i]; 1690 struct extent_buffer *lowest = path->nodes[i]; 1691 struct extent_buffer *clone; 1692 1693 ASSERT(path->need_commit_sem); 1694 1695 if (!lowest) 1696 return 0; 1697 1698 lockdep_assert_held_read(&lowest->fs_info->commit_root_sem); 1699 1700 clone = btrfs_clone_extent_buffer(lowest); 1701 if (!clone) 1702 return -ENOMEM; 1703 1704 btrfs_release_path(path); 1705 path->nodes[i] = clone; 1706 path->slots[i] = slot; 1707 1708 return 0; 1709 } 1710 1711 static inline int search_for_key_slot(struct extent_buffer *eb, 1712 int search_low_slot, 1713 const struct btrfs_key *key, 1714 int prev_cmp, 1715 int *slot) 1716 { 1717 /* 1718 * If a previous call to btrfs_bin_search() on a parent node returned an 1719 * exact match (prev_cmp == 0), we can safely assume the target key will 1720 * always be at slot 0 on lower levels, since each key pointer 1721 * (struct btrfs_key_ptr) refers to the lowest key accessible from the 1722 * subtree it points to. Thus we can skip searching lower levels. 
1723 */ 1724 if (prev_cmp == 0) { 1725 *slot = 0; 1726 return 0; 1727 } 1728 1729 return generic_bin_search(eb, search_low_slot, key, slot); 1730 } 1731 1732 static int search_leaf(struct btrfs_trans_handle *trans, 1733 struct btrfs_root *root, 1734 const struct btrfs_key *key, 1735 struct btrfs_path *path, 1736 int ins_len, 1737 int prev_cmp) 1738 { 1739 struct extent_buffer *leaf = path->nodes[0]; 1740 int leaf_free_space = -1; 1741 int search_low_slot = 0; 1742 int ret; 1743 bool do_bin_search = true; 1744 1745 /* 1746 * If we are doing an insertion, the leaf has enough free space and the 1747 * destination slot for the key is not slot 0, then we can unlock our 1748 * write lock on the parent, and any other upper nodes, before doing the 1749 * binary search on the leaf (with search_for_key_slot()), allowing other 1750 * tasks to lock the parent and any other upper nodes. 1751 */ 1752 if (ins_len > 0) { 1753 /* 1754 * Cache the leaf free space, since we will need it later and it 1755 * will not change until then. 1756 */ 1757 leaf_free_space = btrfs_leaf_free_space(leaf); 1758 1759 /* 1760 * !path->locks[1] means we have a single node tree, the leaf is 1761 * the root of the tree. 1762 */ 1763 if (path->locks[1] && leaf_free_space >= ins_len) { 1764 struct btrfs_disk_key first_key; 1765 1766 ASSERT(btrfs_header_nritems(leaf) > 0); 1767 btrfs_item_key(leaf, &first_key, 0); 1768 1769 /* 1770 * Doing the extra comparison with the first key is cheap, 1771 * taking into account that the first key is very likely 1772 * already in a cache line because it immediately follows 1773 * the extent buffer's header and we have recently accessed 1774 * the header's level field. 1775 */ 1776 ret = comp_keys(&first_key, key); 1777 if (ret < 0) { 1778 /* 1779 * The first key is smaller than the key we want 1780 * to insert, so we are safe to unlock all upper 1781 * nodes and we have to do the binary search. 1782 * 1783 * We do use btrfs_unlock_up_safe() and not 1784 * unlock_up() because the later does not unlock 1785 * nodes with a slot of 0 - we can safely unlock 1786 * any node even if its slot is 0 since in this 1787 * case the key does not end up at slot 0 of the 1788 * leaf and there's no need to split the leaf. 1789 */ 1790 btrfs_unlock_up_safe(path, 1); 1791 search_low_slot = 1; 1792 } else { 1793 /* 1794 * The first key is >= then the key we want to 1795 * insert, so we can skip the binary search as 1796 * the target key will be at slot 0. 1797 * 1798 * We can not unlock upper nodes when the key is 1799 * less than the first key, because we will need 1800 * to update the key at slot 0 of the parent node 1801 * and possibly of other upper nodes too. 1802 * If the key matches the first key, then we can 1803 * unlock all the upper nodes, using 1804 * btrfs_unlock_up_safe() instead of unlock_up() 1805 * as stated above. 1806 */ 1807 if (ret == 0) 1808 btrfs_unlock_up_safe(path, 1); 1809 /* 1810 * ret is already 0 or 1, matching the result of 1811 * a btrfs_bin_search() call, so there is no need 1812 * to adjust it. 1813 */ 1814 do_bin_search = false; 1815 path->slots[0] = 0; 1816 } 1817 } 1818 } 1819 1820 if (do_bin_search) { 1821 ret = search_for_key_slot(leaf, search_low_slot, key, 1822 prev_cmp, &path->slots[0]); 1823 if (ret < 0) 1824 return ret; 1825 } 1826 1827 if (ins_len > 0) { 1828 /* 1829 * Item key already exists. In this case, if we are allowed to 1830 * insert the item (for example, in dir_item case, item key 1831 * collision is allowed), it will be merged with the original 1832 * item. 
Only the item size grows, no new btrfs item will be 1833 * added. If search_for_extension is not set, ins_len already 1834 * accounts the size btrfs_item, deduct it here so leaf space 1835 * check will be correct. 1836 */ 1837 if (ret == 0 && !path->search_for_extension) { 1838 ASSERT(ins_len >= sizeof(struct btrfs_item)); 1839 ins_len -= sizeof(struct btrfs_item); 1840 } 1841 1842 ASSERT(leaf_free_space >= 0); 1843 1844 if (leaf_free_space < ins_len) { 1845 int err; 1846 1847 err = split_leaf(trans, root, key, path, ins_len, 1848 (ret == 0)); 1849 ASSERT(err <= 0); 1850 if (WARN_ON(err > 0)) 1851 err = -EUCLEAN; 1852 if (err) 1853 ret = err; 1854 } 1855 } 1856 1857 return ret; 1858 } 1859 1860 /* 1861 * btrfs_search_slot - look for a key in a tree and perform necessary 1862 * modifications to preserve tree invariants. 1863 * 1864 * @trans: Handle of transaction, used when modifying the tree 1865 * @p: Holds all btree nodes along the search path 1866 * @root: The root node of the tree 1867 * @key: The key we are looking for 1868 * @ins_len: Indicates purpose of search: 1869 * >0 for inserts it's size of item inserted (*) 1870 * <0 for deletions 1871 * 0 for plain searches, not modifying the tree 1872 * 1873 * (*) If size of item inserted doesn't include 1874 * sizeof(struct btrfs_item), then p->search_for_extension must 1875 * be set. 1876 * @cow: boolean should CoW operations be performed. Must always be 1 1877 * when modifying the tree. 1878 * 1879 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree. 1880 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible) 1881 * 1882 * If @key is found, 0 is returned and you can find the item in the leaf level 1883 * of the path (level 0) 1884 * 1885 * If @key isn't found, 1 is returned and the leaf level of the path (level 0) 1886 * points to the slot where it should be inserted 1887 * 1888 * If an error is encountered while searching the tree a negative error number 1889 * is returned 1890 */ 1891 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root, 1892 const struct btrfs_key *key, struct btrfs_path *p, 1893 int ins_len, int cow) 1894 { 1895 struct btrfs_fs_info *fs_info = root->fs_info; 1896 struct extent_buffer *b; 1897 int slot; 1898 int ret; 1899 int err; 1900 int level; 1901 int lowest_unlock = 1; 1902 /* everything at write_lock_level or lower must be write locked */ 1903 int write_lock_level = 0; 1904 u8 lowest_level = 0; 1905 int min_write_lock_level; 1906 int prev_cmp; 1907 1908 lowest_level = p->lowest_level; 1909 WARN_ON(lowest_level && ins_len > 0); 1910 WARN_ON(p->nodes[0] != NULL); 1911 BUG_ON(!cow && ins_len); 1912 1913 if (ins_len < 0) { 1914 lowest_unlock = 2; 1915 1916 /* when we are removing items, we might have to go up to level 1917 * two as we update tree pointers Make sure we keep write 1918 * for those levels as well 1919 */ 1920 write_lock_level = 2; 1921 } else if (ins_len > 0) { 1922 /* 1923 * for inserting items, make sure we have a write lock on 1924 * level 1 so we can update keys 1925 */ 1926 write_lock_level = 1; 1927 } 1928 1929 if (!cow) 1930 write_lock_level = -1; 1931 1932 if (cow && (p->keep_locks || p->lowest_level)) 1933 write_lock_level = BTRFS_MAX_LEVEL; 1934 1935 min_write_lock_level = write_lock_level; 1936 1937 if (p->need_commit_sem) { 1938 ASSERT(p->search_commit_root); 1939 down_read(&fs_info->commit_root_sem); 1940 } 1941 1942 again: 1943 prev_cmp = -1; 1944 b = btrfs_search_slot_get_root(root, p, write_lock_level); 1945 if 
(IS_ERR(b)) { 1946 ret = PTR_ERR(b); 1947 goto done; 1948 } 1949 1950 while (b) { 1951 int dec = 0; 1952 1953 level = btrfs_header_level(b); 1954 1955 if (cow) { 1956 bool last_level = (level == (BTRFS_MAX_LEVEL - 1)); 1957 1958 /* 1959 * if we don't really need to cow this block 1960 * then we don't want to set the path blocking, 1961 * so we test it here 1962 */ 1963 if (!should_cow_block(trans, root, b)) 1964 goto cow_done; 1965 1966 /* 1967 * must have write locks on this node and the 1968 * parent 1969 */ 1970 if (level > write_lock_level || 1971 (level + 1 > write_lock_level && 1972 level + 1 < BTRFS_MAX_LEVEL && 1973 p->nodes[level + 1])) { 1974 write_lock_level = level + 1; 1975 btrfs_release_path(p); 1976 goto again; 1977 } 1978 1979 if (last_level) 1980 err = btrfs_cow_block(trans, root, b, NULL, 0, 1981 &b, 1982 BTRFS_NESTING_COW); 1983 else 1984 err = btrfs_cow_block(trans, root, b, 1985 p->nodes[level + 1], 1986 p->slots[level + 1], &b, 1987 BTRFS_NESTING_COW); 1988 if (err) { 1989 ret = err; 1990 goto done; 1991 } 1992 } 1993 cow_done: 1994 p->nodes[level] = b; 1995 1996 /* 1997 * we have a lock on b and as long as we aren't changing 1998 * the tree, there is no way to for the items in b to change. 1999 * It is safe to drop the lock on our parent before we 2000 * go through the expensive btree search on b. 2001 * 2002 * If we're inserting or deleting (ins_len != 0), then we might 2003 * be changing slot zero, which may require changing the parent. 2004 * So, we can't drop the lock until after we know which slot 2005 * we're operating on. 2006 */ 2007 if (!ins_len && !p->keep_locks) { 2008 int u = level + 1; 2009 2010 if (u < BTRFS_MAX_LEVEL && p->locks[u]) { 2011 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]); 2012 p->locks[u] = 0; 2013 } 2014 } 2015 2016 if (level == 0) { 2017 if (ins_len > 0) 2018 ASSERT(write_lock_level >= 1); 2019 2020 ret = search_leaf(trans, root, key, p, ins_len, prev_cmp); 2021 if (!p->search_for_split) 2022 unlock_up(p, level, lowest_unlock, 2023 min_write_lock_level, NULL); 2024 goto done; 2025 } 2026 2027 ret = search_for_key_slot(b, 0, key, prev_cmp, &slot); 2028 if (ret < 0) 2029 goto done; 2030 prev_cmp = ret; 2031 2032 if (ret && slot > 0) { 2033 dec = 1; 2034 slot--; 2035 } 2036 p->slots[level] = slot; 2037 err = setup_nodes_for_search(trans, root, p, b, level, ins_len, 2038 &write_lock_level); 2039 if (err == -EAGAIN) 2040 goto again; 2041 if (err) { 2042 ret = err; 2043 goto done; 2044 } 2045 b = p->nodes[level]; 2046 slot = p->slots[level]; 2047 2048 /* 2049 * Slot 0 is special, if we change the key we have to update 2050 * the parent pointer which means we must have a write lock on 2051 * the parent 2052 */ 2053 if (slot == 0 && ins_len && write_lock_level < level + 1) { 2054 write_lock_level = level + 1; 2055 btrfs_release_path(p); 2056 goto again; 2057 } 2058 2059 unlock_up(p, level, lowest_unlock, min_write_lock_level, 2060 &write_lock_level); 2061 2062 if (level == lowest_level) { 2063 if (dec) 2064 p->slots[level]++; 2065 goto done; 2066 } 2067 2068 err = read_block_for_search(root, p, &b, level, slot, key); 2069 if (err == -EAGAIN) 2070 goto again; 2071 if (err) { 2072 ret = err; 2073 goto done; 2074 } 2075 2076 if (!p->skip_locking) { 2077 level = btrfs_header_level(b); 2078 2079 btrfs_maybe_reset_lockdep_class(root, b); 2080 2081 if (level <= write_lock_level) { 2082 btrfs_tree_lock(b); 2083 p->locks[level] = BTRFS_WRITE_LOCK; 2084 } else { 2085 btrfs_tree_read_lock(b); 2086 p->locks[level] = BTRFS_READ_LOCK; 2087 } 2088 
p->nodes[level] = b; 2089 } 2090 } 2091 ret = 1; 2092 done: 2093 if (ret < 0 && !p->skip_release_on_error) 2094 btrfs_release_path(p); 2095 2096 if (p->need_commit_sem) { 2097 int ret2; 2098 2099 ret2 = finish_need_commit_sem_search(p); 2100 up_read(&fs_info->commit_root_sem); 2101 if (ret2) 2102 ret = ret2; 2103 } 2104 2105 return ret; 2106 } 2107 ALLOW_ERROR_INJECTION(btrfs_search_slot, ERRNO); 2108 2109 /* 2110 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the 2111 * current state of the tree together with the operations recorded in the tree 2112 * modification log to search for the key in a previous version of this tree, as 2113 * denoted by the time_seq parameter. 2114 * 2115 * Naturally, there is no support for insert, delete or cow operations. 2116 * 2117 * The resulting path and return value will be set up as if we called 2118 * btrfs_search_slot at that point in time with ins_len and cow both set to 0. 2119 */ 2120 int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key, 2121 struct btrfs_path *p, u64 time_seq) 2122 { 2123 struct btrfs_fs_info *fs_info = root->fs_info; 2124 struct extent_buffer *b; 2125 int slot; 2126 int ret; 2127 int err; 2128 int level; 2129 int lowest_unlock = 1; 2130 u8 lowest_level = 0; 2131 2132 lowest_level = p->lowest_level; 2133 WARN_ON(p->nodes[0] != NULL); 2134 2135 if (p->search_commit_root) { 2136 BUG_ON(time_seq); 2137 return btrfs_search_slot(NULL, root, key, p, 0, 0); 2138 } 2139 2140 again: 2141 b = btrfs_get_old_root(root, time_seq); 2142 if (!b) { 2143 ret = -EIO; 2144 goto done; 2145 } 2146 level = btrfs_header_level(b); 2147 p->locks[level] = BTRFS_READ_LOCK; 2148 2149 while (b) { 2150 int dec = 0; 2151 2152 level = btrfs_header_level(b); 2153 p->nodes[level] = b; 2154 2155 /* 2156 * we have a lock on b and as long as we aren't changing 2157 * the tree, there is no way to for the items in b to change. 2158 * It is safe to drop the lock on our parent before we 2159 * go through the expensive btree search on b. 2160 */ 2161 btrfs_unlock_up_safe(p, level + 1); 2162 2163 ret = btrfs_bin_search(b, key, &slot); 2164 if (ret < 0) 2165 goto done; 2166 2167 if (level == 0) { 2168 p->slots[level] = slot; 2169 unlock_up(p, level, lowest_unlock, 0, NULL); 2170 goto done; 2171 } 2172 2173 if (ret && slot > 0) { 2174 dec = 1; 2175 slot--; 2176 } 2177 p->slots[level] = slot; 2178 unlock_up(p, level, lowest_unlock, 0, NULL); 2179 2180 if (level == lowest_level) { 2181 if (dec) 2182 p->slots[level]++; 2183 goto done; 2184 } 2185 2186 err = read_block_for_search(root, p, &b, level, slot, key); 2187 if (err == -EAGAIN) 2188 goto again; 2189 if (err) { 2190 ret = err; 2191 goto done; 2192 } 2193 2194 level = btrfs_header_level(b); 2195 btrfs_tree_read_lock(b); 2196 b = btrfs_tree_mod_log_rewind(fs_info, p, b, time_seq); 2197 if (!b) { 2198 ret = -ENOMEM; 2199 goto done; 2200 } 2201 p->locks[level] = BTRFS_READ_LOCK; 2202 p->nodes[level] = b; 2203 } 2204 ret = 1; 2205 done: 2206 if (ret < 0) 2207 btrfs_release_path(p); 2208 2209 return ret; 2210 } 2211 2212 /* 2213 * helper to use instead of search slot if no exact match is needed but 2214 * instead the next or previous item should be returned. 2215 * When find_higher is true, the next higher item is returned, the next lower 2216 * otherwise. 2217 * When return_any and find_higher are both true, and no higher item is found, 2218 * return the next lower instead. 
2219 * When return_any is true and find_higher is false, and no lower item is found, 2220 * return the next higher instead. 2221 * It returns 0 if any item is found, 1 if none is found (tree empty), and 2222 * < 0 on error 2223 */ 2224 int btrfs_search_slot_for_read(struct btrfs_root *root, 2225 const struct btrfs_key *key, 2226 struct btrfs_path *p, int find_higher, 2227 int return_any) 2228 { 2229 int ret; 2230 struct extent_buffer *leaf; 2231 2232 again: 2233 ret = btrfs_search_slot(NULL, root, key, p, 0, 0); 2234 if (ret <= 0) 2235 return ret; 2236 /* 2237 * a return value of 1 means the path is at the position where the 2238 * item should be inserted. Normally this is the next bigger item, 2239 * but in case the previous item is the last in a leaf, path points 2240 * to the first free slot in the previous leaf, i.e. at an invalid 2241 * item. 2242 */ 2243 leaf = p->nodes[0]; 2244 2245 if (find_higher) { 2246 if (p->slots[0] >= btrfs_header_nritems(leaf)) { 2247 ret = btrfs_next_leaf(root, p); 2248 if (ret <= 0) 2249 return ret; 2250 if (!return_any) 2251 return 1; 2252 /* 2253 * no higher item found, return the next 2254 * lower instead 2255 */ 2256 return_any = 0; 2257 find_higher = 0; 2258 btrfs_release_path(p); 2259 goto again; 2260 } 2261 } else { 2262 if (p->slots[0] == 0) { 2263 ret = btrfs_prev_leaf(root, p); 2264 if (ret < 0) 2265 return ret; 2266 if (!ret) { 2267 leaf = p->nodes[0]; 2268 if (p->slots[0] == btrfs_header_nritems(leaf)) 2269 p->slots[0]--; 2270 return 0; 2271 } 2272 if (!return_any) 2273 return 1; 2274 /* 2275 * no lower item found, return the next 2276 * higher instead 2277 */ 2278 return_any = 0; 2279 find_higher = 1; 2280 btrfs_release_path(p); 2281 goto again; 2282 } else { 2283 --p->slots[0]; 2284 } 2285 } 2286 return 0; 2287 } 2288 2289 /* 2290 * Execute search and call btrfs_previous_item to traverse backwards if the item 2291 * was not found. 2292 * 2293 * Return 0 if found, 1 if not found and < 0 if error. 2294 */ 2295 int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key, 2296 struct btrfs_path *path) 2297 { 2298 int ret; 2299 2300 ret = btrfs_search_slot(NULL, root, key, path, 0, 0); 2301 if (ret > 0) 2302 ret = btrfs_previous_item(root, path, key->objectid, key->type); 2303 2304 if (ret == 0) 2305 btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]); 2306 2307 return ret; 2308 } 2309 2310 /** 2311 * Search for a valid slot for the given path. 2312 * 2313 * @root: The root node of the tree. 2314 * @key: Will contain a valid item if found. 2315 * @path: The starting point to validate the slot. 2316 * 2317 * Return: 0 if the item is valid 2318 * 1 if not found 2319 * <0 if error. 2320 */ 2321 int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key, 2322 struct btrfs_path *path) 2323 { 2324 while (1) { 2325 int ret; 2326 const int slot = path->slots[0]; 2327 const struct extent_buffer *leaf = path->nodes[0]; 2328 2329 /* This is where we start walking the path. */ 2330 if (slot >= btrfs_header_nritems(leaf)) { 2331 /* 2332 * If we've reached the last slot in this leaf we need 2333 * to go to the next leaf and reset the path. 2334 */ 2335 ret = btrfs_next_leaf(root, path); 2336 if (ret) 2337 return ret; 2338 continue; 2339 } 2340 /* Store the found, valid item in @key. */ 2341 btrfs_item_key_to_cpu(leaf, key, slot); 2342 break; 2343 } 2344 return 0; 2345 } 2346 2347 /* 2348 * adjust the pointers going up the tree, starting at level 2349 * making sure the right key of each node is points to 'key'. 
2350 * This is used after shifting pointers to the left, so it stops 2351 * fixing up pointers when a given leaf/node is not in slot 0 of the 2352 * higher levels 2353 * 2354 */ 2355 static void fixup_low_keys(struct btrfs_path *path, 2356 struct btrfs_disk_key *key, int level) 2357 { 2358 int i; 2359 struct extent_buffer *t; 2360 int ret; 2361 2362 for (i = level; i < BTRFS_MAX_LEVEL; i++) { 2363 int tslot = path->slots[i]; 2364 2365 if (!path->nodes[i]) 2366 break; 2367 t = path->nodes[i]; 2368 ret = btrfs_tree_mod_log_insert_key(t, tslot, 2369 BTRFS_MOD_LOG_KEY_REPLACE, GFP_ATOMIC); 2370 BUG_ON(ret < 0); 2371 btrfs_set_node_key(t, key, tslot); 2372 btrfs_mark_buffer_dirty(path->nodes[i]); 2373 if (tslot != 0) 2374 break; 2375 } 2376 } 2377 2378 /* 2379 * update item key. 2380 * 2381 * This function isn't completely safe. It's the caller's responsibility 2382 * that the new key won't break the order 2383 */ 2384 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info, 2385 struct btrfs_path *path, 2386 const struct btrfs_key *new_key) 2387 { 2388 struct btrfs_disk_key disk_key; 2389 struct extent_buffer *eb; 2390 int slot; 2391 2392 eb = path->nodes[0]; 2393 slot = path->slots[0]; 2394 if (slot > 0) { 2395 btrfs_item_key(eb, &disk_key, slot - 1); 2396 if (unlikely(comp_keys(&disk_key, new_key) >= 0)) { 2397 btrfs_crit(fs_info, 2398 "slot %u key (%llu %u %llu) new key (%llu %u %llu)", 2399 slot, btrfs_disk_key_objectid(&disk_key), 2400 btrfs_disk_key_type(&disk_key), 2401 btrfs_disk_key_offset(&disk_key), 2402 new_key->objectid, new_key->type, 2403 new_key->offset); 2404 btrfs_print_leaf(eb); 2405 BUG(); 2406 } 2407 } 2408 if (slot < btrfs_header_nritems(eb) - 1) { 2409 btrfs_item_key(eb, &disk_key, slot + 1); 2410 if (unlikely(comp_keys(&disk_key, new_key) <= 0)) { 2411 btrfs_crit(fs_info, 2412 "slot %u key (%llu %u %llu) new key (%llu %u %llu)", 2413 slot, btrfs_disk_key_objectid(&disk_key), 2414 btrfs_disk_key_type(&disk_key), 2415 btrfs_disk_key_offset(&disk_key), 2416 new_key->objectid, new_key->type, 2417 new_key->offset); 2418 btrfs_print_leaf(eb); 2419 BUG(); 2420 } 2421 } 2422 2423 btrfs_cpu_key_to_disk(&disk_key, new_key); 2424 btrfs_set_item_key(eb, &disk_key, slot); 2425 btrfs_mark_buffer_dirty(eb); 2426 if (slot == 0) 2427 fixup_low_keys(path, &disk_key, 1); 2428 } 2429 2430 /* 2431 * Check key order of two sibling extent buffers. 2432 * 2433 * Return true if something is wrong. 2434 * Return false if everything is fine. 2435 * 2436 * Tree-checker only works inside one tree block, thus the following 2437 * corruption can not be detected by tree-checker: 2438 * 2439 * Leaf @left | Leaf @right 2440 * -------------------------------------------------------------- 2441 * | 1 | 2 | 3 | 4 | 5 | f6 | | 7 | 8 | 2442 * 2443 * Key f6 in leaf @left itself is valid, but not valid when the next 2444 * key in leaf @right is 7. 2445 * This can only be checked at tree block merge time. 2446 * And since tree checker has ensured all key order in each tree block 2447 * is correct, we only need to bother the last key of @left and the first 2448 * key of @right. 
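*
* All callers treat a true return as metadata corruption and fail the
* push or balance with -EUCLEAN.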
2449 */ 2450 static bool check_sibling_keys(struct extent_buffer *left, 2451 struct extent_buffer *right) 2452 { 2453 struct btrfs_key left_last; 2454 struct btrfs_key right_first; 2455 int level = btrfs_header_level(left); 2456 int nr_left = btrfs_header_nritems(left); 2457 int nr_right = btrfs_header_nritems(right); 2458 2459 /* No key to check in one of the tree blocks */ 2460 if (!nr_left || !nr_right) 2461 return false; 2462 2463 if (level) { 2464 btrfs_node_key_to_cpu(left, &left_last, nr_left - 1); 2465 btrfs_node_key_to_cpu(right, &right_first, 0); 2466 } else { 2467 btrfs_item_key_to_cpu(left, &left_last, nr_left - 1); 2468 btrfs_item_key_to_cpu(right, &right_first, 0); 2469 } 2470 2471 if (btrfs_comp_cpu_keys(&left_last, &right_first) >= 0) { 2472 btrfs_crit(left->fs_info, 2473 "bad key order, sibling blocks, left last (%llu %u %llu) right first (%llu %u %llu)", 2474 left_last.objectid, left_last.type, 2475 left_last.offset, right_first.objectid, 2476 right_first.type, right_first.offset); 2477 return true; 2478 } 2479 return false; 2480 } 2481 2482 /* 2483 * try to push data from one node into the next node left in the 2484 * tree. 2485 * 2486 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible 2487 * error, and > 0 if there was no room in the left hand block. 2488 */ 2489 static int push_node_left(struct btrfs_trans_handle *trans, 2490 struct extent_buffer *dst, 2491 struct extent_buffer *src, int empty) 2492 { 2493 struct btrfs_fs_info *fs_info = trans->fs_info; 2494 int push_items = 0; 2495 int src_nritems; 2496 int dst_nritems; 2497 int ret = 0; 2498 2499 src_nritems = btrfs_header_nritems(src); 2500 dst_nritems = btrfs_header_nritems(dst); 2501 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems; 2502 WARN_ON(btrfs_header_generation(src) != trans->transid); 2503 WARN_ON(btrfs_header_generation(dst) != trans->transid); 2504 2505 if (!empty && src_nritems <= 8) 2506 return 1; 2507 2508 if (push_items <= 0) 2509 return 1; 2510 2511 if (empty) { 2512 push_items = min(src_nritems, push_items); 2513 if (push_items < src_nritems) { 2514 /* leave at least 8 pointers in the node if 2515 * we aren't going to empty it 2516 */ 2517 if (src_nritems - push_items < 8) { 2518 if (push_items <= 8) 2519 return 1; 2520 push_items -= 8; 2521 } 2522 } 2523 } else 2524 push_items = min(src_nritems - 8, push_items); 2525 2526 /* dst is the left eb, src is the middle eb */ 2527 if (check_sibling_keys(dst, src)) { 2528 ret = -EUCLEAN; 2529 btrfs_abort_transaction(trans, ret); 2530 return ret; 2531 } 2532 ret = btrfs_tree_mod_log_eb_copy(dst, src, dst_nritems, 0, push_items); 2533 if (ret) { 2534 btrfs_abort_transaction(trans, ret); 2535 return ret; 2536 } 2537 copy_extent_buffer(dst, src, 2538 btrfs_node_key_ptr_offset(dst_nritems), 2539 btrfs_node_key_ptr_offset(0), 2540 push_items * sizeof(struct btrfs_key_ptr)); 2541 2542 if (push_items < src_nritems) { 2543 /* 2544 * Don't call btrfs_tree_mod_log_insert_move() here, key removal 2545 * was already fully logged by btrfs_tree_mod_log_eb_copy() above. 
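* The memmove below only compacts the pointers remaining in @src to the
* front of the node, their relative order is unchanged.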
2546 */ 2547 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0), 2548 btrfs_node_key_ptr_offset(push_items), 2549 (src_nritems - push_items) * 2550 sizeof(struct btrfs_key_ptr)); 2551 } 2552 btrfs_set_header_nritems(src, src_nritems - push_items); 2553 btrfs_set_header_nritems(dst, dst_nritems + push_items); 2554 btrfs_mark_buffer_dirty(src); 2555 btrfs_mark_buffer_dirty(dst); 2556 2557 return ret; 2558 } 2559 2560 /* 2561 * try to push data from one node into the next node right in the 2562 * tree. 2563 * 2564 * returns 0 if some ptrs were pushed, < 0 if there was some horrible 2565 * error, and > 0 if there was no room in the right hand block. 2566 * 2567 * this will only push up to 1/2 the contents of the left node over 2568 */ 2569 static int balance_node_right(struct btrfs_trans_handle *trans, 2570 struct extent_buffer *dst, 2571 struct extent_buffer *src) 2572 { 2573 struct btrfs_fs_info *fs_info = trans->fs_info; 2574 int push_items = 0; 2575 int max_push; 2576 int src_nritems; 2577 int dst_nritems; 2578 int ret = 0; 2579 2580 WARN_ON(btrfs_header_generation(src) != trans->transid); 2581 WARN_ON(btrfs_header_generation(dst) != trans->transid); 2582 2583 src_nritems = btrfs_header_nritems(src); 2584 dst_nritems = btrfs_header_nritems(dst); 2585 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems; 2586 if (push_items <= 0) 2587 return 1; 2588 2589 if (src_nritems < 4) 2590 return 1; 2591 2592 max_push = src_nritems / 2 + 1; 2593 /* don't try to empty the node */ 2594 if (max_push >= src_nritems) 2595 return 1; 2596 2597 if (max_push < push_items) 2598 push_items = max_push; 2599 2600 /* dst is the right eb, src is the middle eb */ 2601 if (check_sibling_keys(src, dst)) { 2602 ret = -EUCLEAN; 2603 btrfs_abort_transaction(trans, ret); 2604 return ret; 2605 } 2606 ret = btrfs_tree_mod_log_insert_move(dst, push_items, 0, dst_nritems); 2607 BUG_ON(ret < 0); 2608 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items), 2609 btrfs_node_key_ptr_offset(0), 2610 (dst_nritems) * 2611 sizeof(struct btrfs_key_ptr)); 2612 2613 ret = btrfs_tree_mod_log_eb_copy(dst, src, 0, src_nritems - push_items, 2614 push_items); 2615 if (ret) { 2616 btrfs_abort_transaction(trans, ret); 2617 return ret; 2618 } 2619 copy_extent_buffer(dst, src, 2620 btrfs_node_key_ptr_offset(0), 2621 btrfs_node_key_ptr_offset(src_nritems - push_items), 2622 push_items * sizeof(struct btrfs_key_ptr)); 2623 2624 btrfs_set_header_nritems(src, src_nritems - push_items); 2625 btrfs_set_header_nritems(dst, dst_nritems + push_items); 2626 2627 btrfs_mark_buffer_dirty(src); 2628 btrfs_mark_buffer_dirty(dst); 2629 2630 return ret; 2631 } 2632 2633 /* 2634 * helper function to insert a new root level in the tree. 2635 * A new node is allocated, and a single item is inserted to 2636 * point to the existing root 2637 * 2638 * returns zero on success or < 0 on failure. 
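*
* On success path->nodes[level] points to the new (write locked) root and
* the old root becomes its only child.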
2639 */ 2640 static noinline int insert_new_root(struct btrfs_trans_handle *trans, 2641 struct btrfs_root *root, 2642 struct btrfs_path *path, int level) 2643 { 2644 struct btrfs_fs_info *fs_info = root->fs_info; 2645 u64 lower_gen; 2646 struct extent_buffer *lower; 2647 struct extent_buffer *c; 2648 struct extent_buffer *old; 2649 struct btrfs_disk_key lower_key; 2650 int ret; 2651 2652 BUG_ON(path->nodes[level]); 2653 BUG_ON(path->nodes[level-1] != root->node); 2654 2655 lower = path->nodes[level-1]; 2656 if (level == 1) 2657 btrfs_item_key(lower, &lower_key, 0); 2658 else 2659 btrfs_node_key(lower, &lower_key, 0); 2660 2661 c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, 2662 &lower_key, level, root->node->start, 0, 2663 BTRFS_NESTING_NEW_ROOT); 2664 if (IS_ERR(c)) 2665 return PTR_ERR(c); 2666 2667 root_add_used(root, fs_info->nodesize); 2668 2669 btrfs_set_header_nritems(c, 1); 2670 btrfs_set_node_key(c, &lower_key, 0); 2671 btrfs_set_node_blockptr(c, 0, lower->start); 2672 lower_gen = btrfs_header_generation(lower); 2673 WARN_ON(lower_gen != trans->transid); 2674 2675 btrfs_set_node_ptr_generation(c, 0, lower_gen); 2676 2677 btrfs_mark_buffer_dirty(c); 2678 2679 old = root->node; 2680 ret = btrfs_tree_mod_log_insert_root(root->node, c, false); 2681 BUG_ON(ret < 0); 2682 rcu_assign_pointer(root->node, c); 2683 2684 /* the super has an extra ref to root->node */ 2685 free_extent_buffer(old); 2686 2687 add_root_to_dirty_list(root); 2688 atomic_inc(&c->refs); 2689 path->nodes[level] = c; 2690 path->locks[level] = BTRFS_WRITE_LOCK; 2691 path->slots[level] = 0; 2692 return 0; 2693 } 2694 2695 /* 2696 * worker function to insert a single pointer in a node. 2697 * the node should have enough room for the pointer already 2698 * 2699 * slot and level indicate where you want the key to go, and 2700 * blocknr is the block the key points to. 2701 */ 2702 static void insert_ptr(struct btrfs_trans_handle *trans, 2703 struct btrfs_path *path, 2704 struct btrfs_disk_key *key, u64 bytenr, 2705 int slot, int level) 2706 { 2707 struct extent_buffer *lower; 2708 int nritems; 2709 int ret; 2710 2711 BUG_ON(!path->nodes[level]); 2712 btrfs_assert_tree_write_locked(path->nodes[level]); 2713 lower = path->nodes[level]; 2714 nritems = btrfs_header_nritems(lower); 2715 BUG_ON(slot > nritems); 2716 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(trans->fs_info)); 2717 if (slot != nritems) { 2718 if (level) { 2719 ret = btrfs_tree_mod_log_insert_move(lower, slot + 1, 2720 slot, nritems - slot); 2721 BUG_ON(ret < 0); 2722 } 2723 memmove_extent_buffer(lower, 2724 btrfs_node_key_ptr_offset(slot + 1), 2725 btrfs_node_key_ptr_offset(slot), 2726 (nritems - slot) * sizeof(struct btrfs_key_ptr)); 2727 } 2728 if (level) { 2729 ret = btrfs_tree_mod_log_insert_key(lower, slot, 2730 BTRFS_MOD_LOG_KEY_ADD, GFP_NOFS); 2731 BUG_ON(ret < 0); 2732 } 2733 btrfs_set_node_key(lower, key, slot); 2734 btrfs_set_node_blockptr(lower, slot, bytenr); 2735 WARN_ON(trans->transid == 0); 2736 btrfs_set_node_ptr_generation(lower, slot, trans->transid); 2737 btrfs_set_header_nritems(lower, nritems + 1); 2738 btrfs_mark_buffer_dirty(lower); 2739 } 2740 2741 /* 2742 * split the node at the specified level in path in two. 2743 * The path is corrected to point to the appropriate node after the split 2744 * 2745 * Before splitting this tries to make some room in the node by pushing 2746 * left and right, if either one works, it returns right away. 
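* Otherwise the upper half of the node is moved into a newly allocated
* sibling and a pointer to it is inserted in the parent (a new root is
* created first if the node was the root).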
2747 * 2748 * returns 0 on success and < 0 on failure 2749 */ 2750 static noinline int split_node(struct btrfs_trans_handle *trans, 2751 struct btrfs_root *root, 2752 struct btrfs_path *path, int level) 2753 { 2754 struct btrfs_fs_info *fs_info = root->fs_info; 2755 struct extent_buffer *c; 2756 struct extent_buffer *split; 2757 struct btrfs_disk_key disk_key; 2758 int mid; 2759 int ret; 2760 u32 c_nritems; 2761 2762 c = path->nodes[level]; 2763 WARN_ON(btrfs_header_generation(c) != trans->transid); 2764 if (c == root->node) { 2765 /* 2766 * trying to split the root, lets make a new one 2767 * 2768 * tree mod log: We don't log_removal old root in 2769 * insert_new_root, because that root buffer will be kept as a 2770 * normal node. We are going to log removal of half of the 2771 * elements below with btrfs_tree_mod_log_eb_copy(). We're 2772 * holding a tree lock on the buffer, which is why we cannot 2773 * race with other tree_mod_log users. 2774 */ 2775 ret = insert_new_root(trans, root, path, level + 1); 2776 if (ret) 2777 return ret; 2778 } else { 2779 ret = push_nodes_for_insert(trans, root, path, level); 2780 c = path->nodes[level]; 2781 if (!ret && btrfs_header_nritems(c) < 2782 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) 2783 return 0; 2784 if (ret < 0) 2785 return ret; 2786 } 2787 2788 c_nritems = btrfs_header_nritems(c); 2789 mid = (c_nritems + 1) / 2; 2790 btrfs_node_key(c, &disk_key, mid); 2791 2792 split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, 2793 &disk_key, level, c->start, 0, 2794 BTRFS_NESTING_SPLIT); 2795 if (IS_ERR(split)) 2796 return PTR_ERR(split); 2797 2798 root_add_used(root, fs_info->nodesize); 2799 ASSERT(btrfs_header_level(c) == level); 2800 2801 ret = btrfs_tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid); 2802 if (ret) { 2803 btrfs_abort_transaction(trans, ret); 2804 return ret; 2805 } 2806 copy_extent_buffer(split, c, 2807 btrfs_node_key_ptr_offset(0), 2808 btrfs_node_key_ptr_offset(mid), 2809 (c_nritems - mid) * sizeof(struct btrfs_key_ptr)); 2810 btrfs_set_header_nritems(split, c_nritems - mid); 2811 btrfs_set_header_nritems(c, mid); 2812 2813 btrfs_mark_buffer_dirty(c); 2814 btrfs_mark_buffer_dirty(split); 2815 2816 insert_ptr(trans, path, &disk_key, split->start, 2817 path->slots[level + 1] + 1, level + 1); 2818 2819 if (path->slots[level] >= mid) { 2820 path->slots[level] -= mid; 2821 btrfs_tree_unlock(c); 2822 free_extent_buffer(c); 2823 path->nodes[level] = split; 2824 path->slots[level + 1] += 1; 2825 } else { 2826 btrfs_tree_unlock(split); 2827 free_extent_buffer(split); 2828 } 2829 return 0; 2830 } 2831 2832 /* 2833 * how many bytes are required to store the items in a leaf. start 2834 * and nr indicate which items in the leaf to check. This totals up the 2835 * space used both by the item structs and the item data 2836 */ 2837 static int leaf_space_used(struct extent_buffer *l, int start, int nr) 2838 { 2839 int data_len; 2840 int nritems = btrfs_header_nritems(l); 2841 int end = min(nritems, start + nr) - 1; 2842 2843 if (!nr) 2844 return 0; 2845 data_len = btrfs_item_offset(l, start) + btrfs_item_size(l, start); 2846 data_len = data_len - btrfs_item_offset(l, end); 2847 data_len += sizeof(struct btrfs_item) * nr; 2848 WARN_ON(data_len < 0); 2849 return data_len; 2850 } 2851 2852 /* 2853 * The space between the end of the leaf items and 2854 * the start of the leaf data. 
IOW, how much room 2855 * the leaf has left for both items and data 2856 */ 2857 noinline int btrfs_leaf_free_space(struct extent_buffer *leaf) 2858 { 2859 struct btrfs_fs_info *fs_info = leaf->fs_info; 2860 int nritems = btrfs_header_nritems(leaf); 2861 int ret; 2862 2863 ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems); 2864 if (ret < 0) { 2865 btrfs_crit(fs_info, 2866 "leaf free space ret %d, leaf data size %lu, used %d nritems %d", 2867 ret, 2868 (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info), 2869 leaf_space_used(leaf, 0, nritems), nritems); 2870 } 2871 return ret; 2872 } 2873 2874 /* 2875 * min slot controls the lowest index we're willing to push to the 2876 * right. We'll push up to and including min_slot, but no lower 2877 */ 2878 static noinline int __push_leaf_right(struct btrfs_path *path, 2879 int data_size, int empty, 2880 struct extent_buffer *right, 2881 int free_space, u32 left_nritems, 2882 u32 min_slot) 2883 { 2884 struct btrfs_fs_info *fs_info = right->fs_info; 2885 struct extent_buffer *left = path->nodes[0]; 2886 struct extent_buffer *upper = path->nodes[1]; 2887 struct btrfs_map_token token; 2888 struct btrfs_disk_key disk_key; 2889 int slot; 2890 u32 i; 2891 int push_space = 0; 2892 int push_items = 0; 2893 u32 nr; 2894 u32 right_nritems; 2895 u32 data_end; 2896 u32 this_item_size; 2897 2898 if (empty) 2899 nr = 0; 2900 else 2901 nr = max_t(u32, 1, min_slot); 2902 2903 if (path->slots[0] >= left_nritems) 2904 push_space += data_size; 2905 2906 slot = path->slots[1]; 2907 i = left_nritems - 1; 2908 while (i >= nr) { 2909 if (!empty && push_items > 0) { 2910 if (path->slots[0] > i) 2911 break; 2912 if (path->slots[0] == i) { 2913 int space = btrfs_leaf_free_space(left); 2914 2915 if (space + push_space * 2 > free_space) 2916 break; 2917 } 2918 } 2919 2920 if (path->slots[0] == i) 2921 push_space += data_size; 2922 2923 this_item_size = btrfs_item_size(left, i); 2924 if (this_item_size + sizeof(struct btrfs_item) + 2925 push_space > free_space) 2926 break; 2927 2928 push_items++; 2929 push_space += this_item_size + sizeof(struct btrfs_item); 2930 if (i == 0) 2931 break; 2932 i--; 2933 } 2934 2935 if (push_items == 0) 2936 goto out_unlock; 2937 2938 WARN_ON(!empty && push_items == left_nritems); 2939 2940 /* push left to right */ 2941 right_nritems = btrfs_header_nritems(right); 2942 2943 push_space = btrfs_item_data_end(left, left_nritems - push_items); 2944 push_space -= leaf_data_end(left); 2945 2946 /* make room in the right data area */ 2947 data_end = leaf_data_end(right); 2948 memmove_extent_buffer(right, 2949 BTRFS_LEAF_DATA_OFFSET + data_end - push_space, 2950 BTRFS_LEAF_DATA_OFFSET + data_end, 2951 BTRFS_LEAF_DATA_SIZE(fs_info) - data_end); 2952 2953 /* copy from the left data area */ 2954 copy_extent_buffer(right, left, BTRFS_LEAF_DATA_OFFSET + 2955 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space, 2956 BTRFS_LEAF_DATA_OFFSET + leaf_data_end(left), 2957 push_space); 2958 2959 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items), 2960 btrfs_item_nr_offset(0), 2961 right_nritems * sizeof(struct btrfs_item)); 2962 2963 /* copy the items from left to right */ 2964 copy_extent_buffer(right, left, btrfs_item_nr_offset(0), 2965 btrfs_item_nr_offset(left_nritems - push_items), 2966 push_items * sizeof(struct btrfs_item)); 2967 2968 /* update the item pointers */ 2969 btrfs_init_map_token(&token, right); 2970 right_nritems += push_items; 2971 btrfs_set_header_nritems(right, right_nritems); 2972 push_space = BTRFS_LEAF_DATA_SIZE(fs_info); 2973 
for (i = 0; i < right_nritems; i++) { 2974 push_space -= btrfs_token_item_size(&token, i); 2975 btrfs_set_token_item_offset(&token, i, push_space); 2976 } 2977 2978 left_nritems -= push_items; 2979 btrfs_set_header_nritems(left, left_nritems); 2980 2981 if (left_nritems) 2982 btrfs_mark_buffer_dirty(left); 2983 else 2984 btrfs_clean_tree_block(left); 2985 2986 btrfs_mark_buffer_dirty(right); 2987 2988 btrfs_item_key(right, &disk_key, 0); 2989 btrfs_set_node_key(upper, &disk_key, slot + 1); 2990 btrfs_mark_buffer_dirty(upper); 2991 2992 /* then fixup the leaf pointer in the path */ 2993 if (path->slots[0] >= left_nritems) { 2994 path->slots[0] -= left_nritems; 2995 if (btrfs_header_nritems(path->nodes[0]) == 0) 2996 btrfs_clean_tree_block(path->nodes[0]); 2997 btrfs_tree_unlock(path->nodes[0]); 2998 free_extent_buffer(path->nodes[0]); 2999 path->nodes[0] = right; 3000 path->slots[1] += 1; 3001 } else { 3002 btrfs_tree_unlock(right); 3003 free_extent_buffer(right); 3004 } 3005 return 0; 3006 3007 out_unlock: 3008 btrfs_tree_unlock(right); 3009 free_extent_buffer(right); 3010 return 1; 3011 } 3012 3013 /* 3014 * push some data in the path leaf to the right, trying to free up at 3015 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3016 * 3017 * returns 1 if the push failed because the other node didn't have enough 3018 * room, 0 if everything worked out and < 0 if there were major errors. 3019 * 3020 * this will push starting from min_slot to the end of the leaf. It won't 3021 * push any slot lower than min_slot 3022 */ 3023 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root 3024 *root, struct btrfs_path *path, 3025 int min_data_size, int data_size, 3026 int empty, u32 min_slot) 3027 { 3028 struct extent_buffer *left = path->nodes[0]; 3029 struct extent_buffer *right; 3030 struct extent_buffer *upper; 3031 int slot; 3032 int free_space; 3033 u32 left_nritems; 3034 int ret; 3035 3036 if (!path->nodes[1]) 3037 return 1; 3038 3039 slot = path->slots[1]; 3040 upper = path->nodes[1]; 3041 if (slot >= btrfs_header_nritems(upper) - 1) 3042 return 1; 3043 3044 btrfs_assert_tree_write_locked(path->nodes[1]); 3045 3046 right = btrfs_read_node_slot(upper, slot + 1); 3047 /* 3048 * slot + 1 is not valid or we fail to read the right node, 3049 * no big deal, just return. 3050 */ 3051 if (IS_ERR(right)) 3052 return 1; 3053 3054 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT); 3055 3056 free_space = btrfs_leaf_free_space(right); 3057 if (free_space < data_size) 3058 goto out_unlock; 3059 3060 ret = btrfs_cow_block(trans, root, right, upper, 3061 slot + 1, &right, BTRFS_NESTING_RIGHT_COW); 3062 if (ret) 3063 goto out_unlock; 3064 3065 left_nritems = btrfs_header_nritems(left); 3066 if (left_nritems == 0) 3067 goto out_unlock; 3068 3069 if (check_sibling_keys(left, right)) { 3070 ret = -EUCLEAN; 3071 btrfs_tree_unlock(right); 3072 free_extent_buffer(right); 3073 return ret; 3074 } 3075 if (path->slots[0] == left_nritems && !empty) { 3076 /* Key greater than all keys in the leaf, right neighbor has 3077 * enough room for it and we're not emptying our leaf to delete 3078 * it, therefore use right neighbor to insert the new item and 3079 * no need to touch/dirty our left leaf. 
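* Just switch the path over to the right leaf, the new item will go
* at its slot 0.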
*/ 3080 btrfs_tree_unlock(left); 3081 free_extent_buffer(left); 3082 path->nodes[0] = right; 3083 path->slots[0] = 0; 3084 path->slots[1]++; 3085 return 0; 3086 } 3087 3088 return __push_leaf_right(path, min_data_size, empty, 3089 right, free_space, left_nritems, min_slot); 3090 out_unlock: 3091 btrfs_tree_unlock(right); 3092 free_extent_buffer(right); 3093 return 1; 3094 } 3095 3096 /* 3097 * push some data in the path leaf to the left, trying to free up at 3098 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3099 * 3100 * max_slot can put a limit on how far into the leaf we'll push items. The 3101 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the 3102 * items 3103 */ 3104 static noinline int __push_leaf_left(struct btrfs_path *path, int data_size, 3105 int empty, struct extent_buffer *left, 3106 int free_space, u32 right_nritems, 3107 u32 max_slot) 3108 { 3109 struct btrfs_fs_info *fs_info = left->fs_info; 3110 struct btrfs_disk_key disk_key; 3111 struct extent_buffer *right = path->nodes[0]; 3112 int i; 3113 int push_space = 0; 3114 int push_items = 0; 3115 u32 old_left_nritems; 3116 u32 nr; 3117 int ret = 0; 3118 u32 this_item_size; 3119 u32 old_left_item_size; 3120 struct btrfs_map_token token; 3121 3122 if (empty) 3123 nr = min(right_nritems, max_slot); 3124 else 3125 nr = min(right_nritems - 1, max_slot); 3126 3127 for (i = 0; i < nr; i++) { 3128 if (!empty && push_items > 0) { 3129 if (path->slots[0] < i) 3130 break; 3131 if (path->slots[0] == i) { 3132 int space = btrfs_leaf_free_space(right); 3133 3134 if (space + push_space * 2 > free_space) 3135 break; 3136 } 3137 } 3138 3139 if (path->slots[0] == i) 3140 push_space += data_size; 3141 3142 this_item_size = btrfs_item_size(right, i); 3143 if (this_item_size + sizeof(struct btrfs_item) + push_space > 3144 free_space) 3145 break; 3146 3147 push_items++; 3148 push_space += this_item_size + sizeof(struct btrfs_item); 3149 } 3150 3151 if (push_items == 0) { 3152 ret = 1; 3153 goto out; 3154 } 3155 WARN_ON(!empty && push_items == btrfs_header_nritems(right)); 3156 3157 /* push data from right to left */ 3158 copy_extent_buffer(left, right, 3159 btrfs_item_nr_offset(btrfs_header_nritems(left)), 3160 btrfs_item_nr_offset(0), 3161 push_items * sizeof(struct btrfs_item)); 3162 3163 push_space = BTRFS_LEAF_DATA_SIZE(fs_info) - 3164 btrfs_item_offset(right, push_items - 1); 3165 3166 copy_extent_buffer(left, right, BTRFS_LEAF_DATA_OFFSET + 3167 leaf_data_end(left) - push_space, 3168 BTRFS_LEAF_DATA_OFFSET + 3169 btrfs_item_offset(right, push_items - 1), 3170 push_space); 3171 old_left_nritems = btrfs_header_nritems(left); 3172 BUG_ON(old_left_nritems <= 0); 3173 3174 btrfs_init_map_token(&token, left); 3175 old_left_item_size = btrfs_item_offset(left, old_left_nritems - 1); 3176 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) { 3177 u32 ioff; 3178 3179 ioff = btrfs_token_item_offset(&token, i); 3180 btrfs_set_token_item_offset(&token, i, 3181 ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size)); 3182 } 3183 btrfs_set_header_nritems(left, old_left_nritems + push_items); 3184 3185 /* fixup right node */ 3186 if (push_items > right_nritems) 3187 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items, 3188 right_nritems); 3189 3190 if (push_items < right_nritems) { 3191 push_space = btrfs_item_offset(right, push_items - 1) - 3192 leaf_data_end(right); 3193 memmove_extent_buffer(right, BTRFS_LEAF_DATA_OFFSET + 3194 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space, 3195 
BTRFS_LEAF_DATA_OFFSET + 3196 leaf_data_end(right), push_space); 3197 3198 memmove_extent_buffer(right, btrfs_item_nr_offset(0), 3199 btrfs_item_nr_offset(push_items), 3200 (btrfs_header_nritems(right) - push_items) * 3201 sizeof(struct btrfs_item)); 3202 } 3203 3204 btrfs_init_map_token(&token, right); 3205 right_nritems -= push_items; 3206 btrfs_set_header_nritems(right, right_nritems); 3207 push_space = BTRFS_LEAF_DATA_SIZE(fs_info); 3208 for (i = 0; i < right_nritems; i++) { 3209 push_space = push_space - btrfs_token_item_size(&token, i); 3210 btrfs_set_token_item_offset(&token, i, push_space); 3211 } 3212 3213 btrfs_mark_buffer_dirty(left); 3214 if (right_nritems) 3215 btrfs_mark_buffer_dirty(right); 3216 else 3217 btrfs_clean_tree_block(right); 3218 3219 btrfs_item_key(right, &disk_key, 0); 3220 fixup_low_keys(path, &disk_key, 1); 3221 3222 /* then fixup the leaf pointer in the path */ 3223 if (path->slots[0] < push_items) { 3224 path->slots[0] += old_left_nritems; 3225 btrfs_tree_unlock(path->nodes[0]); 3226 free_extent_buffer(path->nodes[0]); 3227 path->nodes[0] = left; 3228 path->slots[1] -= 1; 3229 } else { 3230 btrfs_tree_unlock(left); 3231 free_extent_buffer(left); 3232 path->slots[0] -= push_items; 3233 } 3234 BUG_ON(path->slots[0] < 0); 3235 return ret; 3236 out: 3237 btrfs_tree_unlock(left); 3238 free_extent_buffer(left); 3239 return ret; 3240 } 3241 3242 /* 3243 * push some data in the path leaf to the left, trying to free up at 3244 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3245 * 3246 * max_slot can put a limit on how far into the leaf we'll push items. The 3247 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the 3248 * items 3249 */ 3250 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root 3251 *root, struct btrfs_path *path, int min_data_size, 3252 int data_size, int empty, u32 max_slot) 3253 { 3254 struct extent_buffer *right = path->nodes[0]; 3255 struct extent_buffer *left; 3256 int slot; 3257 int free_space; 3258 u32 right_nritems; 3259 int ret = 0; 3260 3261 slot = path->slots[1]; 3262 if (slot == 0) 3263 return 1; 3264 if (!path->nodes[1]) 3265 return 1; 3266 3267 right_nritems = btrfs_header_nritems(right); 3268 if (right_nritems == 0) 3269 return 1; 3270 3271 btrfs_assert_tree_write_locked(path->nodes[1]); 3272 3273 left = btrfs_read_node_slot(path->nodes[1], slot - 1); 3274 /* 3275 * slot - 1 is not valid or we fail to read the left node, 3276 * no big deal, just return. 3277 */ 3278 if (IS_ERR(left)) 3279 return 1; 3280 3281 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT); 3282 3283 free_space = btrfs_leaf_free_space(left); 3284 if (free_space < data_size) { 3285 ret = 1; 3286 goto out; 3287 } 3288 3289 ret = btrfs_cow_block(trans, root, left, 3290 path->nodes[1], slot - 1, &left, 3291 BTRFS_NESTING_LEFT_COW); 3292 if (ret) { 3293 /* we hit -ENOSPC, but it isn't fatal here */ 3294 if (ret == -ENOSPC) 3295 ret = 1; 3296 goto out; 3297 } 3298 3299 if (check_sibling_keys(left, right)) { 3300 ret = -EUCLEAN; 3301 goto out; 3302 } 3303 return __push_leaf_left(path, min_data_size, 3304 empty, left, free_space, right_nritems, 3305 max_slot); 3306 out: 3307 btrfs_tree_unlock(left); 3308 free_extent_buffer(left); 3309 return ret; 3310 } 3311 3312 /* 3313 * split the path's leaf in two, making sure there is at least data_size 3314 * available for the resulting leaf level of the path. 
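*
* @right is a newly allocated, empty leaf provided by the caller. The items
* from slot @mid onwards are moved into it, a pointer to it is inserted in
* the parent and the path is switched over to it when @slot lands in the
* moved range.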
3315 */ 3316 static noinline void copy_for_split(struct btrfs_trans_handle *trans, 3317 struct btrfs_path *path, 3318 struct extent_buffer *l, 3319 struct extent_buffer *right, 3320 int slot, int mid, int nritems) 3321 { 3322 struct btrfs_fs_info *fs_info = trans->fs_info; 3323 int data_copy_size; 3324 int rt_data_off; 3325 int i; 3326 struct btrfs_disk_key disk_key; 3327 struct btrfs_map_token token; 3328 3329 nritems = nritems - mid; 3330 btrfs_set_header_nritems(right, nritems); 3331 data_copy_size = btrfs_item_data_end(l, mid) - leaf_data_end(l); 3332 3333 copy_extent_buffer(right, l, btrfs_item_nr_offset(0), 3334 btrfs_item_nr_offset(mid), 3335 nritems * sizeof(struct btrfs_item)); 3336 3337 copy_extent_buffer(right, l, 3338 BTRFS_LEAF_DATA_OFFSET + BTRFS_LEAF_DATA_SIZE(fs_info) - 3339 data_copy_size, BTRFS_LEAF_DATA_OFFSET + 3340 leaf_data_end(l), data_copy_size); 3341 3342 rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_data_end(l, mid); 3343 3344 btrfs_init_map_token(&token, right); 3345 for (i = 0; i < nritems; i++) { 3346 u32 ioff; 3347 3348 ioff = btrfs_token_item_offset(&token, i); 3349 btrfs_set_token_item_offset(&token, i, ioff + rt_data_off); 3350 } 3351 3352 btrfs_set_header_nritems(l, mid); 3353 btrfs_item_key(right, &disk_key, 0); 3354 insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1); 3355 3356 btrfs_mark_buffer_dirty(right); 3357 btrfs_mark_buffer_dirty(l); 3358 BUG_ON(path->slots[0] != slot); 3359 3360 if (mid <= slot) { 3361 btrfs_tree_unlock(path->nodes[0]); 3362 free_extent_buffer(path->nodes[0]); 3363 path->nodes[0] = right; 3364 path->slots[0] -= mid; 3365 path->slots[1] += 1; 3366 } else { 3367 btrfs_tree_unlock(right); 3368 free_extent_buffer(right); 3369 } 3370 3371 BUG_ON(path->slots[0] < 0); 3372 } 3373 3374 /* 3375 * double splits happen when we need to insert a big item in the middle 3376 * of a leaf. A double split can leave us with 3 mostly empty leaves: 3377 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ] 3378 * A B C 3379 * 3380 * We avoid this by trying to push the items on either side of our target 3381 * into the adjacent leaves. If all goes well we can avoid the double split 3382 * completely. 3383 */ 3384 static noinline int push_for_double_split(struct btrfs_trans_handle *trans, 3385 struct btrfs_root *root, 3386 struct btrfs_path *path, 3387 int data_size) 3388 { 3389 int ret; 3390 int progress = 0; 3391 int slot; 3392 u32 nritems; 3393 int space_needed = data_size; 3394 3395 slot = path->slots[0]; 3396 if (slot < btrfs_header_nritems(path->nodes[0])) 3397 space_needed -= btrfs_leaf_free_space(path->nodes[0]); 3398 3399 /* 3400 * try to push all the items after our slot into the 3401 * right leaf 3402 */ 3403 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot); 3404 if (ret < 0) 3405 return ret; 3406 3407 if (ret == 0) 3408 progress++; 3409 3410 nritems = btrfs_header_nritems(path->nodes[0]); 3411 /* 3412 * our goal is to get our slot at the start or end of a leaf. 
If 3413 * we've done so we're done 3414 */ 3415 if (path->slots[0] == 0 || path->slots[0] == nritems) 3416 return 0; 3417 3418 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size) 3419 return 0; 3420 3421 /* try to push all the items before our slot into the next leaf */ 3422 slot = path->slots[0]; 3423 space_needed = data_size; 3424 if (slot > 0) 3425 space_needed -= btrfs_leaf_free_space(path->nodes[0]); 3426 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot); 3427 if (ret < 0) 3428 return ret; 3429 3430 if (ret == 0) 3431 progress++; 3432 3433 if (progress) 3434 return 0; 3435 return 1; 3436 } 3437 3438 /* 3439 * split the path's leaf in two, making sure there is at least data_size 3440 * available for the resulting leaf level of the path. 3441 * 3442 * returns 0 if all went well and < 0 on failure. 3443 */ 3444 static noinline int split_leaf(struct btrfs_trans_handle *trans, 3445 struct btrfs_root *root, 3446 const struct btrfs_key *ins_key, 3447 struct btrfs_path *path, int data_size, 3448 int extend) 3449 { 3450 struct btrfs_disk_key disk_key; 3451 struct extent_buffer *l; 3452 u32 nritems; 3453 int mid; 3454 int slot; 3455 struct extent_buffer *right; 3456 struct btrfs_fs_info *fs_info = root->fs_info; 3457 int ret = 0; 3458 int wret; 3459 int split; 3460 int num_doubles = 0; 3461 int tried_avoid_double = 0; 3462 3463 l = path->nodes[0]; 3464 slot = path->slots[0]; 3465 if (extend && data_size + btrfs_item_size(l, slot) + 3466 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info)) 3467 return -EOVERFLOW; 3468 3469 /* first try to make some room by pushing left and right */ 3470 if (data_size && path->nodes[1]) { 3471 int space_needed = data_size; 3472 3473 if (slot < btrfs_header_nritems(l)) 3474 space_needed -= btrfs_leaf_free_space(l); 3475 3476 wret = push_leaf_right(trans, root, path, space_needed, 3477 space_needed, 0, 0); 3478 if (wret < 0) 3479 return wret; 3480 if (wret) { 3481 space_needed = data_size; 3482 if (slot > 0) 3483 space_needed -= btrfs_leaf_free_space(l); 3484 wret = push_leaf_left(trans, root, path, space_needed, 3485 space_needed, 0, (u32)-1); 3486 if (wret < 0) 3487 return wret; 3488 } 3489 l = path->nodes[0]; 3490 3491 /* did the pushes work? 
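If so, the leaf now has enough free space and we can avoid splitting it.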
*/ 3492 if (btrfs_leaf_free_space(l) >= data_size) 3493 return 0; 3494 } 3495 3496 if (!path->nodes[1]) { 3497 ret = insert_new_root(trans, root, path, 1); 3498 if (ret) 3499 return ret; 3500 } 3501 again: 3502 split = 1; 3503 l = path->nodes[0]; 3504 slot = path->slots[0]; 3505 nritems = btrfs_header_nritems(l); 3506 mid = (nritems + 1) / 2; 3507 3508 if (mid <= slot) { 3509 if (nritems == 1 || 3510 leaf_space_used(l, mid, nritems - mid) + data_size > 3511 BTRFS_LEAF_DATA_SIZE(fs_info)) { 3512 if (slot >= nritems) { 3513 split = 0; 3514 } else { 3515 mid = slot; 3516 if (mid != nritems && 3517 leaf_space_used(l, mid, nritems - mid) + 3518 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) { 3519 if (data_size && !tried_avoid_double) 3520 goto push_for_double; 3521 split = 2; 3522 } 3523 } 3524 } 3525 } else { 3526 if (leaf_space_used(l, 0, mid) + data_size > 3527 BTRFS_LEAF_DATA_SIZE(fs_info)) { 3528 if (!extend && data_size && slot == 0) { 3529 split = 0; 3530 } else if ((extend || !data_size) && slot == 0) { 3531 mid = 1; 3532 } else { 3533 mid = slot; 3534 if (mid != nritems && 3535 leaf_space_used(l, mid, nritems - mid) + 3536 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) { 3537 if (data_size && !tried_avoid_double) 3538 goto push_for_double; 3539 split = 2; 3540 } 3541 } 3542 } 3543 } 3544 3545 if (split == 0) 3546 btrfs_cpu_key_to_disk(&disk_key, ins_key); 3547 else 3548 btrfs_item_key(l, &disk_key, mid); 3549 3550 /* 3551 * We have to about BTRFS_NESTING_NEW_ROOT here if we've done a double 3552 * split, because we're only allowed to have MAX_LOCKDEP_SUBCLASSES 3553 * subclasses, which is 8 at the time of this patch, and we've maxed it 3554 * out. In the future we could add a 3555 * BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just 3556 * use BTRFS_NESTING_NEW_ROOT. 3557 */ 3558 right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, 3559 &disk_key, 0, l->start, 0, 3560 num_doubles ? BTRFS_NESTING_NEW_ROOT : 3561 BTRFS_NESTING_SPLIT); 3562 if (IS_ERR(right)) 3563 return PTR_ERR(right); 3564 3565 root_add_used(root, fs_info->nodesize); 3566 3567 if (split == 0) { 3568 if (mid <= slot) { 3569 btrfs_set_header_nritems(right, 0); 3570 insert_ptr(trans, path, &disk_key, 3571 right->start, path->slots[1] + 1, 1); 3572 btrfs_tree_unlock(path->nodes[0]); 3573 free_extent_buffer(path->nodes[0]); 3574 path->nodes[0] = right; 3575 path->slots[0] = 0; 3576 path->slots[1] += 1; 3577 } else { 3578 btrfs_set_header_nritems(right, 0); 3579 insert_ptr(trans, path, &disk_key, 3580 right->start, path->slots[1], 1); 3581 btrfs_tree_unlock(path->nodes[0]); 3582 free_extent_buffer(path->nodes[0]); 3583 path->nodes[0] = right; 3584 path->slots[0] = 0; 3585 if (path->slots[1] == 0) 3586 fixup_low_keys(path, &disk_key, 1); 3587 } 3588 /* 3589 * We create a new leaf 'right' for the required ins_len and 3590 * we'll do btrfs_mark_buffer_dirty() on this leaf after copying 3591 * the content of ins_len to 'right'. 
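* The new leaf is left empty here (nritems == 0), the caller is expected
* to fill in the new item and mark the leaf dirty.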
3592 */ 3593 return ret; 3594 } 3595 3596 copy_for_split(trans, path, l, right, slot, mid, nritems); 3597 3598 if (split == 2) { 3599 BUG_ON(num_doubles != 0); 3600 num_doubles++; 3601 goto again; 3602 } 3603 3604 return 0; 3605 3606 push_for_double: 3607 push_for_double_split(trans, root, path, data_size); 3608 tried_avoid_double = 1; 3609 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size) 3610 return 0; 3611 goto again; 3612 } 3613 3614 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans, 3615 struct btrfs_root *root, 3616 struct btrfs_path *path, int ins_len) 3617 { 3618 struct btrfs_key key; 3619 struct extent_buffer *leaf; 3620 struct btrfs_file_extent_item *fi; 3621 u64 extent_len = 0; 3622 u32 item_size; 3623 int ret; 3624 3625 leaf = path->nodes[0]; 3626 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3627 3628 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY && 3629 key.type != BTRFS_EXTENT_CSUM_KEY); 3630 3631 if (btrfs_leaf_free_space(leaf) >= ins_len) 3632 return 0; 3633 3634 item_size = btrfs_item_size(leaf, path->slots[0]); 3635 if (key.type == BTRFS_EXTENT_DATA_KEY) { 3636 fi = btrfs_item_ptr(leaf, path->slots[0], 3637 struct btrfs_file_extent_item); 3638 extent_len = btrfs_file_extent_num_bytes(leaf, fi); 3639 } 3640 btrfs_release_path(path); 3641 3642 path->keep_locks = 1; 3643 path->search_for_split = 1; 3644 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 3645 path->search_for_split = 0; 3646 if (ret > 0) 3647 ret = -EAGAIN; 3648 if (ret < 0) 3649 goto err; 3650 3651 ret = -EAGAIN; 3652 leaf = path->nodes[0]; 3653 /* if our item isn't there, return now */ 3654 if (item_size != btrfs_item_size(leaf, path->slots[0])) 3655 goto err; 3656 3657 /* the leaf has changed, it now has room. return now */ 3658 if (btrfs_leaf_free_space(path->nodes[0]) >= ins_len) 3659 goto err; 3660 3661 if (key.type == BTRFS_EXTENT_DATA_KEY) { 3662 fi = btrfs_item_ptr(leaf, path->slots[0], 3663 struct btrfs_file_extent_item); 3664 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi)) 3665 goto err; 3666 } 3667 3668 ret = split_leaf(trans, root, &key, path, ins_len, 1); 3669 if (ret) 3670 goto err; 3671 3672 path->keep_locks = 0; 3673 btrfs_unlock_up_safe(path, 1); 3674 return 0; 3675 err: 3676 path->keep_locks = 0; 3677 return ret; 3678 } 3679 3680 static noinline int split_item(struct btrfs_path *path, 3681 const struct btrfs_key *new_key, 3682 unsigned long split_offset) 3683 { 3684 struct extent_buffer *leaf; 3685 int orig_slot, slot; 3686 char *buf; 3687 u32 nritems; 3688 u32 item_size; 3689 u32 orig_offset; 3690 struct btrfs_disk_key disk_key; 3691 3692 leaf = path->nodes[0]; 3693 BUG_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item)); 3694 3695 orig_slot = path->slots[0]; 3696 orig_offset = btrfs_item_offset(leaf, path->slots[0]); 3697 item_size = btrfs_item_size(leaf, path->slots[0]); 3698 3699 buf = kmalloc(item_size, GFP_NOFS); 3700 if (!buf) 3701 return -ENOMEM; 3702 3703 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, 3704 path->slots[0]), item_size); 3705 3706 slot = path->slots[0] + 1; 3707 nritems = btrfs_header_nritems(leaf); 3708 if (slot != nritems) { 3709 /* shift the items */ 3710 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1), 3711 btrfs_item_nr_offset(slot), 3712 (nritems - slot) * sizeof(struct btrfs_item)); 3713 } 3714 3715 btrfs_cpu_key_to_disk(&disk_key, new_key); 3716 btrfs_set_item_key(leaf, &disk_key, slot); 3717 3718 btrfs_set_item_offset(leaf, slot, orig_offset); 3719 btrfs_set_item_size(leaf, slot, 
item_size - split_offset); 3720 3721 btrfs_set_item_offset(leaf, orig_slot, 3722 orig_offset + item_size - split_offset); 3723 btrfs_set_item_size(leaf, orig_slot, split_offset); 3724 3725 btrfs_set_header_nritems(leaf, nritems + 1); 3726 3727 /* write the data for the start of the original item */ 3728 write_extent_buffer(leaf, buf, 3729 btrfs_item_ptr_offset(leaf, path->slots[0]), 3730 split_offset); 3731 3732 /* write the data for the new item */ 3733 write_extent_buffer(leaf, buf + split_offset, 3734 btrfs_item_ptr_offset(leaf, slot), 3735 item_size - split_offset); 3736 btrfs_mark_buffer_dirty(leaf); 3737 3738 BUG_ON(btrfs_leaf_free_space(leaf) < 0); 3739 kfree(buf); 3740 return 0; 3741 } 3742 3743 /* 3744 * This function splits a single item into two items, 3745 * giving 'new_key' to the new item and splitting the 3746 * old one at split_offset (from the start of the item). 3747 * 3748 * The path may be released by this operation. After 3749 * the split, the path is pointing to the old item. The 3750 * new item is going to be in the same node as the old one. 3751 * 3752 * Note, the item being split must be smaller enough to live alone on 3753 * a tree block with room for one extra struct btrfs_item 3754 * 3755 * This allows us to split the item in place, keeping a lock on the 3756 * leaf the entire time. 3757 */ 3758 int btrfs_split_item(struct btrfs_trans_handle *trans, 3759 struct btrfs_root *root, 3760 struct btrfs_path *path, 3761 const struct btrfs_key *new_key, 3762 unsigned long split_offset) 3763 { 3764 int ret; 3765 ret = setup_leaf_for_split(trans, root, path, 3766 sizeof(struct btrfs_item)); 3767 if (ret) 3768 return ret; 3769 3770 ret = split_item(path, new_key, split_offset); 3771 return ret; 3772 } 3773 3774 /* 3775 * make the item pointed to by the path smaller. new_size indicates 3776 * how small to make it, and from_end tells us if we just chop bytes 3777 * off the end of the item or if we shift the item to chop bytes off 3778 * the front. 3779 */ 3780 void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end) 3781 { 3782 int slot; 3783 struct extent_buffer *leaf; 3784 u32 nritems; 3785 unsigned int data_end; 3786 unsigned int old_data_start; 3787 unsigned int old_size; 3788 unsigned int size_diff; 3789 int i; 3790 struct btrfs_map_token token; 3791 3792 leaf = path->nodes[0]; 3793 slot = path->slots[0]; 3794 3795 old_size = btrfs_item_size(leaf, slot); 3796 if (old_size == new_size) 3797 return; 3798 3799 nritems = btrfs_header_nritems(leaf); 3800 data_end = leaf_data_end(leaf); 3801 3802 old_data_start = btrfs_item_offset(leaf, slot); 3803 3804 size_diff = old_size - new_size; 3805 3806 BUG_ON(slot < 0); 3807 BUG_ON(slot >= nritems); 3808 3809 /* 3810 * item0..itemN ... dataN.offset..dataN.size .. 
data0.size 3811 */ 3812 /* first correct the data pointers */ 3813 btrfs_init_map_token(&token, leaf); 3814 for (i = slot; i < nritems; i++) { 3815 u32 ioff; 3816 3817 ioff = btrfs_token_item_offset(&token, i); 3818 btrfs_set_token_item_offset(&token, i, ioff + size_diff); 3819 } 3820 3821 /* shift the data */ 3822 if (from_end) { 3823 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET + 3824 data_end + size_diff, BTRFS_LEAF_DATA_OFFSET + 3825 data_end, old_data_start + new_size - data_end); 3826 } else { 3827 struct btrfs_disk_key disk_key; 3828 u64 offset; 3829 3830 btrfs_item_key(leaf, &disk_key, slot); 3831 3832 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) { 3833 unsigned long ptr; 3834 struct btrfs_file_extent_item *fi; 3835 3836 fi = btrfs_item_ptr(leaf, slot, 3837 struct btrfs_file_extent_item); 3838 fi = (struct btrfs_file_extent_item *)( 3839 (unsigned long)fi - size_diff); 3840 3841 if (btrfs_file_extent_type(leaf, fi) == 3842 BTRFS_FILE_EXTENT_INLINE) { 3843 ptr = btrfs_item_ptr_offset(leaf, slot); 3844 memmove_extent_buffer(leaf, ptr, 3845 (unsigned long)fi, 3846 BTRFS_FILE_EXTENT_INLINE_DATA_START); 3847 } 3848 } 3849 3850 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET + 3851 data_end + size_diff, BTRFS_LEAF_DATA_OFFSET + 3852 data_end, old_data_start - data_end); 3853 3854 offset = btrfs_disk_key_offset(&disk_key); 3855 btrfs_set_disk_key_offset(&disk_key, offset + size_diff); 3856 btrfs_set_item_key(leaf, &disk_key, slot); 3857 if (slot == 0) 3858 fixup_low_keys(path, &disk_key, 1); 3859 } 3860 3861 btrfs_set_item_size(leaf, slot, new_size); 3862 btrfs_mark_buffer_dirty(leaf); 3863 3864 if (btrfs_leaf_free_space(leaf) < 0) { 3865 btrfs_print_leaf(leaf); 3866 BUG(); 3867 } 3868 } 3869 3870 /* 3871 * make the item pointed to by the path bigger, data_size is the added size. 3872 */ 3873 void btrfs_extend_item(struct btrfs_path *path, u32 data_size) 3874 { 3875 int slot; 3876 struct extent_buffer *leaf; 3877 u32 nritems; 3878 unsigned int data_end; 3879 unsigned int old_data; 3880 unsigned int old_size; 3881 int i; 3882 struct btrfs_map_token token; 3883 3884 leaf = path->nodes[0]; 3885 3886 nritems = btrfs_header_nritems(leaf); 3887 data_end = leaf_data_end(leaf); 3888 3889 if (btrfs_leaf_free_space(leaf) < data_size) { 3890 btrfs_print_leaf(leaf); 3891 BUG(); 3892 } 3893 slot = path->slots[0]; 3894 old_data = btrfs_item_data_end(leaf, slot); 3895 3896 BUG_ON(slot < 0); 3897 if (slot >= nritems) { 3898 btrfs_print_leaf(leaf); 3899 btrfs_crit(leaf->fs_info, "slot %d too large, nritems %d", 3900 slot, nritems); 3901 BUG(); 3902 } 3903 3904 /* 3905 * item0..itemN ... dataN.offset..dataN.size .. data0.size 3906 */ 3907 /* first correct the data pointers */ 3908 btrfs_init_map_token(&token, leaf); 3909 for (i = slot; i < nritems; i++) { 3910 u32 ioff; 3911 3912 ioff = btrfs_token_item_offset(&token, i); 3913 btrfs_set_token_item_offset(&token, i, ioff - data_size); 3914 } 3915 3916 /* shift the data */ 3917 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET + 3918 data_end - data_size, BTRFS_LEAF_DATA_OFFSET + 3919 data_end, old_data - data_end); 3920 3921 data_end = old_data; 3922 old_size = btrfs_item_size(leaf, slot); 3923 btrfs_set_item_size(leaf, slot, old_size + data_size); 3924 btrfs_mark_buffer_dirty(leaf); 3925 3926 if (btrfs_leaf_free_space(leaf) < 0) { 3927 btrfs_print_leaf(leaf); 3928 BUG(); 3929 } 3930 } 3931 3932 /** 3933 * setup_items_for_insert - Helper called before inserting one or more items 3934 * to a leaf. 
Main purpose is to save stack depth by doing the bulk of the work 3935 * in a function that doesn't call btrfs_search_slot 3936 * 3937 * @root: root we are inserting items to 3938 * @path: points to the leaf/slot where we are going to insert new items 3939 * @batch: information about the batch of items to insert 3940 */ 3941 static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path, 3942 const struct btrfs_item_batch *batch) 3943 { 3944 struct btrfs_fs_info *fs_info = root->fs_info; 3945 int i; 3946 u32 nritems; 3947 unsigned int data_end; 3948 struct btrfs_disk_key disk_key; 3949 struct extent_buffer *leaf; 3950 int slot; 3951 struct btrfs_map_token token; 3952 u32 total_size; 3953 3954 /* 3955 * Before anything else, update keys in the parent and other ancestors 3956 * if needed, then release the write locks on them, so that other tasks 3957 * can use them while we modify the leaf. 3958 */ 3959 if (path->slots[0] == 0) { 3960 btrfs_cpu_key_to_disk(&disk_key, &batch->keys[0]); 3961 fixup_low_keys(path, &disk_key, 1); 3962 } 3963 btrfs_unlock_up_safe(path, 1); 3964 3965 leaf = path->nodes[0]; 3966 slot = path->slots[0]; 3967 3968 nritems = btrfs_header_nritems(leaf); 3969 data_end = leaf_data_end(leaf); 3970 total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item)); 3971 3972 if (btrfs_leaf_free_space(leaf) < total_size) { 3973 btrfs_print_leaf(leaf); 3974 btrfs_crit(fs_info, "not enough freespace need %u have %d", 3975 total_size, btrfs_leaf_free_space(leaf)); 3976 BUG(); 3977 } 3978 3979 btrfs_init_map_token(&token, leaf); 3980 if (slot != nritems) { 3981 unsigned int old_data = btrfs_item_data_end(leaf, slot); 3982 3983 if (old_data < data_end) { 3984 btrfs_print_leaf(leaf); 3985 btrfs_crit(fs_info, 3986 "item at slot %d with data offset %u beyond data end of leaf %u", 3987 slot, old_data, data_end); 3988 BUG(); 3989 } 3990 /* 3991 * item0..itemN ... dataN.offset..dataN.size .. data0.size 3992 */ 3993 /* first correct the data pointers */ 3994 for (i = slot; i < nritems; i++) { 3995 u32 ioff; 3996 3997 ioff = btrfs_token_item_offset(&token, i); 3998 btrfs_set_token_item_offset(&token, i, 3999 ioff - batch->total_data_size); 4000 } 4001 /* shift the items */ 4002 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + batch->nr), 4003 btrfs_item_nr_offset(slot), 4004 (nritems - slot) * sizeof(struct btrfs_item)); 4005 4006 /* shift the data */ 4007 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET + 4008 data_end - batch->total_data_size, 4009 BTRFS_LEAF_DATA_OFFSET + data_end, 4010 old_data - data_end); 4011 data_end = old_data; 4012 } 4013 4014 /* setup the item for the new data */ 4015 for (i = 0; i < batch->nr; i++) { 4016 btrfs_cpu_key_to_disk(&disk_key, &batch->keys[i]); 4017 btrfs_set_item_key(leaf, &disk_key, slot + i); 4018 data_end -= batch->data_sizes[i]; 4019 btrfs_set_token_item_offset(&token, slot + i, data_end); 4020 btrfs_set_token_item_size(&token, slot + i, batch->data_sizes[i]); 4021 } 4022 4023 btrfs_set_header_nritems(leaf, nritems + batch->nr); 4024 btrfs_mark_buffer_dirty(leaf); 4025 4026 if (btrfs_leaf_free_space(leaf) < 0) { 4027 btrfs_print_leaf(leaf); 4028 BUG(); 4029 } 4030 } 4031 4032 /* 4033 * Insert a new item into a leaf. 4034 * 4035 * @root: The root of the btree. 4036 * @path: A path pointing to the target leaf and slot. 4037 * @key: The key of the new item. 4038 * @data_size: The size of the data associated with the new key. 
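 *
 * A rough usage sketch (the caller context, 'key', 'data' and 'data_size'
 * below are illustrative assumptions, not taken from an existing caller):
 * once room has been reserved in the leaf, e.g. by a btrfs_search_slot()
 * call with a non-zero ins_len, the item can be set up and its payload
 * copied in place:
 *
 *	btrfs_setup_item_for_insert(root, path, &key, data_size);
 *	write_extent_buffer(path->nodes[0], data,
 *			    btrfs_item_ptr_offset(path->nodes[0],
 *						  path->slots[0]),
 *			    data_size);
 *	btrfs_mark_buffer_dirty(path->nodes[0]);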
4039 */ 4040 void btrfs_setup_item_for_insert(struct btrfs_root *root, 4041 struct btrfs_path *path, 4042 const struct btrfs_key *key, 4043 u32 data_size) 4044 { 4045 struct btrfs_item_batch batch; 4046 4047 batch.keys = key; 4048 batch.data_sizes = &data_size; 4049 batch.total_data_size = data_size; 4050 batch.nr = 1; 4051 4052 setup_items_for_insert(root, path, &batch); 4053 } 4054 4055 /* 4056 * Given a key and some data, insert items into the tree. 4057 * This does all the path init required, making room in the tree if needed. 4058 */ 4059 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, 4060 struct btrfs_root *root, 4061 struct btrfs_path *path, 4062 const struct btrfs_item_batch *batch) 4063 { 4064 int ret = 0; 4065 int slot; 4066 u32 total_size; 4067 4068 total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item)); 4069 ret = btrfs_search_slot(trans, root, &batch->keys[0], path, total_size, 1); 4070 if (ret == 0) 4071 return -EEXIST; 4072 if (ret < 0) 4073 return ret; 4074 4075 slot = path->slots[0]; 4076 BUG_ON(slot < 0); 4077 4078 setup_items_for_insert(root, path, batch); 4079 return 0; 4080 } 4081 4082 /* 4083 * Given a key and some data, insert an item into the tree. 4084 * This does all the path init required, making room in the tree if needed. 4085 */ 4086 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, 4087 const struct btrfs_key *cpu_key, void *data, 4088 u32 data_size) 4089 { 4090 int ret = 0; 4091 struct btrfs_path *path; 4092 struct extent_buffer *leaf; 4093 unsigned long ptr; 4094 4095 path = btrfs_alloc_path(); 4096 if (!path) 4097 return -ENOMEM; 4098 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size); 4099 if (!ret) { 4100 leaf = path->nodes[0]; 4101 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 4102 write_extent_buffer(leaf, data, ptr, data_size); 4103 btrfs_mark_buffer_dirty(leaf); 4104 } 4105 btrfs_free_path(path); 4106 return ret; 4107 } 4108 4109 /* 4110 * This function duplicates an item, giving 'new_key' to the new item. 4111 * It guarantees both items live in the same tree leaf and the new item is 4112 * contiguous with the original item. 4113 * 4114 * This allows us to split a file extent in place, keeping a lock on the leaf 4115 * the entire time. 4116 */ 4117 int btrfs_duplicate_item(struct btrfs_trans_handle *trans, 4118 struct btrfs_root *root, 4119 struct btrfs_path *path, 4120 const struct btrfs_key *new_key) 4121 { 4122 struct extent_buffer *leaf; 4123 int ret; 4124 u32 item_size; 4125 4126 leaf = path->nodes[0]; 4127 item_size = btrfs_item_size(leaf, path->slots[0]); 4128 ret = setup_leaf_for_split(trans, root, path, 4129 item_size + sizeof(struct btrfs_item)); 4130 if (ret) 4131 return ret; 4132 4133 path->slots[0]++; 4134 btrfs_setup_item_for_insert(root, path, new_key, item_size); 4135 leaf = path->nodes[0]; 4136 memcpy_extent_buffer(leaf, 4137 btrfs_item_ptr_offset(leaf, path->slots[0]), 4138 btrfs_item_ptr_offset(leaf, path->slots[0] - 1), 4139 item_size); 4140 return 0; 4141 } 4142 4143 /* 4144 * delete the pointer from a given node. 4145 * 4146 * the tree should have been previously balanced so the deletion does not 4147 * empty a node. 
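 *
 * Concretely (see the code below): the key pointers following 'slot' are
 * shifted back by one position (for internal nodes the move is also
 * recorded in the tree mod log), the header nritems count is decreased,
 * and when slot 0 is removed fixup_low_keys() propagates the new first key
 * to the ancestors. If this empties the root node, the root is simply
 * turned back into a leaf.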
4148 */
4149 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4150 int level, int slot)
4151 {
4152 struct extent_buffer *parent = path->nodes[level];
4153 u32 nritems;
4154 int ret;
4155
4156 nritems = btrfs_header_nritems(parent);
4157 if (slot != nritems - 1) {
4158 if (level) {
4159 ret = btrfs_tree_mod_log_insert_move(parent, slot,
4160 slot + 1, nritems - slot - 1);
4161 BUG_ON(ret < 0);
4162 }
4163 memmove_extent_buffer(parent,
4164 btrfs_node_key_ptr_offset(slot),
4165 btrfs_node_key_ptr_offset(slot + 1),
4166 sizeof(struct btrfs_key_ptr) *
4167 (nritems - slot - 1));
4168 } else if (level) {
4169 ret = btrfs_tree_mod_log_insert_key(parent, slot,
4170 BTRFS_MOD_LOG_KEY_REMOVE, GFP_NOFS);
4171 BUG_ON(ret < 0);
4172 }
4173
4174 nritems--;
4175 btrfs_set_header_nritems(parent, nritems);
4176 if (nritems == 0 && parent == root->node) {
4177 BUG_ON(btrfs_header_level(root->node) != 1);
4178 /* just turn the root into a leaf and break */
4179 btrfs_set_header_level(root->node, 0);
4180 } else if (slot == 0) {
4181 struct btrfs_disk_key disk_key;
4182
4183 btrfs_node_key(parent, &disk_key, 0);
4184 fixup_low_keys(path, &disk_key, level + 1);
4185 }
4186 btrfs_mark_buffer_dirty(parent);
4187 }
4188
4189 /*
4190 * a helper function to delete the leaf pointed to by path->slots[1] and
4191 * path->nodes[1].
4192 *
4193 * This deletes the pointer in path->nodes[1] and frees the leaf
4194 * block extent.
4195 *
4196 * The path must have already been set up for deleting the leaf, including
4197 * all the proper balancing. path->nodes[1] must be locked.
4198 */
4199 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4200 struct btrfs_root *root,
4201 struct btrfs_path *path,
4202 struct extent_buffer *leaf)
4203 {
4204 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4205 del_ptr(root, path, 1, path->slots[1]);
4206
4207 /*
4208 * btrfs_free_extent is expensive, we want to make sure we
4209 * aren't holding any locks when we call it
4210 */
4211 btrfs_unlock_up_safe(path, 0);
4212
4213 root_sub_used(root, leaf->len);
4214
4215 atomic_inc(&leaf->refs);
4216 btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1);
4217 free_extent_buffer_stale(leaf);
4218 }
4219 /*
4220 * delete the item at the leaf level in path.
If that empties 4221 * the leaf, remove it from the tree 4222 */ 4223 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, 4224 struct btrfs_path *path, int slot, int nr) 4225 { 4226 struct btrfs_fs_info *fs_info = root->fs_info; 4227 struct extent_buffer *leaf; 4228 int ret = 0; 4229 int wret; 4230 u32 nritems; 4231 4232 leaf = path->nodes[0]; 4233 nritems = btrfs_header_nritems(leaf); 4234 4235 if (slot + nr != nritems) { 4236 const u32 last_off = btrfs_item_offset(leaf, slot + nr - 1); 4237 const int data_end = leaf_data_end(leaf); 4238 struct btrfs_map_token token; 4239 u32 dsize = 0; 4240 int i; 4241 4242 for (i = 0; i < nr; i++) 4243 dsize += btrfs_item_size(leaf, slot + i); 4244 4245 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET + 4246 data_end + dsize, 4247 BTRFS_LEAF_DATA_OFFSET + data_end, 4248 last_off - data_end); 4249 4250 btrfs_init_map_token(&token, leaf); 4251 for (i = slot + nr; i < nritems; i++) { 4252 u32 ioff; 4253 4254 ioff = btrfs_token_item_offset(&token, i); 4255 btrfs_set_token_item_offset(&token, i, ioff + dsize); 4256 } 4257 4258 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot), 4259 btrfs_item_nr_offset(slot + nr), 4260 sizeof(struct btrfs_item) * 4261 (nritems - slot - nr)); 4262 } 4263 btrfs_set_header_nritems(leaf, nritems - nr); 4264 nritems -= nr; 4265 4266 /* delete the leaf if we've emptied it */ 4267 if (nritems == 0) { 4268 if (leaf == root->node) { 4269 btrfs_set_header_level(leaf, 0); 4270 } else { 4271 btrfs_clean_tree_block(leaf); 4272 btrfs_del_leaf(trans, root, path, leaf); 4273 } 4274 } else { 4275 int used = leaf_space_used(leaf, 0, nritems); 4276 if (slot == 0) { 4277 struct btrfs_disk_key disk_key; 4278 4279 btrfs_item_key(leaf, &disk_key, 0); 4280 fixup_low_keys(path, &disk_key, 1); 4281 } 4282 4283 /* 4284 * Try to delete the leaf if it is mostly empty. We do this by 4285 * trying to move all its items into its left and right neighbours. 4286 * If we can't move all the items, then we don't delete it - it's 4287 * not ideal, but future insertions might fill the leaf with more 4288 * items, or items from other leaves might be moved later into our 4289 * leaf due to deletions on those leaves. 4290 */ 4291 if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) { 4292 u32 min_push_space; 4293 4294 /* push_leaf_left fixes the path. 4295 * make sure the path still points to our leaf 4296 * for possible call to del_ptr below 4297 */ 4298 slot = path->slots[1]; 4299 atomic_inc(&leaf->refs); 4300 /* 4301 * We want to be able to at least push one item to the 4302 * left neighbour leaf, and that's the first item. 4303 */ 4304 min_push_space = sizeof(struct btrfs_item) + 4305 btrfs_item_size(leaf, 0); 4306 wret = push_leaf_left(trans, root, path, 0, 4307 min_push_space, 1, (u32)-1); 4308 if (wret < 0 && wret != -ENOSPC) 4309 ret = wret; 4310 4311 if (path->nodes[0] == leaf && 4312 btrfs_header_nritems(leaf)) { 4313 /* 4314 * If we were not able to push all items from our 4315 * leaf to its left neighbour, then attempt to 4316 * either push all the remaining items to the 4317 * right neighbour or none. There's no advantage 4318 * in pushing only some items, instead of all, as 4319 * it's pointless to end up with a leaf having 4320 * too few items while the neighbours can be full 4321 * or nearly full. 
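 *
 * The all-or-nothing behaviour comes from the minimum push space computed
 * below: it is the space used by every remaining item, so the push to the
 * right neighbour only proceeds when that neighbour has room for the
 * whole leaf.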
4322 */
4323 nritems = btrfs_header_nritems(leaf);
4324 min_push_space = leaf_space_used(leaf, 0, nritems);
4325 wret = push_leaf_right(trans, root, path, 0,
4326 min_push_space, 1, 0);
4327 if (wret < 0 && wret != -ENOSPC)
4328 ret = wret;
4329 }
4330
4331 if (btrfs_header_nritems(leaf) == 0) {
4332 path->slots[1] = slot;
4333 btrfs_del_leaf(trans, root, path, leaf);
4334 free_extent_buffer(leaf);
4335 ret = 0;
4336 } else {
4337 /* if we're still in the path, make sure
4338 * we're dirty. Otherwise, one of the
4339 * push_leaf functions must have already
4340 * dirtied this buffer
4341 */
4342 if (path->nodes[0] == leaf)
4343 btrfs_mark_buffer_dirty(leaf);
4344 free_extent_buffer(leaf);
4345 }
4346 } else {
4347 btrfs_mark_buffer_dirty(leaf);
4348 }
4349 }
4350 return ret;
4351 }
4352
4353 /*
4354 * Search the tree again to find a leaf with lesser keys.
4355 * Returns 0 if it found something or 1 if there are no lesser leaves.
4356 * Returns < 0 on I/O errors.
4357 *
4358 * This may release the path, and so you may lose any locks held at the
4359 * time you call it.
4360 */
4361 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
4362 {
4363 struct btrfs_key key;
4364 struct btrfs_disk_key found_key;
4365 int ret;
4366
4367 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
4368
4369 if (key.offset > 0) {
4370 key.offset--;
4371 } else if (key.type > 0) {
4372 key.type--;
4373 key.offset = (u64)-1;
4374 } else if (key.objectid > 0) {
4375 key.objectid--;
4376 key.type = (u8)-1;
4377 key.offset = (u64)-1;
4378 } else {
4379 return 1;
4380 }
4381
4382 btrfs_release_path(path);
4383 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4384 if (ret < 0)
4385 return ret;
4386 btrfs_item_key(path->nodes[0], &found_key, 0);
4387 ret = comp_keys(&found_key, &key);
4388 /*
4389 * We might have had an item with the previous key in the tree right
4390 * before we released our path. And after we released our path, that
4391 * item might have been pushed to the first slot (0) of the leaf we
4392 * were holding due to a tree balance. Alternatively, an item with the
4393 * previous key can exist as the only element of a leaf (big fat item).
4394 * Therefore account for these 2 cases, so that our callers (like
4395 * btrfs_previous_item) don't miss an existing item with a key matching
4396 * the previous key we computed above.
4397 */
4398 if (ret <= 0)
4399 return 0;
4400 return 1;
4401 }
4402
4403 /*
4404 * A helper function to walk down the tree starting at min_key, and looking
4405 * for nodes or leaves that have a minimum transaction id.
4406 * This is used by the btree defrag code and tree logging.
4407 *
4408 * This does not cow, but it does stuff the starting key it finds back
4409 * into min_key, so you can call btrfs_search_slot with cow=1 on the
4410 * key and get a writable path.
4411 *
4412 * This honors path->lowest_level to prevent descent past a given level
4413 * of the tree.
4414 *
4415 * min_trans indicates the oldest transaction that you are interested
4416 * in walking through. Any nodes or leaves older than min_trans are
4417 * skipped over (without reading them).
4418 *
4419 * returns zero if something useful was found, < 0 on error and 1 if there
4420 * was nothing in the tree that matched the search criteria.
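 *
 * A rough usage sketch (the process_entry() helper and the simple offset
 * increment are illustrative assumptions; real callers advance the key
 * more carefully and watch for overflow):
 *
 *	struct btrfs_key min_key = { 0 };
 *
 *	while (btrfs_search_forward(root, &min_key, path, min_trans) == 0) {
 *		process_entry(path, &min_key);
 *		btrfs_release_path(path);
 *		min_key.offset++;	/* step past the key just returned */
 *	}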
4421 */ 4422 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key, 4423 struct btrfs_path *path, 4424 u64 min_trans) 4425 { 4426 struct extent_buffer *cur; 4427 struct btrfs_key found_key; 4428 int slot; 4429 int sret; 4430 u32 nritems; 4431 int level; 4432 int ret = 1; 4433 int keep_locks = path->keep_locks; 4434 4435 path->keep_locks = 1; 4436 again: 4437 cur = btrfs_read_lock_root_node(root); 4438 level = btrfs_header_level(cur); 4439 WARN_ON(path->nodes[level]); 4440 path->nodes[level] = cur; 4441 path->locks[level] = BTRFS_READ_LOCK; 4442 4443 if (btrfs_header_generation(cur) < min_trans) { 4444 ret = 1; 4445 goto out; 4446 } 4447 while (1) { 4448 nritems = btrfs_header_nritems(cur); 4449 level = btrfs_header_level(cur); 4450 sret = btrfs_bin_search(cur, min_key, &slot); 4451 if (sret < 0) { 4452 ret = sret; 4453 goto out; 4454 } 4455 4456 /* at the lowest level, we're done, setup the path and exit */ 4457 if (level == path->lowest_level) { 4458 if (slot >= nritems) 4459 goto find_next_key; 4460 ret = 0; 4461 path->slots[level] = slot; 4462 btrfs_item_key_to_cpu(cur, &found_key, slot); 4463 goto out; 4464 } 4465 if (sret && slot > 0) 4466 slot--; 4467 /* 4468 * check this node pointer against the min_trans parameters. 4469 * If it is too old, skip to the next one. 4470 */ 4471 while (slot < nritems) { 4472 u64 gen; 4473 4474 gen = btrfs_node_ptr_generation(cur, slot); 4475 if (gen < min_trans) { 4476 slot++; 4477 continue; 4478 } 4479 break; 4480 } 4481 find_next_key: 4482 /* 4483 * we didn't find a candidate key in this node, walk forward 4484 * and find another one 4485 */ 4486 if (slot >= nritems) { 4487 path->slots[level] = slot; 4488 sret = btrfs_find_next_key(root, path, min_key, level, 4489 min_trans); 4490 if (sret == 0) { 4491 btrfs_release_path(path); 4492 goto again; 4493 } else { 4494 goto out; 4495 } 4496 } 4497 /* save our key for returning back */ 4498 btrfs_node_key_to_cpu(cur, &found_key, slot); 4499 path->slots[level] = slot; 4500 if (level == path->lowest_level) { 4501 ret = 0; 4502 goto out; 4503 } 4504 cur = btrfs_read_node_slot(cur, slot); 4505 if (IS_ERR(cur)) { 4506 ret = PTR_ERR(cur); 4507 goto out; 4508 } 4509 4510 btrfs_tree_read_lock(cur); 4511 4512 path->locks[level - 1] = BTRFS_READ_LOCK; 4513 path->nodes[level - 1] = cur; 4514 unlock_up(path, level, 1, 0, NULL); 4515 } 4516 out: 4517 path->keep_locks = keep_locks; 4518 if (ret == 0) { 4519 btrfs_unlock_up_safe(path, path->lowest_level + 1); 4520 memcpy(min_key, &found_key, sizeof(found_key)); 4521 } 4522 return ret; 4523 } 4524 4525 /* 4526 * this is similar to btrfs_next_leaf, but does not try to preserve 4527 * and fixup the path. It looks for and returns the next key in the 4528 * tree based on the current path and the min_trans parameters. 4529 * 4530 * 0 is returned if another key is found, < 0 if there are any errors 4531 * and 1 is returned if there are no higher keys in the tree 4532 * 4533 * path->keep_locks should be set to 1 on the search made before 4534 * calling this function. 
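 *
 * The typical pattern, used by btrfs_search_forward() above, is: once a
 * node has been exhausted at some level, look up the next key at that
 * level, release the path and restart the search from the new key:
 *
 *	if (btrfs_find_next_key(root, path, min_key, level, min_trans) == 0) {
 *		btrfs_release_path(path);
 *		goto again;
 *	}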
4535 */ 4536 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, 4537 struct btrfs_key *key, int level, u64 min_trans) 4538 { 4539 int slot; 4540 struct extent_buffer *c; 4541 4542 WARN_ON(!path->keep_locks && !path->skip_locking); 4543 while (level < BTRFS_MAX_LEVEL) { 4544 if (!path->nodes[level]) 4545 return 1; 4546 4547 slot = path->slots[level] + 1; 4548 c = path->nodes[level]; 4549 next: 4550 if (slot >= btrfs_header_nritems(c)) { 4551 int ret; 4552 int orig_lowest; 4553 struct btrfs_key cur_key; 4554 if (level + 1 >= BTRFS_MAX_LEVEL || 4555 !path->nodes[level + 1]) 4556 return 1; 4557 4558 if (path->locks[level + 1] || path->skip_locking) { 4559 level++; 4560 continue; 4561 } 4562 4563 slot = btrfs_header_nritems(c) - 1; 4564 if (level == 0) 4565 btrfs_item_key_to_cpu(c, &cur_key, slot); 4566 else 4567 btrfs_node_key_to_cpu(c, &cur_key, slot); 4568 4569 orig_lowest = path->lowest_level; 4570 btrfs_release_path(path); 4571 path->lowest_level = level; 4572 ret = btrfs_search_slot(NULL, root, &cur_key, path, 4573 0, 0); 4574 path->lowest_level = orig_lowest; 4575 if (ret < 0) 4576 return ret; 4577 4578 c = path->nodes[level]; 4579 slot = path->slots[level]; 4580 if (ret == 0) 4581 slot++; 4582 goto next; 4583 } 4584 4585 if (level == 0) 4586 btrfs_item_key_to_cpu(c, key, slot); 4587 else { 4588 u64 gen = btrfs_node_ptr_generation(c, slot); 4589 4590 if (gen < min_trans) { 4591 slot++; 4592 goto next; 4593 } 4594 btrfs_node_key_to_cpu(c, key, slot); 4595 } 4596 return 0; 4597 } 4598 return 1; 4599 } 4600 4601 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, 4602 u64 time_seq) 4603 { 4604 int slot; 4605 int level; 4606 struct extent_buffer *c; 4607 struct extent_buffer *next; 4608 struct btrfs_fs_info *fs_info = root->fs_info; 4609 struct btrfs_key key; 4610 bool need_commit_sem = false; 4611 u32 nritems; 4612 int ret; 4613 int i; 4614 4615 nritems = btrfs_header_nritems(path->nodes[0]); 4616 if (nritems == 0) 4617 return 1; 4618 4619 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1); 4620 again: 4621 level = 1; 4622 next = NULL; 4623 btrfs_release_path(path); 4624 4625 path->keep_locks = 1; 4626 4627 if (time_seq) { 4628 ret = btrfs_search_old_slot(root, &key, path, time_seq); 4629 } else { 4630 if (path->need_commit_sem) { 4631 path->need_commit_sem = 0; 4632 need_commit_sem = true; 4633 down_read(&fs_info->commit_root_sem); 4634 } 4635 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4636 } 4637 path->keep_locks = 0; 4638 4639 if (ret < 0) 4640 goto done; 4641 4642 nritems = btrfs_header_nritems(path->nodes[0]); 4643 /* 4644 * by releasing the path above we dropped all our locks. A balance 4645 * could have added more items next to the key that used to be 4646 * at the very end of the block. So, check again here and 4647 * advance the path if there are now more items available. 4648 */ 4649 if (nritems > 0 && path->slots[0] < nritems - 1) { 4650 if (ret == 0) 4651 path->slots[0]++; 4652 ret = 0; 4653 goto done; 4654 } 4655 /* 4656 * So the above check misses one case: 4657 * - after releasing the path above, someone has removed the item that 4658 * used to be at the very end of the block, and balance between leafs 4659 * gets another one with bigger key.offset to replace it. 4660 * 4661 * This one should be returned as well, or we can get leaf corruption 4662 * later(esp. in __btrfs_drop_extents()). 
4663 * 4664 * And a bit more explanation about this check, 4665 * with ret > 0, the key isn't found, the path points to the slot 4666 * where it should be inserted, so the path->slots[0] item must be the 4667 * bigger one. 4668 */ 4669 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) { 4670 ret = 0; 4671 goto done; 4672 } 4673 4674 while (level < BTRFS_MAX_LEVEL) { 4675 if (!path->nodes[level]) { 4676 ret = 1; 4677 goto done; 4678 } 4679 4680 slot = path->slots[level] + 1; 4681 c = path->nodes[level]; 4682 if (slot >= btrfs_header_nritems(c)) { 4683 level++; 4684 if (level == BTRFS_MAX_LEVEL) { 4685 ret = 1; 4686 goto done; 4687 } 4688 continue; 4689 } 4690 4691 4692 /* 4693 * Our current level is where we're going to start from, and to 4694 * make sure lockdep doesn't complain we need to drop our locks 4695 * and nodes from 0 to our current level. 4696 */ 4697 for (i = 0; i < level; i++) { 4698 if (path->locks[level]) { 4699 btrfs_tree_read_unlock(path->nodes[i]); 4700 path->locks[i] = 0; 4701 } 4702 free_extent_buffer(path->nodes[i]); 4703 path->nodes[i] = NULL; 4704 } 4705 4706 next = c; 4707 ret = read_block_for_search(root, path, &next, level, 4708 slot, &key); 4709 if (ret == -EAGAIN) 4710 goto again; 4711 4712 if (ret < 0) { 4713 btrfs_release_path(path); 4714 goto done; 4715 } 4716 4717 if (!path->skip_locking) { 4718 ret = btrfs_try_tree_read_lock(next); 4719 if (!ret && time_seq) { 4720 /* 4721 * If we don't get the lock, we may be racing 4722 * with push_leaf_left, holding that lock while 4723 * itself waiting for the leaf we've currently 4724 * locked. To solve this situation, we give up 4725 * on our lock and cycle. 4726 */ 4727 free_extent_buffer(next); 4728 btrfs_release_path(path); 4729 cond_resched(); 4730 goto again; 4731 } 4732 if (!ret) 4733 btrfs_tree_read_lock(next); 4734 } 4735 break; 4736 } 4737 path->slots[level] = slot; 4738 while (1) { 4739 level--; 4740 path->nodes[level] = next; 4741 path->slots[level] = 0; 4742 if (!path->skip_locking) 4743 path->locks[level] = BTRFS_READ_LOCK; 4744 if (!level) 4745 break; 4746 4747 ret = read_block_for_search(root, path, &next, level, 4748 0, &key); 4749 if (ret == -EAGAIN) 4750 goto again; 4751 4752 if (ret < 0) { 4753 btrfs_release_path(path); 4754 goto done; 4755 } 4756 4757 if (!path->skip_locking) 4758 btrfs_tree_read_lock(next); 4759 } 4760 ret = 0; 4761 done: 4762 unlock_up(path, 0, 1, 0, NULL); 4763 if (need_commit_sem) { 4764 int ret2; 4765 4766 path->need_commit_sem = 1; 4767 ret2 = finish_need_commit_sem_search(path); 4768 up_read(&fs_info->commit_root_sem); 4769 if (ret2) 4770 ret = ret2; 4771 } 4772 4773 return ret; 4774 } 4775 4776 /* 4777 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps 4778 * searching until it gets past min_objectid or finds an item of 'type' 4779 * 4780 * returns 0 if something is found, 1 if nothing was found and < 0 on error 4781 */ 4782 int btrfs_previous_item(struct btrfs_root *root, 4783 struct btrfs_path *path, u64 min_objectid, 4784 int type) 4785 { 4786 struct btrfs_key found_key; 4787 struct extent_buffer *leaf; 4788 u32 nritems; 4789 int ret; 4790 4791 while (1) { 4792 if (path->slots[0] == 0) { 4793 ret = btrfs_prev_leaf(root, path); 4794 if (ret != 0) 4795 return ret; 4796 } else { 4797 path->slots[0]--; 4798 } 4799 leaf = path->nodes[0]; 4800 nritems = btrfs_header_nritems(leaf); 4801 if (nritems == 0) 4802 return 1; 4803 if (path->slots[0] == nritems) 4804 path->slots[0]--; 4805 4806 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 4807 
if (found_key.objectid < min_objectid)
4808 break;
4809 if (found_key.type == type)
4810 return 0;
4811 if (found_key.objectid == min_objectid &&
4812 found_key.type < type)
4813 break;
4814 }
4815 return 1;
4816 }
4817
4818 /*
4819 * search in the extent tree to find a previous Metadata/Data extent item
4820 * with min objectid.
4821 *
4822 * returns 0 if something is found, 1 if nothing was found and < 0 on error
4823 */
4824 int btrfs_previous_extent_item(struct btrfs_root *root,
4825 struct btrfs_path *path, u64 min_objectid)
4826 {
4827 struct btrfs_key found_key;
4828 struct extent_buffer *leaf;
4829 u32 nritems;
4830 int ret;
4831
4832 while (1) {
4833 if (path->slots[0] == 0) {
4834 ret = btrfs_prev_leaf(root, path);
4835 if (ret != 0)
4836 return ret;
4837 } else {
4838 path->slots[0]--;
4839 }
4840 leaf = path->nodes[0];
4841 nritems = btrfs_header_nritems(leaf);
4842 if (nritems == 0)
4843 return 1;
4844 if (path->slots[0] == nritems)
4845 path->slots[0]--;
4846
4847 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4848 if (found_key.objectid < min_objectid)
4849 break;
4850 if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
4851 found_key.type == BTRFS_METADATA_ITEM_KEY)
4852 return 0;
4853 if (found_key.objectid == min_objectid &&
4854 found_key.type < BTRFS_EXTENT_ITEM_KEY)
4855 break;
4856 }
4857 return 1;
4858 }
4859