1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2007,2008 Oracle. All rights reserved. 4 */ 5 6 #include <linux/sched.h> 7 #include <linux/slab.h> 8 #include <linux/rbtree.h> 9 #include <linux/mm.h> 10 #include <linux/error-injection.h> 11 #include "ctree.h" 12 #include "disk-io.h" 13 #include "transaction.h" 14 #include "print-tree.h" 15 #include "locking.h" 16 #include "volumes.h" 17 #include "qgroup.h" 18 #include "tree-mod-log.h" 19 20 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root 21 *root, struct btrfs_path *path, int level); 22 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root, 23 const struct btrfs_key *ins_key, struct btrfs_path *path, 24 int data_size, int extend); 25 static int push_node_left(struct btrfs_trans_handle *trans, 26 struct extent_buffer *dst, 27 struct extent_buffer *src, int empty); 28 static int balance_node_right(struct btrfs_trans_handle *trans, 29 struct extent_buffer *dst_buf, 30 struct extent_buffer *src_buf); 31 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path, 32 int level, int slot); 33 34 static const struct btrfs_csums { 35 u16 size; 36 const char name[10]; 37 const char driver[12]; 38 } btrfs_csums[] = { 39 [BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" }, 40 [BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" }, 41 [BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" }, 42 [BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b", 43 .driver = "blake2b-256" }, 44 }; 45 46 int btrfs_super_csum_size(const struct btrfs_super_block *s) 47 { 48 u16 t = btrfs_super_csum_type(s); 49 /* 50 * csum type is validated at mount time 51 */ 52 return btrfs_csums[t].size; 53 } 54 55 const char *btrfs_super_csum_name(u16 csum_type) 56 { 57 /* csum type is validated at mount time */ 58 return btrfs_csums[csum_type].name; 59 } 60 61 /* 62 * Return driver name if defined, otherwise the name that's also a valid driver 63 * name 64 */ 65 const char *btrfs_super_csum_driver(u16 csum_type) 66 { 67 /* csum type is validated at mount time */ 68 return btrfs_csums[csum_type].driver[0] ? 69 btrfs_csums[csum_type].driver : 70 btrfs_csums[csum_type].name; 71 } 72 73 size_t __attribute_const__ btrfs_get_num_csums(void) 74 { 75 return ARRAY_SIZE(btrfs_csums); 76 } 77 78 struct btrfs_path *btrfs_alloc_path(void) 79 { 80 return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS); 81 } 82 83 /* this also releases the path */ 84 void btrfs_free_path(struct btrfs_path *p) 85 { 86 if (!p) 87 return; 88 btrfs_release_path(p); 89 kmem_cache_free(btrfs_path_cachep, p); 90 } 91 92 /* 93 * path release drops references on the extent buffers in the path 94 * and it drops any locks held by this path 95 * 96 * It is safe to call this on paths that no locks or extent buffers held. 97 */ 98 noinline void btrfs_release_path(struct btrfs_path *p) 99 { 100 int i; 101 102 for (i = 0; i < BTRFS_MAX_LEVEL; i++) { 103 p->slots[i] = 0; 104 if (!p->nodes[i]) 105 continue; 106 if (p->locks[i]) { 107 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]); 108 p->locks[i] = 0; 109 } 110 free_extent_buffer(p->nodes[i]); 111 p->nodes[i] = NULL; 112 } 113 } 114 115 /* 116 * safely gets a reference on the root node of a tree. A lock 117 * is not taken, so a concurrent writer may put a different node 118 * at the root of the tree. See btrfs_lock_root_node for the 119 * looping required. 120 * 121 * The extent buffer returned by this has a reference taken, so 122 * it won't disappear. It may stop being the root of the tree 123 * at any time because there are no locks held. 124 */ 125 struct extent_buffer *btrfs_root_node(struct btrfs_root *root) 126 { 127 struct extent_buffer *eb; 128 129 while (1) { 130 rcu_read_lock(); 131 eb = rcu_dereference(root->node); 132 133 /* 134 * RCU really hurts here, we could free up the root node because 135 * it was COWed but we may not get the new root node yet so do 136 * the inc_not_zero dance and if it doesn't work then 137 * synchronize_rcu and try again. 138 */ 139 if (atomic_inc_not_zero(&eb->refs)) { 140 rcu_read_unlock(); 141 break; 142 } 143 rcu_read_unlock(); 144 synchronize_rcu(); 145 } 146 return eb; 147 } 148 149 /* 150 * Cowonly root (not-shareable trees, everything not subvolume or reloc roots), 151 * just get put onto a simple dirty list. Transaction walks this list to make 152 * sure they get properly updated on disk. 153 */ 154 static void add_root_to_dirty_list(struct btrfs_root *root) 155 { 156 struct btrfs_fs_info *fs_info = root->fs_info; 157 158 if (test_bit(BTRFS_ROOT_DIRTY, &root->state) || 159 !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state)) 160 return; 161 162 spin_lock(&fs_info->trans_lock); 163 if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) { 164 /* Want the extent tree to be the last on the list */ 165 if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID) 166 list_move_tail(&root->dirty_list, 167 &fs_info->dirty_cowonly_roots); 168 else 169 list_move(&root->dirty_list, 170 &fs_info->dirty_cowonly_roots); 171 } 172 spin_unlock(&fs_info->trans_lock); 173 } 174 175 /* 176 * used by snapshot creation to make a copy of a root for a tree with 177 * a given objectid. The buffer with the new root node is returned in 178 * cow_ret, and this func returns zero on success or a negative error code. 179 */ 180 int btrfs_copy_root(struct btrfs_trans_handle *trans, 181 struct btrfs_root *root, 182 struct extent_buffer *buf, 183 struct extent_buffer **cow_ret, u64 new_root_objectid) 184 { 185 struct btrfs_fs_info *fs_info = root->fs_info; 186 struct extent_buffer *cow; 187 int ret = 0; 188 int level; 189 struct btrfs_disk_key disk_key; 190 191 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && 192 trans->transid != fs_info->running_transaction->transid); 193 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && 194 trans->transid != root->last_trans); 195 196 level = btrfs_header_level(buf); 197 if (level == 0) 198 btrfs_item_key(buf, &disk_key, 0); 199 else 200 btrfs_node_key(buf, &disk_key, 0); 201 202 cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid, 203 &disk_key, level, buf->start, 0, 204 BTRFS_NESTING_NEW_ROOT); 205 if (IS_ERR(cow)) 206 return PTR_ERR(cow); 207 208 copy_extent_buffer_full(cow, buf); 209 btrfs_set_header_bytenr(cow, cow->start); 210 btrfs_set_header_generation(cow, trans->transid); 211 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV); 212 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN | 213 BTRFS_HEADER_FLAG_RELOC); 214 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID) 215 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC); 216 else 217 btrfs_set_header_owner(cow, new_root_objectid); 218 219 write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid); 220 221 WARN_ON(btrfs_header_generation(buf) > trans->transid); 222 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID) 223 ret = btrfs_inc_ref(trans, root, cow, 1); 224 else 225 ret = btrfs_inc_ref(trans, root, cow, 0); 226 if (ret) { 227 btrfs_tree_unlock(cow); 228 free_extent_buffer(cow); 229 btrfs_abort_transaction(trans, ret); 230 return ret; 231 } 232 233 btrfs_mark_buffer_dirty(cow); 234 *cow_ret = cow; 235 return 0; 236 } 237 238 /* 239 * check if the tree block can be shared by multiple trees 240 */ 241 int btrfs_block_can_be_shared(struct btrfs_root *root, 242 struct extent_buffer *buf) 243 { 244 /* 245 * Tree blocks not in shareable trees and tree roots are never shared. 246 * If a block was allocated after the last snapshot and the block was 247 * not allocated by tree relocation, we know the block is not shared. 248 */ 249 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && 250 buf != root->node && buf != root->commit_root && 251 (btrfs_header_generation(buf) <= 252 btrfs_root_last_snapshot(&root->root_item) || 253 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))) 254 return 1; 255 256 return 0; 257 } 258 259 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans, 260 struct btrfs_root *root, 261 struct extent_buffer *buf, 262 struct extent_buffer *cow, 263 int *last_ref) 264 { 265 struct btrfs_fs_info *fs_info = root->fs_info; 266 u64 refs; 267 u64 owner; 268 u64 flags; 269 u64 new_flags = 0; 270 int ret; 271 272 /* 273 * Backrefs update rules: 274 * 275 * Always use full backrefs for extent pointers in tree block 276 * allocated by tree relocation. 277 * 278 * If a shared tree block is no longer referenced by its owner 279 * tree (btrfs_header_owner(buf) == root->root_key.objectid), 280 * use full backrefs for extent pointers in tree block. 281 * 282 * If a tree block is been relocating 283 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID), 284 * use full backrefs for extent pointers in tree block. 285 * The reason for this is some operations (such as drop tree) 286 * are only allowed for blocks use full backrefs. 287 */ 288 289 if (btrfs_block_can_be_shared(root, buf)) { 290 ret = btrfs_lookup_extent_info(trans, fs_info, buf->start, 291 btrfs_header_level(buf), 1, 292 &refs, &flags); 293 if (ret) 294 return ret; 295 if (refs == 0) { 296 ret = -EROFS; 297 btrfs_handle_fs_error(fs_info, ret, NULL); 298 return ret; 299 } 300 } else { 301 refs = 1; 302 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID || 303 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV) 304 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF; 305 else 306 flags = 0; 307 } 308 309 owner = btrfs_header_owner(buf); 310 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID && 311 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)); 312 313 if (refs > 1) { 314 if ((owner == root->root_key.objectid || 315 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && 316 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) { 317 ret = btrfs_inc_ref(trans, root, buf, 1); 318 if (ret) 319 return ret; 320 321 if (root->root_key.objectid == 322 BTRFS_TREE_RELOC_OBJECTID) { 323 ret = btrfs_dec_ref(trans, root, buf, 0); 324 if (ret) 325 return ret; 326 ret = btrfs_inc_ref(trans, root, cow, 1); 327 if (ret) 328 return ret; 329 } 330 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF; 331 } else { 332 333 if (root->root_key.objectid == 334 BTRFS_TREE_RELOC_OBJECTID) 335 ret = btrfs_inc_ref(trans, root, cow, 1); 336 else 337 ret = btrfs_inc_ref(trans, root, cow, 0); 338 if (ret) 339 return ret; 340 } 341 if (new_flags != 0) { 342 int level = btrfs_header_level(buf); 343 344 ret = btrfs_set_disk_extent_flags(trans, buf, 345 new_flags, level, 0); 346 if (ret) 347 return ret; 348 } 349 } else { 350 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) { 351 if (root->root_key.objectid == 352 BTRFS_TREE_RELOC_OBJECTID) 353 ret = btrfs_inc_ref(trans, root, cow, 1); 354 else 355 ret = btrfs_inc_ref(trans, root, cow, 0); 356 if (ret) 357 return ret; 358 ret = btrfs_dec_ref(trans, root, buf, 1); 359 if (ret) 360 return ret; 361 } 362 btrfs_clean_tree_block(buf); 363 *last_ref = 1; 364 } 365 return 0; 366 } 367 368 /* 369 * does the dirty work in cow of a single block. The parent block (if 370 * supplied) is updated to point to the new cow copy. The new buffer is marked 371 * dirty and returned locked. If you modify the block it needs to be marked 372 * dirty again. 373 * 374 * search_start -- an allocation hint for the new block 375 * 376 * empty_size -- a hint that you plan on doing more cow. This is the size in 377 * bytes the allocator should try to find free next to the block it returns. 378 * This is just a hint and may be ignored by the allocator. 379 */ 380 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, 381 struct btrfs_root *root, 382 struct extent_buffer *buf, 383 struct extent_buffer *parent, int parent_slot, 384 struct extent_buffer **cow_ret, 385 u64 search_start, u64 empty_size, 386 enum btrfs_lock_nesting nest) 387 { 388 struct btrfs_fs_info *fs_info = root->fs_info; 389 struct btrfs_disk_key disk_key; 390 struct extent_buffer *cow; 391 int level, ret; 392 int last_ref = 0; 393 int unlock_orig = 0; 394 u64 parent_start = 0; 395 396 if (*cow_ret == buf) 397 unlock_orig = 1; 398 399 btrfs_assert_tree_write_locked(buf); 400 401 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && 402 trans->transid != fs_info->running_transaction->transid); 403 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && 404 trans->transid != root->last_trans); 405 406 level = btrfs_header_level(buf); 407 408 if (level == 0) 409 btrfs_item_key(buf, &disk_key, 0); 410 else 411 btrfs_node_key(buf, &disk_key, 0); 412 413 if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent) 414 parent_start = parent->start; 415 416 cow = btrfs_alloc_tree_block(trans, root, parent_start, 417 root->root_key.objectid, &disk_key, level, 418 search_start, empty_size, nest); 419 if (IS_ERR(cow)) 420 return PTR_ERR(cow); 421 422 /* cow is set to blocking by btrfs_init_new_buffer */ 423 424 copy_extent_buffer_full(cow, buf); 425 btrfs_set_header_bytenr(cow, cow->start); 426 btrfs_set_header_generation(cow, trans->transid); 427 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV); 428 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN | 429 BTRFS_HEADER_FLAG_RELOC); 430 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) 431 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC); 432 else 433 btrfs_set_header_owner(cow, root->root_key.objectid); 434 435 write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid); 436 437 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref); 438 if (ret) { 439 btrfs_tree_unlock(cow); 440 free_extent_buffer(cow); 441 btrfs_abort_transaction(trans, ret); 442 return ret; 443 } 444 445 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) { 446 ret = btrfs_reloc_cow_block(trans, root, buf, cow); 447 if (ret) { 448 btrfs_tree_unlock(cow); 449 free_extent_buffer(cow); 450 btrfs_abort_transaction(trans, ret); 451 return ret; 452 } 453 } 454 455 if (buf == root->node) { 456 WARN_ON(parent && parent != buf); 457 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID || 458 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV) 459 parent_start = buf->start; 460 461 atomic_inc(&cow->refs); 462 ret = btrfs_tree_mod_log_insert_root(root->node, cow, true); 463 BUG_ON(ret < 0); 464 rcu_assign_pointer(root->node, cow); 465 466 btrfs_free_tree_block(trans, btrfs_root_id(root), buf, 467 parent_start, last_ref); 468 free_extent_buffer(buf); 469 add_root_to_dirty_list(root); 470 } else { 471 WARN_ON(trans->transid != btrfs_header_generation(parent)); 472 btrfs_tree_mod_log_insert_key(parent, parent_slot, 473 BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS); 474 btrfs_set_node_blockptr(parent, parent_slot, 475 cow->start); 476 btrfs_set_node_ptr_generation(parent, parent_slot, 477 trans->transid); 478 btrfs_mark_buffer_dirty(parent); 479 if (last_ref) { 480 ret = btrfs_tree_mod_log_free_eb(buf); 481 if (ret) { 482 btrfs_tree_unlock(cow); 483 free_extent_buffer(cow); 484 btrfs_abort_transaction(trans, ret); 485 return ret; 486 } 487 } 488 btrfs_free_tree_block(trans, btrfs_root_id(root), buf, 489 parent_start, last_ref); 490 } 491 if (unlock_orig) 492 btrfs_tree_unlock(buf); 493 free_extent_buffer_stale(buf); 494 btrfs_mark_buffer_dirty(cow); 495 *cow_ret = cow; 496 return 0; 497 } 498 499 static inline int should_cow_block(struct btrfs_trans_handle *trans, 500 struct btrfs_root *root, 501 struct extent_buffer *buf) 502 { 503 if (btrfs_is_testing(root->fs_info)) 504 return 0; 505 506 /* Ensure we can see the FORCE_COW bit */ 507 smp_mb__before_atomic(); 508 509 /* 510 * We do not need to cow a block if 511 * 1) this block is not created or changed in this transaction; 512 * 2) this block does not belong to TREE_RELOC tree; 513 * 3) the root is not forced COW. 514 * 515 * What is forced COW: 516 * when we create snapshot during committing the transaction, 517 * after we've finished copying src root, we must COW the shared 518 * block to ensure the metadata consistency. 519 */ 520 if (btrfs_header_generation(buf) == trans->transid && 521 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) && 522 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID && 523 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) && 524 !test_bit(BTRFS_ROOT_FORCE_COW, &root->state)) 525 return 0; 526 return 1; 527 } 528 529 /* 530 * cows a single block, see __btrfs_cow_block for the real work. 531 * This version of it has extra checks so that a block isn't COWed more than 532 * once per transaction, as long as it hasn't been written yet 533 */ 534 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans, 535 struct btrfs_root *root, struct extent_buffer *buf, 536 struct extent_buffer *parent, int parent_slot, 537 struct extent_buffer **cow_ret, 538 enum btrfs_lock_nesting nest) 539 { 540 struct btrfs_fs_info *fs_info = root->fs_info; 541 u64 search_start; 542 int ret; 543 544 if (test_bit(BTRFS_ROOT_DELETING, &root->state)) 545 btrfs_err(fs_info, 546 "COW'ing blocks on a fs root that's being dropped"); 547 548 if (trans->transaction != fs_info->running_transaction) 549 WARN(1, KERN_CRIT "trans %llu running %llu\n", 550 trans->transid, 551 fs_info->running_transaction->transid); 552 553 if (trans->transid != fs_info->generation) 554 WARN(1, KERN_CRIT "trans %llu running %llu\n", 555 trans->transid, fs_info->generation); 556 557 if (!should_cow_block(trans, root, buf)) { 558 *cow_ret = buf; 559 return 0; 560 } 561 562 search_start = buf->start & ~((u64)SZ_1G - 1); 563 564 /* 565 * Before CoWing this block for later modification, check if it's 566 * the subtree root and do the delayed subtree trace if needed. 567 * 568 * Also We don't care about the error, as it's handled internally. 569 */ 570 btrfs_qgroup_trace_subtree_after_cow(trans, root, buf); 571 ret = __btrfs_cow_block(trans, root, buf, parent, 572 parent_slot, cow_ret, search_start, 0, nest); 573 574 trace_btrfs_cow_block(root, buf, *cow_ret); 575 576 return ret; 577 } 578 ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO); 579 580 /* 581 * helper function for defrag to decide if two blocks pointed to by a 582 * node are actually close by 583 */ 584 static int close_blocks(u64 blocknr, u64 other, u32 blocksize) 585 { 586 if (blocknr < other && other - (blocknr + blocksize) < 32768) 587 return 1; 588 if (blocknr > other && blocknr - (other + blocksize) < 32768) 589 return 1; 590 return 0; 591 } 592 593 #ifdef __LITTLE_ENDIAN 594 595 /* 596 * Compare two keys, on little-endian the disk order is same as CPU order and 597 * we can avoid the conversion. 598 */ 599 static int comp_keys(const struct btrfs_disk_key *disk_key, 600 const struct btrfs_key *k2) 601 { 602 const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key; 603 604 return btrfs_comp_cpu_keys(k1, k2); 605 } 606 607 #else 608 609 /* 610 * compare two keys in a memcmp fashion 611 */ 612 static int comp_keys(const struct btrfs_disk_key *disk, 613 const struct btrfs_key *k2) 614 { 615 struct btrfs_key k1; 616 617 btrfs_disk_key_to_cpu(&k1, disk); 618 619 return btrfs_comp_cpu_keys(&k1, k2); 620 } 621 #endif 622 623 /* 624 * same as comp_keys only with two btrfs_key's 625 */ 626 int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2) 627 { 628 if (k1->objectid > k2->objectid) 629 return 1; 630 if (k1->objectid < k2->objectid) 631 return -1; 632 if (k1->type > k2->type) 633 return 1; 634 if (k1->type < k2->type) 635 return -1; 636 if (k1->offset > k2->offset) 637 return 1; 638 if (k1->offset < k2->offset) 639 return -1; 640 return 0; 641 } 642 643 /* 644 * this is used by the defrag code to go through all the 645 * leaves pointed to by a node and reallocate them so that 646 * disk order is close to key order 647 */ 648 int btrfs_realloc_node(struct btrfs_trans_handle *trans, 649 struct btrfs_root *root, struct extent_buffer *parent, 650 int start_slot, u64 *last_ret, 651 struct btrfs_key *progress) 652 { 653 struct btrfs_fs_info *fs_info = root->fs_info; 654 struct extent_buffer *cur; 655 u64 blocknr; 656 u64 search_start = *last_ret; 657 u64 last_block = 0; 658 u64 other; 659 u32 parent_nritems; 660 int end_slot; 661 int i; 662 int err = 0; 663 u32 blocksize; 664 int progress_passed = 0; 665 struct btrfs_disk_key disk_key; 666 667 WARN_ON(trans->transaction != fs_info->running_transaction); 668 WARN_ON(trans->transid != fs_info->generation); 669 670 parent_nritems = btrfs_header_nritems(parent); 671 blocksize = fs_info->nodesize; 672 end_slot = parent_nritems - 1; 673 674 if (parent_nritems <= 1) 675 return 0; 676 677 for (i = start_slot; i <= end_slot; i++) { 678 int close = 1; 679 680 btrfs_node_key(parent, &disk_key, i); 681 if (!progress_passed && comp_keys(&disk_key, progress) < 0) 682 continue; 683 684 progress_passed = 1; 685 blocknr = btrfs_node_blockptr(parent, i); 686 if (last_block == 0) 687 last_block = blocknr; 688 689 if (i > 0) { 690 other = btrfs_node_blockptr(parent, i - 1); 691 close = close_blocks(blocknr, other, blocksize); 692 } 693 if (!close && i < end_slot) { 694 other = btrfs_node_blockptr(parent, i + 1); 695 close = close_blocks(blocknr, other, blocksize); 696 } 697 if (close) { 698 last_block = blocknr; 699 continue; 700 } 701 702 cur = btrfs_read_node_slot(parent, i); 703 if (IS_ERR(cur)) 704 return PTR_ERR(cur); 705 if (search_start == 0) 706 search_start = last_block; 707 708 btrfs_tree_lock(cur); 709 err = __btrfs_cow_block(trans, root, cur, parent, i, 710 &cur, search_start, 711 min(16 * blocksize, 712 (end_slot - i) * blocksize), 713 BTRFS_NESTING_COW); 714 if (err) { 715 btrfs_tree_unlock(cur); 716 free_extent_buffer(cur); 717 break; 718 } 719 search_start = cur->start; 720 last_block = cur->start; 721 *last_ret = search_start; 722 btrfs_tree_unlock(cur); 723 free_extent_buffer(cur); 724 } 725 return err; 726 } 727 728 /* 729 * search for key in the extent_buffer. The items start at offset p, 730 * and they are item_size apart. 731 * 732 * the slot in the array is returned via slot, and it points to 733 * the place where you would insert key if it is not found in 734 * the array. 735 * 736 * Slot may point to total number of items if the key is bigger than 737 * all of the keys 738 */ 739 static noinline int generic_bin_search(struct extent_buffer *eb, 740 unsigned long p, int item_size, 741 const struct btrfs_key *key, int *slot) 742 { 743 int low = 0; 744 int high = btrfs_header_nritems(eb); 745 int ret; 746 const int key_size = sizeof(struct btrfs_disk_key); 747 748 if (low > high) { 749 btrfs_err(eb->fs_info, 750 "%s: low (%d) > high (%d) eb %llu owner %llu level %d", 751 __func__, low, high, eb->start, 752 btrfs_header_owner(eb), btrfs_header_level(eb)); 753 return -EINVAL; 754 } 755 756 while (low < high) { 757 unsigned long oip; 758 unsigned long offset; 759 struct btrfs_disk_key *tmp; 760 struct btrfs_disk_key unaligned; 761 int mid; 762 763 mid = (low + high) / 2; 764 offset = p + mid * item_size; 765 oip = offset_in_page(offset); 766 767 if (oip + key_size <= PAGE_SIZE) { 768 const unsigned long idx = get_eb_page_index(offset); 769 char *kaddr = page_address(eb->pages[idx]); 770 771 oip = get_eb_offset_in_page(eb, offset); 772 tmp = (struct btrfs_disk_key *)(kaddr + oip); 773 } else { 774 read_extent_buffer(eb, &unaligned, offset, key_size); 775 tmp = &unaligned; 776 } 777 778 ret = comp_keys(tmp, key); 779 780 if (ret < 0) 781 low = mid + 1; 782 else if (ret > 0) 783 high = mid; 784 else { 785 *slot = mid; 786 return 0; 787 } 788 } 789 *slot = low; 790 return 1; 791 } 792 793 /* 794 * simple bin_search frontend that does the right thing for 795 * leaves vs nodes 796 */ 797 int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key, 798 int *slot) 799 { 800 if (btrfs_header_level(eb) == 0) 801 return generic_bin_search(eb, 802 offsetof(struct btrfs_leaf, items), 803 sizeof(struct btrfs_item), key, slot); 804 else 805 return generic_bin_search(eb, 806 offsetof(struct btrfs_node, ptrs), 807 sizeof(struct btrfs_key_ptr), key, slot); 808 } 809 810 static void root_add_used(struct btrfs_root *root, u32 size) 811 { 812 spin_lock(&root->accounting_lock); 813 btrfs_set_root_used(&root->root_item, 814 btrfs_root_used(&root->root_item) + size); 815 spin_unlock(&root->accounting_lock); 816 } 817 818 static void root_sub_used(struct btrfs_root *root, u32 size) 819 { 820 spin_lock(&root->accounting_lock); 821 btrfs_set_root_used(&root->root_item, 822 btrfs_root_used(&root->root_item) - size); 823 spin_unlock(&root->accounting_lock); 824 } 825 826 /* given a node and slot number, this reads the blocks it points to. The 827 * extent buffer is returned with a reference taken (but unlocked). 828 */ 829 struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent, 830 int slot) 831 { 832 int level = btrfs_header_level(parent); 833 struct extent_buffer *eb; 834 struct btrfs_key first_key; 835 836 if (slot < 0 || slot >= btrfs_header_nritems(parent)) 837 return ERR_PTR(-ENOENT); 838 839 BUG_ON(level == 0); 840 841 btrfs_node_key_to_cpu(parent, &first_key, slot); 842 eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot), 843 btrfs_header_owner(parent), 844 btrfs_node_ptr_generation(parent, slot), 845 level - 1, &first_key); 846 if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) { 847 free_extent_buffer(eb); 848 eb = ERR_PTR(-EIO); 849 } 850 851 return eb; 852 } 853 854 /* 855 * node level balancing, used to make sure nodes are in proper order for 856 * item deletion. We balance from the top down, so we have to make sure 857 * that a deletion won't leave an node completely empty later on. 858 */ 859 static noinline int balance_level(struct btrfs_trans_handle *trans, 860 struct btrfs_root *root, 861 struct btrfs_path *path, int level) 862 { 863 struct btrfs_fs_info *fs_info = root->fs_info; 864 struct extent_buffer *right = NULL; 865 struct extent_buffer *mid; 866 struct extent_buffer *left = NULL; 867 struct extent_buffer *parent = NULL; 868 int ret = 0; 869 int wret; 870 int pslot; 871 int orig_slot = path->slots[level]; 872 u64 orig_ptr; 873 874 ASSERT(level > 0); 875 876 mid = path->nodes[level]; 877 878 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK); 879 WARN_ON(btrfs_header_generation(mid) != trans->transid); 880 881 orig_ptr = btrfs_node_blockptr(mid, orig_slot); 882 883 if (level < BTRFS_MAX_LEVEL - 1) { 884 parent = path->nodes[level + 1]; 885 pslot = path->slots[level + 1]; 886 } 887 888 /* 889 * deal with the case where there is only one pointer in the root 890 * by promoting the node below to a root 891 */ 892 if (!parent) { 893 struct extent_buffer *child; 894 895 if (btrfs_header_nritems(mid) != 1) 896 return 0; 897 898 /* promote the child to a root */ 899 child = btrfs_read_node_slot(mid, 0); 900 if (IS_ERR(child)) { 901 ret = PTR_ERR(child); 902 btrfs_handle_fs_error(fs_info, ret, NULL); 903 goto enospc; 904 } 905 906 btrfs_tree_lock(child); 907 ret = btrfs_cow_block(trans, root, child, mid, 0, &child, 908 BTRFS_NESTING_COW); 909 if (ret) { 910 btrfs_tree_unlock(child); 911 free_extent_buffer(child); 912 goto enospc; 913 } 914 915 ret = btrfs_tree_mod_log_insert_root(root->node, child, true); 916 BUG_ON(ret < 0); 917 rcu_assign_pointer(root->node, child); 918 919 add_root_to_dirty_list(root); 920 btrfs_tree_unlock(child); 921 922 path->locks[level] = 0; 923 path->nodes[level] = NULL; 924 btrfs_clean_tree_block(mid); 925 btrfs_tree_unlock(mid); 926 /* once for the path */ 927 free_extent_buffer(mid); 928 929 root_sub_used(root, mid->len); 930 btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1); 931 /* once for the root ptr */ 932 free_extent_buffer_stale(mid); 933 return 0; 934 } 935 if (btrfs_header_nritems(mid) > 936 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4) 937 return 0; 938 939 left = btrfs_read_node_slot(parent, pslot - 1); 940 if (IS_ERR(left)) 941 left = NULL; 942 943 if (left) { 944 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT); 945 wret = btrfs_cow_block(trans, root, left, 946 parent, pslot - 1, &left, 947 BTRFS_NESTING_LEFT_COW); 948 if (wret) { 949 ret = wret; 950 goto enospc; 951 } 952 } 953 954 right = btrfs_read_node_slot(parent, pslot + 1); 955 if (IS_ERR(right)) 956 right = NULL; 957 958 if (right) { 959 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT); 960 wret = btrfs_cow_block(trans, root, right, 961 parent, pslot + 1, &right, 962 BTRFS_NESTING_RIGHT_COW); 963 if (wret) { 964 ret = wret; 965 goto enospc; 966 } 967 } 968 969 /* first, try to make some room in the middle buffer */ 970 if (left) { 971 orig_slot += btrfs_header_nritems(left); 972 wret = push_node_left(trans, left, mid, 1); 973 if (wret < 0) 974 ret = wret; 975 } 976 977 /* 978 * then try to empty the right most buffer into the middle 979 */ 980 if (right) { 981 wret = push_node_left(trans, mid, right, 1); 982 if (wret < 0 && wret != -ENOSPC) 983 ret = wret; 984 if (btrfs_header_nritems(right) == 0) { 985 btrfs_clean_tree_block(right); 986 btrfs_tree_unlock(right); 987 del_ptr(root, path, level + 1, pslot + 1); 988 root_sub_used(root, right->len); 989 btrfs_free_tree_block(trans, btrfs_root_id(root), right, 990 0, 1); 991 free_extent_buffer_stale(right); 992 right = NULL; 993 } else { 994 struct btrfs_disk_key right_key; 995 btrfs_node_key(right, &right_key, 0); 996 ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1, 997 BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS); 998 BUG_ON(ret < 0); 999 btrfs_set_node_key(parent, &right_key, pslot + 1); 1000 btrfs_mark_buffer_dirty(parent); 1001 } 1002 } 1003 if (btrfs_header_nritems(mid) == 1) { 1004 /* 1005 * we're not allowed to leave a node with one item in the 1006 * tree during a delete. A deletion from lower in the tree 1007 * could try to delete the only pointer in this node. 1008 * So, pull some keys from the left. 1009 * There has to be a left pointer at this point because 1010 * otherwise we would have pulled some pointers from the 1011 * right 1012 */ 1013 if (!left) { 1014 ret = -EROFS; 1015 btrfs_handle_fs_error(fs_info, ret, NULL); 1016 goto enospc; 1017 } 1018 wret = balance_node_right(trans, mid, left); 1019 if (wret < 0) { 1020 ret = wret; 1021 goto enospc; 1022 } 1023 if (wret == 1) { 1024 wret = push_node_left(trans, left, mid, 1); 1025 if (wret < 0) 1026 ret = wret; 1027 } 1028 BUG_ON(wret == 1); 1029 } 1030 if (btrfs_header_nritems(mid) == 0) { 1031 btrfs_clean_tree_block(mid); 1032 btrfs_tree_unlock(mid); 1033 del_ptr(root, path, level + 1, pslot); 1034 root_sub_used(root, mid->len); 1035 btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1); 1036 free_extent_buffer_stale(mid); 1037 mid = NULL; 1038 } else { 1039 /* update the parent key to reflect our changes */ 1040 struct btrfs_disk_key mid_key; 1041 btrfs_node_key(mid, &mid_key, 0); 1042 ret = btrfs_tree_mod_log_insert_key(parent, pslot, 1043 BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS); 1044 BUG_ON(ret < 0); 1045 btrfs_set_node_key(parent, &mid_key, pslot); 1046 btrfs_mark_buffer_dirty(parent); 1047 } 1048 1049 /* update the path */ 1050 if (left) { 1051 if (btrfs_header_nritems(left) > orig_slot) { 1052 atomic_inc(&left->refs); 1053 /* left was locked after cow */ 1054 path->nodes[level] = left; 1055 path->slots[level + 1] -= 1; 1056 path->slots[level] = orig_slot; 1057 if (mid) { 1058 btrfs_tree_unlock(mid); 1059 free_extent_buffer(mid); 1060 } 1061 } else { 1062 orig_slot -= btrfs_header_nritems(left); 1063 path->slots[level] = orig_slot; 1064 } 1065 } 1066 /* double check we haven't messed things up */ 1067 if (orig_ptr != 1068 btrfs_node_blockptr(path->nodes[level], path->slots[level])) 1069 BUG(); 1070 enospc: 1071 if (right) { 1072 btrfs_tree_unlock(right); 1073 free_extent_buffer(right); 1074 } 1075 if (left) { 1076 if (path->nodes[level] != left) 1077 btrfs_tree_unlock(left); 1078 free_extent_buffer(left); 1079 } 1080 return ret; 1081 } 1082 1083 /* Node balancing for insertion. Here we only split or push nodes around 1084 * when they are completely full. This is also done top down, so we 1085 * have to be pessimistic. 1086 */ 1087 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans, 1088 struct btrfs_root *root, 1089 struct btrfs_path *path, int level) 1090 { 1091 struct btrfs_fs_info *fs_info = root->fs_info; 1092 struct extent_buffer *right = NULL; 1093 struct extent_buffer *mid; 1094 struct extent_buffer *left = NULL; 1095 struct extent_buffer *parent = NULL; 1096 int ret = 0; 1097 int wret; 1098 int pslot; 1099 int orig_slot = path->slots[level]; 1100 1101 if (level == 0) 1102 return 1; 1103 1104 mid = path->nodes[level]; 1105 WARN_ON(btrfs_header_generation(mid) != trans->transid); 1106 1107 if (level < BTRFS_MAX_LEVEL - 1) { 1108 parent = path->nodes[level + 1]; 1109 pslot = path->slots[level + 1]; 1110 } 1111 1112 if (!parent) 1113 return 1; 1114 1115 left = btrfs_read_node_slot(parent, pslot - 1); 1116 if (IS_ERR(left)) 1117 left = NULL; 1118 1119 /* first, try to make some room in the middle buffer */ 1120 if (left) { 1121 u32 left_nr; 1122 1123 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT); 1124 1125 left_nr = btrfs_header_nritems(left); 1126 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) { 1127 wret = 1; 1128 } else { 1129 ret = btrfs_cow_block(trans, root, left, parent, 1130 pslot - 1, &left, 1131 BTRFS_NESTING_LEFT_COW); 1132 if (ret) 1133 wret = 1; 1134 else { 1135 wret = push_node_left(trans, left, mid, 0); 1136 } 1137 } 1138 if (wret < 0) 1139 ret = wret; 1140 if (wret == 0) { 1141 struct btrfs_disk_key disk_key; 1142 orig_slot += left_nr; 1143 btrfs_node_key(mid, &disk_key, 0); 1144 ret = btrfs_tree_mod_log_insert_key(parent, pslot, 1145 BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS); 1146 BUG_ON(ret < 0); 1147 btrfs_set_node_key(parent, &disk_key, pslot); 1148 btrfs_mark_buffer_dirty(parent); 1149 if (btrfs_header_nritems(left) > orig_slot) { 1150 path->nodes[level] = left; 1151 path->slots[level + 1] -= 1; 1152 path->slots[level] = orig_slot; 1153 btrfs_tree_unlock(mid); 1154 free_extent_buffer(mid); 1155 } else { 1156 orig_slot -= 1157 btrfs_header_nritems(left); 1158 path->slots[level] = orig_slot; 1159 btrfs_tree_unlock(left); 1160 free_extent_buffer(left); 1161 } 1162 return 0; 1163 } 1164 btrfs_tree_unlock(left); 1165 free_extent_buffer(left); 1166 } 1167 right = btrfs_read_node_slot(parent, pslot + 1); 1168 if (IS_ERR(right)) 1169 right = NULL; 1170 1171 /* 1172 * then try to empty the right most buffer into the middle 1173 */ 1174 if (right) { 1175 u32 right_nr; 1176 1177 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT); 1178 1179 right_nr = btrfs_header_nritems(right); 1180 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) { 1181 wret = 1; 1182 } else { 1183 ret = btrfs_cow_block(trans, root, right, 1184 parent, pslot + 1, 1185 &right, BTRFS_NESTING_RIGHT_COW); 1186 if (ret) 1187 wret = 1; 1188 else { 1189 wret = balance_node_right(trans, right, mid); 1190 } 1191 } 1192 if (wret < 0) 1193 ret = wret; 1194 if (wret == 0) { 1195 struct btrfs_disk_key disk_key; 1196 1197 btrfs_node_key(right, &disk_key, 0); 1198 ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1, 1199 BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS); 1200 BUG_ON(ret < 0); 1201 btrfs_set_node_key(parent, &disk_key, pslot + 1); 1202 btrfs_mark_buffer_dirty(parent); 1203 1204 if (btrfs_header_nritems(mid) <= orig_slot) { 1205 path->nodes[level] = right; 1206 path->slots[level + 1] += 1; 1207 path->slots[level] = orig_slot - 1208 btrfs_header_nritems(mid); 1209 btrfs_tree_unlock(mid); 1210 free_extent_buffer(mid); 1211 } else { 1212 btrfs_tree_unlock(right); 1213 free_extent_buffer(right); 1214 } 1215 return 0; 1216 } 1217 btrfs_tree_unlock(right); 1218 free_extent_buffer(right); 1219 } 1220 return 1; 1221 } 1222 1223 /* 1224 * readahead one full node of leaves, finding things that are close 1225 * to the block in 'slot', and triggering ra on them. 1226 */ 1227 static void reada_for_search(struct btrfs_fs_info *fs_info, 1228 struct btrfs_path *path, 1229 int level, int slot, u64 objectid) 1230 { 1231 struct extent_buffer *node; 1232 struct btrfs_disk_key disk_key; 1233 u32 nritems; 1234 u64 search; 1235 u64 target; 1236 u64 nread = 0; 1237 u64 nread_max; 1238 u32 nr; 1239 u32 blocksize; 1240 u32 nscan = 0; 1241 1242 if (level != 1 && path->reada != READA_FORWARD_ALWAYS) 1243 return; 1244 1245 if (!path->nodes[level]) 1246 return; 1247 1248 node = path->nodes[level]; 1249 1250 /* 1251 * Since the time between visiting leaves is much shorter than the time 1252 * between visiting nodes, limit read ahead of nodes to 1, to avoid too 1253 * much IO at once (possibly random). 1254 */ 1255 if (path->reada == READA_FORWARD_ALWAYS) { 1256 if (level > 1) 1257 nread_max = node->fs_info->nodesize; 1258 else 1259 nread_max = SZ_128K; 1260 } else { 1261 nread_max = SZ_64K; 1262 } 1263 1264 search = btrfs_node_blockptr(node, slot); 1265 blocksize = fs_info->nodesize; 1266 if (path->reada != READA_FORWARD_ALWAYS) { 1267 struct extent_buffer *eb; 1268 1269 eb = find_extent_buffer(fs_info, search); 1270 if (eb) { 1271 free_extent_buffer(eb); 1272 return; 1273 } 1274 } 1275 1276 target = search; 1277 1278 nritems = btrfs_header_nritems(node); 1279 nr = slot; 1280 1281 while (1) { 1282 if (path->reada == READA_BACK) { 1283 if (nr == 0) 1284 break; 1285 nr--; 1286 } else if (path->reada == READA_FORWARD || 1287 path->reada == READA_FORWARD_ALWAYS) { 1288 nr++; 1289 if (nr >= nritems) 1290 break; 1291 } 1292 if (path->reada == READA_BACK && objectid) { 1293 btrfs_node_key(node, &disk_key, nr); 1294 if (btrfs_disk_key_objectid(&disk_key) != objectid) 1295 break; 1296 } 1297 search = btrfs_node_blockptr(node, nr); 1298 if (path->reada == READA_FORWARD_ALWAYS || 1299 (search <= target && target - search <= 65536) || 1300 (search > target && search - target <= 65536)) { 1301 btrfs_readahead_node_child(node, nr); 1302 nread += blocksize; 1303 } 1304 nscan++; 1305 if (nread > nread_max || nscan > 32) 1306 break; 1307 } 1308 } 1309 1310 static noinline void reada_for_balance(struct btrfs_path *path, int level) 1311 { 1312 struct extent_buffer *parent; 1313 int slot; 1314 int nritems; 1315 1316 parent = path->nodes[level + 1]; 1317 if (!parent) 1318 return; 1319 1320 nritems = btrfs_header_nritems(parent); 1321 slot = path->slots[level + 1]; 1322 1323 if (slot > 0) 1324 btrfs_readahead_node_child(parent, slot - 1); 1325 if (slot + 1 < nritems) 1326 btrfs_readahead_node_child(parent, slot + 1); 1327 } 1328 1329 1330 /* 1331 * when we walk down the tree, it is usually safe to unlock the higher layers 1332 * in the tree. The exceptions are when our path goes through slot 0, because 1333 * operations on the tree might require changing key pointers higher up in the 1334 * tree. 1335 * 1336 * callers might also have set path->keep_locks, which tells this code to keep 1337 * the lock if the path points to the last slot in the block. This is part of 1338 * walking through the tree, and selecting the next slot in the higher block. 1339 * 1340 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so 1341 * if lowest_unlock is 1, level 0 won't be unlocked 1342 */ 1343 static noinline void unlock_up(struct btrfs_path *path, int level, 1344 int lowest_unlock, int min_write_lock_level, 1345 int *write_lock_level) 1346 { 1347 int i; 1348 int skip_level = level; 1349 int no_skips = 0; 1350 struct extent_buffer *t; 1351 1352 for (i = level; i < BTRFS_MAX_LEVEL; i++) { 1353 if (!path->nodes[i]) 1354 break; 1355 if (!path->locks[i]) 1356 break; 1357 if (!no_skips && path->slots[i] == 0) { 1358 skip_level = i + 1; 1359 continue; 1360 } 1361 if (!no_skips && path->keep_locks) { 1362 u32 nritems; 1363 t = path->nodes[i]; 1364 nritems = btrfs_header_nritems(t); 1365 if (nritems < 1 || path->slots[i] >= nritems - 1) { 1366 skip_level = i + 1; 1367 continue; 1368 } 1369 } 1370 if (skip_level < i && i >= lowest_unlock) 1371 no_skips = 1; 1372 1373 t = path->nodes[i]; 1374 if (i >= lowest_unlock && i > skip_level) { 1375 btrfs_tree_unlock_rw(t, path->locks[i]); 1376 path->locks[i] = 0; 1377 if (write_lock_level && 1378 i > min_write_lock_level && 1379 i <= *write_lock_level) { 1380 *write_lock_level = i - 1; 1381 } 1382 } 1383 } 1384 } 1385 1386 /* 1387 * helper function for btrfs_search_slot. The goal is to find a block 1388 * in cache without setting the path to blocking. If we find the block 1389 * we return zero and the path is unchanged. 1390 * 1391 * If we can't find the block, we set the path blocking and do some 1392 * reada. -EAGAIN is returned and the search must be repeated. 1393 */ 1394 static int 1395 read_block_for_search(struct btrfs_root *root, struct btrfs_path *p, 1396 struct extent_buffer **eb_ret, int level, int slot, 1397 const struct btrfs_key *key) 1398 { 1399 struct btrfs_fs_info *fs_info = root->fs_info; 1400 u64 blocknr; 1401 u64 gen; 1402 struct extent_buffer *tmp; 1403 struct btrfs_key first_key; 1404 int ret; 1405 int parent_level; 1406 1407 blocknr = btrfs_node_blockptr(*eb_ret, slot); 1408 gen = btrfs_node_ptr_generation(*eb_ret, slot); 1409 parent_level = btrfs_header_level(*eb_ret); 1410 btrfs_node_key_to_cpu(*eb_ret, &first_key, slot); 1411 1412 tmp = find_extent_buffer(fs_info, blocknr); 1413 if (tmp) { 1414 if (p->reada == READA_FORWARD_ALWAYS) 1415 reada_for_search(fs_info, p, level, slot, key->objectid); 1416 1417 /* first we do an atomic uptodate check */ 1418 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) { 1419 /* 1420 * Do extra check for first_key, eb can be stale due to 1421 * being cached, read from scrub, or have multiple 1422 * parents (shared tree blocks). 1423 */ 1424 if (btrfs_verify_level_key(tmp, 1425 parent_level - 1, &first_key, gen)) { 1426 free_extent_buffer(tmp); 1427 return -EUCLEAN; 1428 } 1429 *eb_ret = tmp; 1430 return 0; 1431 } 1432 1433 /* now we're allowed to do a blocking uptodate check */ 1434 ret = btrfs_read_buffer(tmp, gen, parent_level - 1, &first_key); 1435 if (!ret) { 1436 *eb_ret = tmp; 1437 return 0; 1438 } 1439 free_extent_buffer(tmp); 1440 btrfs_release_path(p); 1441 return -EIO; 1442 } 1443 1444 /* 1445 * reduce lock contention at high levels 1446 * of the btree by dropping locks before 1447 * we read. Don't release the lock on the current 1448 * level because we need to walk this node to figure 1449 * out which blocks to read. 1450 */ 1451 btrfs_unlock_up_safe(p, level + 1); 1452 1453 if (p->reada != READA_NONE) 1454 reada_for_search(fs_info, p, level, slot, key->objectid); 1455 1456 ret = -EAGAIN; 1457 tmp = read_tree_block(fs_info, blocknr, root->root_key.objectid, 1458 gen, parent_level - 1, &first_key); 1459 if (!IS_ERR(tmp)) { 1460 /* 1461 * If the read above didn't mark this buffer up to date, 1462 * it will never end up being up to date. Set ret to EIO now 1463 * and give up so that our caller doesn't loop forever 1464 * on our EAGAINs. 1465 */ 1466 if (!extent_buffer_uptodate(tmp)) 1467 ret = -EIO; 1468 free_extent_buffer(tmp); 1469 } else { 1470 ret = PTR_ERR(tmp); 1471 } 1472 1473 btrfs_release_path(p); 1474 return ret; 1475 } 1476 1477 /* 1478 * helper function for btrfs_search_slot. This does all of the checks 1479 * for node-level blocks and does any balancing required based on 1480 * the ins_len. 1481 * 1482 * If no extra work was required, zero is returned. If we had to 1483 * drop the path, -EAGAIN is returned and btrfs_search_slot must 1484 * start over 1485 */ 1486 static int 1487 setup_nodes_for_search(struct btrfs_trans_handle *trans, 1488 struct btrfs_root *root, struct btrfs_path *p, 1489 struct extent_buffer *b, int level, int ins_len, 1490 int *write_lock_level) 1491 { 1492 struct btrfs_fs_info *fs_info = root->fs_info; 1493 int ret = 0; 1494 1495 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >= 1496 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) { 1497 1498 if (*write_lock_level < level + 1) { 1499 *write_lock_level = level + 1; 1500 btrfs_release_path(p); 1501 return -EAGAIN; 1502 } 1503 1504 reada_for_balance(p, level); 1505 ret = split_node(trans, root, p, level); 1506 1507 b = p->nodes[level]; 1508 } else if (ins_len < 0 && btrfs_header_nritems(b) < 1509 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) { 1510 1511 if (*write_lock_level < level + 1) { 1512 *write_lock_level = level + 1; 1513 btrfs_release_path(p); 1514 return -EAGAIN; 1515 } 1516 1517 reada_for_balance(p, level); 1518 ret = balance_level(trans, root, p, level); 1519 if (ret) 1520 return ret; 1521 1522 b = p->nodes[level]; 1523 if (!b) { 1524 btrfs_release_path(p); 1525 return -EAGAIN; 1526 } 1527 BUG_ON(btrfs_header_nritems(b) == 1); 1528 } 1529 return ret; 1530 } 1531 1532 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path, 1533 u64 iobjectid, u64 ioff, u8 key_type, 1534 struct btrfs_key *found_key) 1535 { 1536 int ret; 1537 struct btrfs_key key; 1538 struct extent_buffer *eb; 1539 1540 ASSERT(path); 1541 ASSERT(found_key); 1542 1543 key.type = key_type; 1544 key.objectid = iobjectid; 1545 key.offset = ioff; 1546 1547 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0); 1548 if (ret < 0) 1549 return ret; 1550 1551 eb = path->nodes[0]; 1552 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) { 1553 ret = btrfs_next_leaf(fs_root, path); 1554 if (ret) 1555 return ret; 1556 eb = path->nodes[0]; 1557 } 1558 1559 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]); 1560 if (found_key->type != key.type || 1561 found_key->objectid != key.objectid) 1562 return 1; 1563 1564 return 0; 1565 } 1566 1567 static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root, 1568 struct btrfs_path *p, 1569 int write_lock_level) 1570 { 1571 struct btrfs_fs_info *fs_info = root->fs_info; 1572 struct extent_buffer *b; 1573 int root_lock; 1574 int level = 0; 1575 1576 /* We try very hard to do read locks on the root */ 1577 root_lock = BTRFS_READ_LOCK; 1578 1579 if (p->search_commit_root) { 1580 /* 1581 * The commit roots are read only so we always do read locks, 1582 * and we always must hold the commit_root_sem when doing 1583 * searches on them, the only exception is send where we don't 1584 * want to block transaction commits for a long time, so 1585 * we need to clone the commit root in order to avoid races 1586 * with transaction commits that create a snapshot of one of 1587 * the roots used by a send operation. 1588 */ 1589 if (p->need_commit_sem) { 1590 down_read(&fs_info->commit_root_sem); 1591 b = btrfs_clone_extent_buffer(root->commit_root); 1592 up_read(&fs_info->commit_root_sem); 1593 if (!b) 1594 return ERR_PTR(-ENOMEM); 1595 1596 } else { 1597 b = root->commit_root; 1598 atomic_inc(&b->refs); 1599 } 1600 level = btrfs_header_level(b); 1601 /* 1602 * Ensure that all callers have set skip_locking when 1603 * p->search_commit_root = 1. 1604 */ 1605 ASSERT(p->skip_locking == 1); 1606 1607 goto out; 1608 } 1609 1610 if (p->skip_locking) { 1611 b = btrfs_root_node(root); 1612 level = btrfs_header_level(b); 1613 goto out; 1614 } 1615 1616 /* 1617 * If the level is set to maximum, we can skip trying to get the read 1618 * lock. 1619 */ 1620 if (write_lock_level < BTRFS_MAX_LEVEL) { 1621 /* 1622 * We don't know the level of the root node until we actually 1623 * have it read locked 1624 */ 1625 b = btrfs_read_lock_root_node(root); 1626 level = btrfs_header_level(b); 1627 if (level > write_lock_level) 1628 goto out; 1629 1630 /* Whoops, must trade for write lock */ 1631 btrfs_tree_read_unlock(b); 1632 free_extent_buffer(b); 1633 } 1634 1635 b = btrfs_lock_root_node(root); 1636 root_lock = BTRFS_WRITE_LOCK; 1637 1638 /* The level might have changed, check again */ 1639 level = btrfs_header_level(b); 1640 1641 out: 1642 p->nodes[level] = b; 1643 if (!p->skip_locking) 1644 p->locks[level] = root_lock; 1645 /* 1646 * Callers are responsible for dropping b's references. 1647 */ 1648 return b; 1649 } 1650 1651 1652 /* 1653 * btrfs_search_slot - look for a key in a tree and perform necessary 1654 * modifications to preserve tree invariants. 1655 * 1656 * @trans: Handle of transaction, used when modifying the tree 1657 * @p: Holds all btree nodes along the search path 1658 * @root: The root node of the tree 1659 * @key: The key we are looking for 1660 * @ins_len: Indicates purpose of search: 1661 * >0 for inserts it's size of item inserted (*) 1662 * <0 for deletions 1663 * 0 for plain searches, not modifying the tree 1664 * 1665 * (*) If size of item inserted doesn't include 1666 * sizeof(struct btrfs_item), then p->search_for_extension must 1667 * be set. 1668 * @cow: boolean should CoW operations be performed. Must always be 1 1669 * when modifying the tree. 1670 * 1671 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree. 1672 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible) 1673 * 1674 * If @key is found, 0 is returned and you can find the item in the leaf level 1675 * of the path (level 0) 1676 * 1677 * If @key isn't found, 1 is returned and the leaf level of the path (level 0) 1678 * points to the slot where it should be inserted 1679 * 1680 * If an error is encountered while searching the tree a negative error number 1681 * is returned 1682 */ 1683 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root, 1684 const struct btrfs_key *key, struct btrfs_path *p, 1685 int ins_len, int cow) 1686 { 1687 struct extent_buffer *b; 1688 int slot; 1689 int ret; 1690 int err; 1691 int level; 1692 int lowest_unlock = 1; 1693 /* everything at write_lock_level or lower must be write locked */ 1694 int write_lock_level = 0; 1695 u8 lowest_level = 0; 1696 int min_write_lock_level; 1697 int prev_cmp; 1698 1699 lowest_level = p->lowest_level; 1700 WARN_ON(lowest_level && ins_len > 0); 1701 WARN_ON(p->nodes[0] != NULL); 1702 BUG_ON(!cow && ins_len); 1703 1704 if (ins_len < 0) { 1705 lowest_unlock = 2; 1706 1707 /* when we are removing items, we might have to go up to level 1708 * two as we update tree pointers Make sure we keep write 1709 * for those levels as well 1710 */ 1711 write_lock_level = 2; 1712 } else if (ins_len > 0) { 1713 /* 1714 * for inserting items, make sure we have a write lock on 1715 * level 1 so we can update keys 1716 */ 1717 write_lock_level = 1; 1718 } 1719 1720 if (!cow) 1721 write_lock_level = -1; 1722 1723 if (cow && (p->keep_locks || p->lowest_level)) 1724 write_lock_level = BTRFS_MAX_LEVEL; 1725 1726 min_write_lock_level = write_lock_level; 1727 1728 again: 1729 prev_cmp = -1; 1730 b = btrfs_search_slot_get_root(root, p, write_lock_level); 1731 if (IS_ERR(b)) { 1732 ret = PTR_ERR(b); 1733 goto done; 1734 } 1735 1736 while (b) { 1737 int dec = 0; 1738 1739 level = btrfs_header_level(b); 1740 1741 if (cow) { 1742 bool last_level = (level == (BTRFS_MAX_LEVEL - 1)); 1743 1744 /* 1745 * if we don't really need to cow this block 1746 * then we don't want to set the path blocking, 1747 * so we test it here 1748 */ 1749 if (!should_cow_block(trans, root, b)) 1750 goto cow_done; 1751 1752 /* 1753 * must have write locks on this node and the 1754 * parent 1755 */ 1756 if (level > write_lock_level || 1757 (level + 1 > write_lock_level && 1758 level + 1 < BTRFS_MAX_LEVEL && 1759 p->nodes[level + 1])) { 1760 write_lock_level = level + 1; 1761 btrfs_release_path(p); 1762 goto again; 1763 } 1764 1765 if (last_level) 1766 err = btrfs_cow_block(trans, root, b, NULL, 0, 1767 &b, 1768 BTRFS_NESTING_COW); 1769 else 1770 err = btrfs_cow_block(trans, root, b, 1771 p->nodes[level + 1], 1772 p->slots[level + 1], &b, 1773 BTRFS_NESTING_COW); 1774 if (err) { 1775 ret = err; 1776 goto done; 1777 } 1778 } 1779 cow_done: 1780 p->nodes[level] = b; 1781 /* 1782 * Leave path with blocking locks to avoid massive 1783 * lock context switch, this is made on purpose. 1784 */ 1785 1786 /* 1787 * we have a lock on b and as long as we aren't changing 1788 * the tree, there is no way to for the items in b to change. 1789 * It is safe to drop the lock on our parent before we 1790 * go through the expensive btree search on b. 1791 * 1792 * If we're inserting or deleting (ins_len != 0), then we might 1793 * be changing slot zero, which may require changing the parent. 1794 * So, we can't drop the lock until after we know which slot 1795 * we're operating on. 1796 */ 1797 if (!ins_len && !p->keep_locks) { 1798 int u = level + 1; 1799 1800 if (u < BTRFS_MAX_LEVEL && p->locks[u]) { 1801 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]); 1802 p->locks[u] = 0; 1803 } 1804 } 1805 1806 /* 1807 * If btrfs_bin_search returns an exact match (prev_cmp == 0) 1808 * we can safely assume the target key will always be in slot 0 1809 * on lower levels due to the invariants BTRFS' btree provides, 1810 * namely that a btrfs_key_ptr entry always points to the 1811 * lowest key in the child node, thus we can skip searching 1812 * lower levels 1813 */ 1814 if (prev_cmp == 0) { 1815 slot = 0; 1816 ret = 0; 1817 } else { 1818 ret = btrfs_bin_search(b, key, &slot); 1819 prev_cmp = ret; 1820 if (ret < 0) 1821 goto done; 1822 } 1823 1824 if (level == 0) { 1825 p->slots[level] = slot; 1826 /* 1827 * Item key already exists. In this case, if we are 1828 * allowed to insert the item (for example, in dir_item 1829 * case, item key collision is allowed), it will be 1830 * merged with the original item. Only the item size 1831 * grows, no new btrfs item will be added. If 1832 * search_for_extension is not set, ins_len already 1833 * accounts the size btrfs_item, deduct it here so leaf 1834 * space check will be correct. 1835 */ 1836 if (ret == 0 && ins_len > 0 && !p->search_for_extension) { 1837 ASSERT(ins_len >= sizeof(struct btrfs_item)); 1838 ins_len -= sizeof(struct btrfs_item); 1839 } 1840 if (ins_len > 0 && 1841 btrfs_leaf_free_space(b) < ins_len) { 1842 if (write_lock_level < 1) { 1843 write_lock_level = 1; 1844 btrfs_release_path(p); 1845 goto again; 1846 } 1847 1848 err = split_leaf(trans, root, key, 1849 p, ins_len, ret == 0); 1850 1851 BUG_ON(err > 0); 1852 if (err) { 1853 ret = err; 1854 goto done; 1855 } 1856 } 1857 if (!p->search_for_split) 1858 unlock_up(p, level, lowest_unlock, 1859 min_write_lock_level, NULL); 1860 goto done; 1861 } 1862 if (ret && slot > 0) { 1863 dec = 1; 1864 slot--; 1865 } 1866 p->slots[level] = slot; 1867 err = setup_nodes_for_search(trans, root, p, b, level, ins_len, 1868 &write_lock_level); 1869 if (err == -EAGAIN) 1870 goto again; 1871 if (err) { 1872 ret = err; 1873 goto done; 1874 } 1875 b = p->nodes[level]; 1876 slot = p->slots[level]; 1877 1878 /* 1879 * Slot 0 is special, if we change the key we have to update 1880 * the parent pointer which means we must have a write lock on 1881 * the parent 1882 */ 1883 if (slot == 0 && ins_len && write_lock_level < level + 1) { 1884 write_lock_level = level + 1; 1885 btrfs_release_path(p); 1886 goto again; 1887 } 1888 1889 unlock_up(p, level, lowest_unlock, min_write_lock_level, 1890 &write_lock_level); 1891 1892 if (level == lowest_level) { 1893 if (dec) 1894 p->slots[level]++; 1895 goto done; 1896 } 1897 1898 err = read_block_for_search(root, p, &b, level, slot, key); 1899 if (err == -EAGAIN) 1900 goto again; 1901 if (err) { 1902 ret = err; 1903 goto done; 1904 } 1905 1906 if (!p->skip_locking) { 1907 level = btrfs_header_level(b); 1908 if (level <= write_lock_level) { 1909 btrfs_tree_lock(b); 1910 p->locks[level] = BTRFS_WRITE_LOCK; 1911 } else { 1912 btrfs_tree_read_lock(b); 1913 p->locks[level] = BTRFS_READ_LOCK; 1914 } 1915 p->nodes[level] = b; 1916 } 1917 } 1918 ret = 1; 1919 done: 1920 if (ret < 0 && !p->skip_release_on_error) 1921 btrfs_release_path(p); 1922 return ret; 1923 } 1924 ALLOW_ERROR_INJECTION(btrfs_search_slot, ERRNO); 1925 1926 /* 1927 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the 1928 * current state of the tree together with the operations recorded in the tree 1929 * modification log to search for the key in a previous version of this tree, as 1930 * denoted by the time_seq parameter. 1931 * 1932 * Naturally, there is no support for insert, delete or cow operations. 1933 * 1934 * The resulting path and return value will be set up as if we called 1935 * btrfs_search_slot at that point in time with ins_len and cow both set to 0. 1936 */ 1937 int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key, 1938 struct btrfs_path *p, u64 time_seq) 1939 { 1940 struct btrfs_fs_info *fs_info = root->fs_info; 1941 struct extent_buffer *b; 1942 int slot; 1943 int ret; 1944 int err; 1945 int level; 1946 int lowest_unlock = 1; 1947 u8 lowest_level = 0; 1948 1949 lowest_level = p->lowest_level; 1950 WARN_ON(p->nodes[0] != NULL); 1951 1952 if (p->search_commit_root) { 1953 BUG_ON(time_seq); 1954 return btrfs_search_slot(NULL, root, key, p, 0, 0); 1955 } 1956 1957 again: 1958 b = btrfs_get_old_root(root, time_seq); 1959 if (!b) { 1960 ret = -EIO; 1961 goto done; 1962 } 1963 level = btrfs_header_level(b); 1964 p->locks[level] = BTRFS_READ_LOCK; 1965 1966 while (b) { 1967 int dec = 0; 1968 1969 level = btrfs_header_level(b); 1970 p->nodes[level] = b; 1971 1972 /* 1973 * we have a lock on b and as long as we aren't changing 1974 * the tree, there is no way to for the items in b to change. 1975 * It is safe to drop the lock on our parent before we 1976 * go through the expensive btree search on b. 1977 */ 1978 btrfs_unlock_up_safe(p, level + 1); 1979 1980 ret = btrfs_bin_search(b, key, &slot); 1981 if (ret < 0) 1982 goto done; 1983 1984 if (level == 0) { 1985 p->slots[level] = slot; 1986 unlock_up(p, level, lowest_unlock, 0, NULL); 1987 goto done; 1988 } 1989 1990 if (ret && slot > 0) { 1991 dec = 1; 1992 slot--; 1993 } 1994 p->slots[level] = slot; 1995 unlock_up(p, level, lowest_unlock, 0, NULL); 1996 1997 if (level == lowest_level) { 1998 if (dec) 1999 p->slots[level]++; 2000 goto done; 2001 } 2002 2003 err = read_block_for_search(root, p, &b, level, slot, key); 2004 if (err == -EAGAIN) 2005 goto again; 2006 if (err) { 2007 ret = err; 2008 goto done; 2009 } 2010 2011 level = btrfs_header_level(b); 2012 btrfs_tree_read_lock(b); 2013 b = btrfs_tree_mod_log_rewind(fs_info, p, b, time_seq); 2014 if (!b) { 2015 ret = -ENOMEM; 2016 goto done; 2017 } 2018 p->locks[level] = BTRFS_READ_LOCK; 2019 p->nodes[level] = b; 2020 } 2021 ret = 1; 2022 done: 2023 if (ret < 0) 2024 btrfs_release_path(p); 2025 2026 return ret; 2027 } 2028 2029 /* 2030 * helper to use instead of search slot if no exact match is needed but 2031 * instead the next or previous item should be returned. 2032 * When find_higher is true, the next higher item is returned, the next lower 2033 * otherwise. 2034 * When return_any and find_higher are both true, and no higher item is found, 2035 * return the next lower instead. 2036 * When return_any is true and find_higher is false, and no lower item is found, 2037 * return the next higher instead. 2038 * It returns 0 if any item is found, 1 if none is found (tree empty), and 2039 * < 0 on error 2040 */ 2041 int btrfs_search_slot_for_read(struct btrfs_root *root, 2042 const struct btrfs_key *key, 2043 struct btrfs_path *p, int find_higher, 2044 int return_any) 2045 { 2046 int ret; 2047 struct extent_buffer *leaf; 2048 2049 again: 2050 ret = btrfs_search_slot(NULL, root, key, p, 0, 0); 2051 if (ret <= 0) 2052 return ret; 2053 /* 2054 * a return value of 1 means the path is at the position where the 2055 * item should be inserted. Normally this is the next bigger item, 2056 * but in case the previous item is the last in a leaf, path points 2057 * to the first free slot in the previous leaf, i.e. at an invalid 2058 * item. 2059 */ 2060 leaf = p->nodes[0]; 2061 2062 if (find_higher) { 2063 if (p->slots[0] >= btrfs_header_nritems(leaf)) { 2064 ret = btrfs_next_leaf(root, p); 2065 if (ret <= 0) 2066 return ret; 2067 if (!return_any) 2068 return 1; 2069 /* 2070 * no higher item found, return the next 2071 * lower instead 2072 */ 2073 return_any = 0; 2074 find_higher = 0; 2075 btrfs_release_path(p); 2076 goto again; 2077 } 2078 } else { 2079 if (p->slots[0] == 0) { 2080 ret = btrfs_prev_leaf(root, p); 2081 if (ret < 0) 2082 return ret; 2083 if (!ret) { 2084 leaf = p->nodes[0]; 2085 if (p->slots[0] == btrfs_header_nritems(leaf)) 2086 p->slots[0]--; 2087 return 0; 2088 } 2089 if (!return_any) 2090 return 1; 2091 /* 2092 * no lower item found, return the next 2093 * higher instead 2094 */ 2095 return_any = 0; 2096 find_higher = 1; 2097 btrfs_release_path(p); 2098 goto again; 2099 } else { 2100 --p->slots[0]; 2101 } 2102 } 2103 return 0; 2104 } 2105 2106 /* 2107 * Execute search and call btrfs_previous_item to traverse backwards if the item 2108 * was not found. 2109 * 2110 * Return 0 if found, 1 if not found and < 0 if error. 2111 */ 2112 int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key, 2113 struct btrfs_path *path) 2114 { 2115 int ret; 2116 2117 ret = btrfs_search_slot(NULL, root, key, path, 0, 0); 2118 if (ret > 0) 2119 ret = btrfs_previous_item(root, path, key->objectid, key->type); 2120 2121 if (ret == 0) 2122 btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]); 2123 2124 return ret; 2125 } 2126 2127 /* 2128 * adjust the pointers going up the tree, starting at level 2129 * making sure the right key of each node is points to 'key'. 2130 * This is used after shifting pointers to the left, so it stops 2131 * fixing up pointers when a given leaf/node is not in slot 0 of the 2132 * higher levels 2133 * 2134 */ 2135 static void fixup_low_keys(struct btrfs_path *path, 2136 struct btrfs_disk_key *key, int level) 2137 { 2138 int i; 2139 struct extent_buffer *t; 2140 int ret; 2141 2142 for (i = level; i < BTRFS_MAX_LEVEL; i++) { 2143 int tslot = path->slots[i]; 2144 2145 if (!path->nodes[i]) 2146 break; 2147 t = path->nodes[i]; 2148 ret = btrfs_tree_mod_log_insert_key(t, tslot, 2149 BTRFS_MOD_LOG_KEY_REPLACE, GFP_ATOMIC); 2150 BUG_ON(ret < 0); 2151 btrfs_set_node_key(t, key, tslot); 2152 btrfs_mark_buffer_dirty(path->nodes[i]); 2153 if (tslot != 0) 2154 break; 2155 } 2156 } 2157 2158 /* 2159 * update item key. 2160 * 2161 * This function isn't completely safe. It's the caller's responsibility 2162 * that the new key won't break the order 2163 */ 2164 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info, 2165 struct btrfs_path *path, 2166 const struct btrfs_key *new_key) 2167 { 2168 struct btrfs_disk_key disk_key; 2169 struct extent_buffer *eb; 2170 int slot; 2171 2172 eb = path->nodes[0]; 2173 slot = path->slots[0]; 2174 if (slot > 0) { 2175 btrfs_item_key(eb, &disk_key, slot - 1); 2176 if (unlikely(comp_keys(&disk_key, new_key) >= 0)) { 2177 btrfs_crit(fs_info, 2178 "slot %u key (%llu %u %llu) new key (%llu %u %llu)", 2179 slot, btrfs_disk_key_objectid(&disk_key), 2180 btrfs_disk_key_type(&disk_key), 2181 btrfs_disk_key_offset(&disk_key), 2182 new_key->objectid, new_key->type, 2183 new_key->offset); 2184 btrfs_print_leaf(eb); 2185 BUG(); 2186 } 2187 } 2188 if (slot < btrfs_header_nritems(eb) - 1) { 2189 btrfs_item_key(eb, &disk_key, slot + 1); 2190 if (unlikely(comp_keys(&disk_key, new_key) <= 0)) { 2191 btrfs_crit(fs_info, 2192 "slot %u key (%llu %u %llu) new key (%llu %u %llu)", 2193 slot, btrfs_disk_key_objectid(&disk_key), 2194 btrfs_disk_key_type(&disk_key), 2195 btrfs_disk_key_offset(&disk_key), 2196 new_key->objectid, new_key->type, 2197 new_key->offset); 2198 btrfs_print_leaf(eb); 2199 BUG(); 2200 } 2201 } 2202 2203 btrfs_cpu_key_to_disk(&disk_key, new_key); 2204 btrfs_set_item_key(eb, &disk_key, slot); 2205 btrfs_mark_buffer_dirty(eb); 2206 if (slot == 0) 2207 fixup_low_keys(path, &disk_key, 1); 2208 } 2209 2210 /* 2211 * Check key order of two sibling extent buffers. 2212 * 2213 * Return true if something is wrong. 2214 * Return false if everything is fine. 2215 * 2216 * Tree-checker only works inside one tree block, thus the following 2217 * corruption can not be detected by tree-checker: 2218 * 2219 * Leaf @left | Leaf @right 2220 * -------------------------------------------------------------- 2221 * | 1 | 2 | 3 | 4 | 5 | f6 | | 7 | 8 | 2222 * 2223 * Key f6 in leaf @left itself is valid, but not valid when the next 2224 * key in leaf @right is 7. 2225 * This can only be checked at tree block merge time. 2226 * And since tree checker has ensured all key order in each tree block 2227 * is correct, we only need to bother the last key of @left and the first 2228 * key of @right. 2229 */ 2230 static bool check_sibling_keys(struct extent_buffer *left, 2231 struct extent_buffer *right) 2232 { 2233 struct btrfs_key left_last; 2234 struct btrfs_key right_first; 2235 int level = btrfs_header_level(left); 2236 int nr_left = btrfs_header_nritems(left); 2237 int nr_right = btrfs_header_nritems(right); 2238 2239 /* No key to check in one of the tree blocks */ 2240 if (!nr_left || !nr_right) 2241 return false; 2242 2243 if (level) { 2244 btrfs_node_key_to_cpu(left, &left_last, nr_left - 1); 2245 btrfs_node_key_to_cpu(right, &right_first, 0); 2246 } else { 2247 btrfs_item_key_to_cpu(left, &left_last, nr_left - 1); 2248 btrfs_item_key_to_cpu(right, &right_first, 0); 2249 } 2250 2251 if (btrfs_comp_cpu_keys(&left_last, &right_first) >= 0) { 2252 btrfs_crit(left->fs_info, 2253 "bad key order, sibling blocks, left last (%llu %u %llu) right first (%llu %u %llu)", 2254 left_last.objectid, left_last.type, 2255 left_last.offset, right_first.objectid, 2256 right_first.type, right_first.offset); 2257 return true; 2258 } 2259 return false; 2260 } 2261 2262 /* 2263 * try to push data from one node into the next node left in the 2264 * tree. 2265 * 2266 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible 2267 * error, and > 0 if there was no room in the left hand block. 2268 */ 2269 static int push_node_left(struct btrfs_trans_handle *trans, 2270 struct extent_buffer *dst, 2271 struct extent_buffer *src, int empty) 2272 { 2273 struct btrfs_fs_info *fs_info = trans->fs_info; 2274 int push_items = 0; 2275 int src_nritems; 2276 int dst_nritems; 2277 int ret = 0; 2278 2279 src_nritems = btrfs_header_nritems(src); 2280 dst_nritems = btrfs_header_nritems(dst); 2281 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems; 2282 WARN_ON(btrfs_header_generation(src) != trans->transid); 2283 WARN_ON(btrfs_header_generation(dst) != trans->transid); 2284 2285 if (!empty && src_nritems <= 8) 2286 return 1; 2287 2288 if (push_items <= 0) 2289 return 1; 2290 2291 if (empty) { 2292 push_items = min(src_nritems, push_items); 2293 if (push_items < src_nritems) { 2294 /* leave at least 8 pointers in the node if 2295 * we aren't going to empty it 2296 */ 2297 if (src_nritems - push_items < 8) { 2298 if (push_items <= 8) 2299 return 1; 2300 push_items -= 8; 2301 } 2302 } 2303 } else 2304 push_items = min(src_nritems - 8, push_items); 2305 2306 /* dst is the left eb, src is the middle eb */ 2307 if (check_sibling_keys(dst, src)) { 2308 ret = -EUCLEAN; 2309 btrfs_abort_transaction(trans, ret); 2310 return ret; 2311 } 2312 ret = btrfs_tree_mod_log_eb_copy(dst, src, dst_nritems, 0, push_items); 2313 if (ret) { 2314 btrfs_abort_transaction(trans, ret); 2315 return ret; 2316 } 2317 copy_extent_buffer(dst, src, 2318 btrfs_node_key_ptr_offset(dst_nritems), 2319 btrfs_node_key_ptr_offset(0), 2320 push_items * sizeof(struct btrfs_key_ptr)); 2321 2322 if (push_items < src_nritems) { 2323 /* 2324 * Don't call btrfs_tree_mod_log_insert_move() here, key removal 2325 * was already fully logged by btrfs_tree_mod_log_eb_copy() above. 2326 */ 2327 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0), 2328 btrfs_node_key_ptr_offset(push_items), 2329 (src_nritems - push_items) * 2330 sizeof(struct btrfs_key_ptr)); 2331 } 2332 btrfs_set_header_nritems(src, src_nritems - push_items); 2333 btrfs_set_header_nritems(dst, dst_nritems + push_items); 2334 btrfs_mark_buffer_dirty(src); 2335 btrfs_mark_buffer_dirty(dst); 2336 2337 return ret; 2338 } 2339 2340 /* 2341 * try to push data from one node into the next node right in the 2342 * tree. 2343 * 2344 * returns 0 if some ptrs were pushed, < 0 if there was some horrible 2345 * error, and > 0 if there was no room in the right hand block. 2346 * 2347 * this will only push up to 1/2 the contents of the left node over 2348 */ 2349 static int balance_node_right(struct btrfs_trans_handle *trans, 2350 struct extent_buffer *dst, 2351 struct extent_buffer *src) 2352 { 2353 struct btrfs_fs_info *fs_info = trans->fs_info; 2354 int push_items = 0; 2355 int max_push; 2356 int src_nritems; 2357 int dst_nritems; 2358 int ret = 0; 2359 2360 WARN_ON(btrfs_header_generation(src) != trans->transid); 2361 WARN_ON(btrfs_header_generation(dst) != trans->transid); 2362 2363 src_nritems = btrfs_header_nritems(src); 2364 dst_nritems = btrfs_header_nritems(dst); 2365 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems; 2366 if (push_items <= 0) 2367 return 1; 2368 2369 if (src_nritems < 4) 2370 return 1; 2371 2372 max_push = src_nritems / 2 + 1; 2373 /* don't try to empty the node */ 2374 if (max_push >= src_nritems) 2375 return 1; 2376 2377 if (max_push < push_items) 2378 push_items = max_push; 2379 2380 /* dst is the right eb, src is the middle eb */ 2381 if (check_sibling_keys(src, dst)) { 2382 ret = -EUCLEAN; 2383 btrfs_abort_transaction(trans, ret); 2384 return ret; 2385 } 2386 ret = btrfs_tree_mod_log_insert_move(dst, push_items, 0, dst_nritems); 2387 BUG_ON(ret < 0); 2388 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items), 2389 btrfs_node_key_ptr_offset(0), 2390 (dst_nritems) * 2391 sizeof(struct btrfs_key_ptr)); 2392 2393 ret = btrfs_tree_mod_log_eb_copy(dst, src, 0, src_nritems - push_items, 2394 push_items); 2395 if (ret) { 2396 btrfs_abort_transaction(trans, ret); 2397 return ret; 2398 } 2399 copy_extent_buffer(dst, src, 2400 btrfs_node_key_ptr_offset(0), 2401 btrfs_node_key_ptr_offset(src_nritems - push_items), 2402 push_items * sizeof(struct btrfs_key_ptr)); 2403 2404 btrfs_set_header_nritems(src, src_nritems - push_items); 2405 btrfs_set_header_nritems(dst, dst_nritems + push_items); 2406 2407 btrfs_mark_buffer_dirty(src); 2408 btrfs_mark_buffer_dirty(dst); 2409 2410 return ret; 2411 } 2412 2413 /* 2414 * helper function to insert a new root level in the tree. 2415 * A new node is allocated, and a single item is inserted to 2416 * point to the existing root 2417 * 2418 * returns zero on success or < 0 on failure. 2419 */ 2420 static noinline int insert_new_root(struct btrfs_trans_handle *trans, 2421 struct btrfs_root *root, 2422 struct btrfs_path *path, int level) 2423 { 2424 struct btrfs_fs_info *fs_info = root->fs_info; 2425 u64 lower_gen; 2426 struct extent_buffer *lower; 2427 struct extent_buffer *c; 2428 struct extent_buffer *old; 2429 struct btrfs_disk_key lower_key; 2430 int ret; 2431 2432 BUG_ON(path->nodes[level]); 2433 BUG_ON(path->nodes[level-1] != root->node); 2434 2435 lower = path->nodes[level-1]; 2436 if (level == 1) 2437 btrfs_item_key(lower, &lower_key, 0); 2438 else 2439 btrfs_node_key(lower, &lower_key, 0); 2440 2441 c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, 2442 &lower_key, level, root->node->start, 0, 2443 BTRFS_NESTING_NEW_ROOT); 2444 if (IS_ERR(c)) 2445 return PTR_ERR(c); 2446 2447 root_add_used(root, fs_info->nodesize); 2448 2449 btrfs_set_header_nritems(c, 1); 2450 btrfs_set_node_key(c, &lower_key, 0); 2451 btrfs_set_node_blockptr(c, 0, lower->start); 2452 lower_gen = btrfs_header_generation(lower); 2453 WARN_ON(lower_gen != trans->transid); 2454 2455 btrfs_set_node_ptr_generation(c, 0, lower_gen); 2456 2457 btrfs_mark_buffer_dirty(c); 2458 2459 old = root->node; 2460 ret = btrfs_tree_mod_log_insert_root(root->node, c, false); 2461 BUG_ON(ret < 0); 2462 rcu_assign_pointer(root->node, c); 2463 2464 /* the super has an extra ref to root->node */ 2465 free_extent_buffer(old); 2466 2467 add_root_to_dirty_list(root); 2468 atomic_inc(&c->refs); 2469 path->nodes[level] = c; 2470 path->locks[level] = BTRFS_WRITE_LOCK; 2471 path->slots[level] = 0; 2472 return 0; 2473 } 2474 2475 /* 2476 * worker function to insert a single pointer in a node. 2477 * the node should have enough room for the pointer already 2478 * 2479 * slot and level indicate where you want the key to go, and 2480 * blocknr is the block the key points to. 2481 */ 2482 static void insert_ptr(struct btrfs_trans_handle *trans, 2483 struct btrfs_path *path, 2484 struct btrfs_disk_key *key, u64 bytenr, 2485 int slot, int level) 2486 { 2487 struct extent_buffer *lower; 2488 int nritems; 2489 int ret; 2490 2491 BUG_ON(!path->nodes[level]); 2492 btrfs_assert_tree_write_locked(path->nodes[level]); 2493 lower = path->nodes[level]; 2494 nritems = btrfs_header_nritems(lower); 2495 BUG_ON(slot > nritems); 2496 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(trans->fs_info)); 2497 if (slot != nritems) { 2498 if (level) { 2499 ret = btrfs_tree_mod_log_insert_move(lower, slot + 1, 2500 slot, nritems - slot); 2501 BUG_ON(ret < 0); 2502 } 2503 memmove_extent_buffer(lower, 2504 btrfs_node_key_ptr_offset(slot + 1), 2505 btrfs_node_key_ptr_offset(slot), 2506 (nritems - slot) * sizeof(struct btrfs_key_ptr)); 2507 } 2508 if (level) { 2509 ret = btrfs_tree_mod_log_insert_key(lower, slot, 2510 BTRFS_MOD_LOG_KEY_ADD, GFP_NOFS); 2511 BUG_ON(ret < 0); 2512 } 2513 btrfs_set_node_key(lower, key, slot); 2514 btrfs_set_node_blockptr(lower, slot, bytenr); 2515 WARN_ON(trans->transid == 0); 2516 btrfs_set_node_ptr_generation(lower, slot, trans->transid); 2517 btrfs_set_header_nritems(lower, nritems + 1); 2518 btrfs_mark_buffer_dirty(lower); 2519 } 2520 2521 /* 2522 * split the node at the specified level in path in two. 2523 * The path is corrected to point to the appropriate node after the split 2524 * 2525 * Before splitting this tries to make some room in the node by pushing 2526 * left and right, if either one works, it returns right away. 2527 * 2528 * returns 0 on success and < 0 on failure 2529 */ 2530 static noinline int split_node(struct btrfs_trans_handle *trans, 2531 struct btrfs_root *root, 2532 struct btrfs_path *path, int level) 2533 { 2534 struct btrfs_fs_info *fs_info = root->fs_info; 2535 struct extent_buffer *c; 2536 struct extent_buffer *split; 2537 struct btrfs_disk_key disk_key; 2538 int mid; 2539 int ret; 2540 u32 c_nritems; 2541 2542 c = path->nodes[level]; 2543 WARN_ON(btrfs_header_generation(c) != trans->transid); 2544 if (c == root->node) { 2545 /* 2546 * trying to split the root, lets make a new one 2547 * 2548 * tree mod log: We don't log_removal old root in 2549 * insert_new_root, because that root buffer will be kept as a 2550 * normal node. We are going to log removal of half of the 2551 * elements below with btrfs_tree_mod_log_eb_copy(). We're 2552 * holding a tree lock on the buffer, which is why we cannot 2553 * race with other tree_mod_log users. 2554 */ 2555 ret = insert_new_root(trans, root, path, level + 1); 2556 if (ret) 2557 return ret; 2558 } else { 2559 ret = push_nodes_for_insert(trans, root, path, level); 2560 c = path->nodes[level]; 2561 if (!ret && btrfs_header_nritems(c) < 2562 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) 2563 return 0; 2564 if (ret < 0) 2565 return ret; 2566 } 2567 2568 c_nritems = btrfs_header_nritems(c); 2569 mid = (c_nritems + 1) / 2; 2570 btrfs_node_key(c, &disk_key, mid); 2571 2572 split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, 2573 &disk_key, level, c->start, 0, 2574 BTRFS_NESTING_SPLIT); 2575 if (IS_ERR(split)) 2576 return PTR_ERR(split); 2577 2578 root_add_used(root, fs_info->nodesize); 2579 ASSERT(btrfs_header_level(c) == level); 2580 2581 ret = btrfs_tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid); 2582 if (ret) { 2583 btrfs_abort_transaction(trans, ret); 2584 return ret; 2585 } 2586 copy_extent_buffer(split, c, 2587 btrfs_node_key_ptr_offset(0), 2588 btrfs_node_key_ptr_offset(mid), 2589 (c_nritems - mid) * sizeof(struct btrfs_key_ptr)); 2590 btrfs_set_header_nritems(split, c_nritems - mid); 2591 btrfs_set_header_nritems(c, mid); 2592 2593 btrfs_mark_buffer_dirty(c); 2594 btrfs_mark_buffer_dirty(split); 2595 2596 insert_ptr(trans, path, &disk_key, split->start, 2597 path->slots[level + 1] + 1, level + 1); 2598 2599 if (path->slots[level] >= mid) { 2600 path->slots[level] -= mid; 2601 btrfs_tree_unlock(c); 2602 free_extent_buffer(c); 2603 path->nodes[level] = split; 2604 path->slots[level + 1] += 1; 2605 } else { 2606 btrfs_tree_unlock(split); 2607 free_extent_buffer(split); 2608 } 2609 return 0; 2610 } 2611 2612 /* 2613 * how many bytes are required to store the items in a leaf. start 2614 * and nr indicate which items in the leaf to check. This totals up the 2615 * space used both by the item structs and the item data 2616 */ 2617 static int leaf_space_used(struct extent_buffer *l, int start, int nr) 2618 { 2619 struct btrfs_item *start_item; 2620 struct btrfs_item *end_item; 2621 int data_len; 2622 int nritems = btrfs_header_nritems(l); 2623 int end = min(nritems, start + nr) - 1; 2624 2625 if (!nr) 2626 return 0; 2627 start_item = btrfs_item_nr(start); 2628 end_item = btrfs_item_nr(end); 2629 data_len = btrfs_item_offset(l, start_item) + 2630 btrfs_item_size(l, start_item); 2631 data_len = data_len - btrfs_item_offset(l, end_item); 2632 data_len += sizeof(struct btrfs_item) * nr; 2633 WARN_ON(data_len < 0); 2634 return data_len; 2635 } 2636 2637 /* 2638 * The space between the end of the leaf items and 2639 * the start of the leaf data. IOW, how much room 2640 * the leaf has left for both items and data 2641 */ 2642 noinline int btrfs_leaf_free_space(struct extent_buffer *leaf) 2643 { 2644 struct btrfs_fs_info *fs_info = leaf->fs_info; 2645 int nritems = btrfs_header_nritems(leaf); 2646 int ret; 2647 2648 ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems); 2649 if (ret < 0) { 2650 btrfs_crit(fs_info, 2651 "leaf free space ret %d, leaf data size %lu, used %d nritems %d", 2652 ret, 2653 (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info), 2654 leaf_space_used(leaf, 0, nritems), nritems); 2655 } 2656 return ret; 2657 } 2658 2659 /* 2660 * min slot controls the lowest index we're willing to push to the 2661 * right. We'll push up to and including min_slot, but no lower 2662 */ 2663 static noinline int __push_leaf_right(struct btrfs_path *path, 2664 int data_size, int empty, 2665 struct extent_buffer *right, 2666 int free_space, u32 left_nritems, 2667 u32 min_slot) 2668 { 2669 struct btrfs_fs_info *fs_info = right->fs_info; 2670 struct extent_buffer *left = path->nodes[0]; 2671 struct extent_buffer *upper = path->nodes[1]; 2672 struct btrfs_map_token token; 2673 struct btrfs_disk_key disk_key; 2674 int slot; 2675 u32 i; 2676 int push_space = 0; 2677 int push_items = 0; 2678 struct btrfs_item *item; 2679 u32 nr; 2680 u32 right_nritems; 2681 u32 data_end; 2682 u32 this_item_size; 2683 2684 if (empty) 2685 nr = 0; 2686 else 2687 nr = max_t(u32, 1, min_slot); 2688 2689 if (path->slots[0] >= left_nritems) 2690 push_space += data_size; 2691 2692 slot = path->slots[1]; 2693 i = left_nritems - 1; 2694 while (i >= nr) { 2695 item = btrfs_item_nr(i); 2696 2697 if (!empty && push_items > 0) { 2698 if (path->slots[0] > i) 2699 break; 2700 if (path->slots[0] == i) { 2701 int space = btrfs_leaf_free_space(left); 2702 2703 if (space + push_space * 2 > free_space) 2704 break; 2705 } 2706 } 2707 2708 if (path->slots[0] == i) 2709 push_space += data_size; 2710 2711 this_item_size = btrfs_item_size(left, item); 2712 if (this_item_size + sizeof(*item) + push_space > free_space) 2713 break; 2714 2715 push_items++; 2716 push_space += this_item_size + sizeof(*item); 2717 if (i == 0) 2718 break; 2719 i--; 2720 } 2721 2722 if (push_items == 0) 2723 goto out_unlock; 2724 2725 WARN_ON(!empty && push_items == left_nritems); 2726 2727 /* push left to right */ 2728 right_nritems = btrfs_header_nritems(right); 2729 2730 push_space = btrfs_item_end_nr(left, left_nritems - push_items); 2731 push_space -= leaf_data_end(left); 2732 2733 /* make room in the right data area */ 2734 data_end = leaf_data_end(right); 2735 memmove_extent_buffer(right, 2736 BTRFS_LEAF_DATA_OFFSET + data_end - push_space, 2737 BTRFS_LEAF_DATA_OFFSET + data_end, 2738 BTRFS_LEAF_DATA_SIZE(fs_info) - data_end); 2739 2740 /* copy from the left data area */ 2741 copy_extent_buffer(right, left, BTRFS_LEAF_DATA_OFFSET + 2742 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space, 2743 BTRFS_LEAF_DATA_OFFSET + leaf_data_end(left), 2744 push_space); 2745 2746 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items), 2747 btrfs_item_nr_offset(0), 2748 right_nritems * sizeof(struct btrfs_item)); 2749 2750 /* copy the items from left to right */ 2751 copy_extent_buffer(right, left, btrfs_item_nr_offset(0), 2752 btrfs_item_nr_offset(left_nritems - push_items), 2753 push_items * sizeof(struct btrfs_item)); 2754 2755 /* update the item pointers */ 2756 btrfs_init_map_token(&token, right); 2757 right_nritems += push_items; 2758 btrfs_set_header_nritems(right, right_nritems); 2759 push_space = BTRFS_LEAF_DATA_SIZE(fs_info); 2760 for (i = 0; i < right_nritems; i++) { 2761 item = btrfs_item_nr(i); 2762 push_space -= btrfs_token_item_size(&token, item); 2763 btrfs_set_token_item_offset(&token, item, push_space); 2764 } 2765 2766 left_nritems -= push_items; 2767 btrfs_set_header_nritems(left, left_nritems); 2768 2769 if (left_nritems) 2770 btrfs_mark_buffer_dirty(left); 2771 else 2772 btrfs_clean_tree_block(left); 2773 2774 btrfs_mark_buffer_dirty(right); 2775 2776 btrfs_item_key(right, &disk_key, 0); 2777 btrfs_set_node_key(upper, &disk_key, slot + 1); 2778 btrfs_mark_buffer_dirty(upper); 2779 2780 /* then fixup the leaf pointer in the path */ 2781 if (path->slots[0] >= left_nritems) { 2782 path->slots[0] -= left_nritems; 2783 if (btrfs_header_nritems(path->nodes[0]) == 0) 2784 btrfs_clean_tree_block(path->nodes[0]); 2785 btrfs_tree_unlock(path->nodes[0]); 2786 free_extent_buffer(path->nodes[0]); 2787 path->nodes[0] = right; 2788 path->slots[1] += 1; 2789 } else { 2790 btrfs_tree_unlock(right); 2791 free_extent_buffer(right); 2792 } 2793 return 0; 2794 2795 out_unlock: 2796 btrfs_tree_unlock(right); 2797 free_extent_buffer(right); 2798 return 1; 2799 } 2800 2801 /* 2802 * push some data in the path leaf to the right, trying to free up at 2803 * least data_size bytes. returns zero if the push worked, nonzero otherwise 2804 * 2805 * returns 1 if the push failed because the other node didn't have enough 2806 * room, 0 if everything worked out and < 0 if there were major errors. 2807 * 2808 * this will push starting from min_slot to the end of the leaf. It won't 2809 * push any slot lower than min_slot 2810 */ 2811 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root 2812 *root, struct btrfs_path *path, 2813 int min_data_size, int data_size, 2814 int empty, u32 min_slot) 2815 { 2816 struct extent_buffer *left = path->nodes[0]; 2817 struct extent_buffer *right; 2818 struct extent_buffer *upper; 2819 int slot; 2820 int free_space; 2821 u32 left_nritems; 2822 int ret; 2823 2824 if (!path->nodes[1]) 2825 return 1; 2826 2827 slot = path->slots[1]; 2828 upper = path->nodes[1]; 2829 if (slot >= btrfs_header_nritems(upper) - 1) 2830 return 1; 2831 2832 btrfs_assert_tree_write_locked(path->nodes[1]); 2833 2834 right = btrfs_read_node_slot(upper, slot + 1); 2835 /* 2836 * slot + 1 is not valid or we fail to read the right node, 2837 * no big deal, just return. 2838 */ 2839 if (IS_ERR(right)) 2840 return 1; 2841 2842 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT); 2843 2844 free_space = btrfs_leaf_free_space(right); 2845 if (free_space < data_size) 2846 goto out_unlock; 2847 2848 /* cow and double check */ 2849 ret = btrfs_cow_block(trans, root, right, upper, 2850 slot + 1, &right, BTRFS_NESTING_RIGHT_COW); 2851 if (ret) 2852 goto out_unlock; 2853 2854 free_space = btrfs_leaf_free_space(right); 2855 if (free_space < data_size) 2856 goto out_unlock; 2857 2858 left_nritems = btrfs_header_nritems(left); 2859 if (left_nritems == 0) 2860 goto out_unlock; 2861 2862 if (check_sibling_keys(left, right)) { 2863 ret = -EUCLEAN; 2864 btrfs_tree_unlock(right); 2865 free_extent_buffer(right); 2866 return ret; 2867 } 2868 if (path->slots[0] == left_nritems && !empty) { 2869 /* Key greater than all keys in the leaf, right neighbor has 2870 * enough room for it and we're not emptying our leaf to delete 2871 * it, therefore use right neighbor to insert the new item and 2872 * no need to touch/dirty our left leaf. */ 2873 btrfs_tree_unlock(left); 2874 free_extent_buffer(left); 2875 path->nodes[0] = right; 2876 path->slots[0] = 0; 2877 path->slots[1]++; 2878 return 0; 2879 } 2880 2881 return __push_leaf_right(path, min_data_size, empty, 2882 right, free_space, left_nritems, min_slot); 2883 out_unlock: 2884 btrfs_tree_unlock(right); 2885 free_extent_buffer(right); 2886 return 1; 2887 } 2888 2889 /* 2890 * push some data in the path leaf to the left, trying to free up at 2891 * least data_size bytes. returns zero if the push worked, nonzero otherwise 2892 * 2893 * max_slot can put a limit on how far into the leaf we'll push items. The 2894 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the 2895 * items 2896 */ 2897 static noinline int __push_leaf_left(struct btrfs_path *path, int data_size, 2898 int empty, struct extent_buffer *left, 2899 int free_space, u32 right_nritems, 2900 u32 max_slot) 2901 { 2902 struct btrfs_fs_info *fs_info = left->fs_info; 2903 struct btrfs_disk_key disk_key; 2904 struct extent_buffer *right = path->nodes[0]; 2905 int i; 2906 int push_space = 0; 2907 int push_items = 0; 2908 struct btrfs_item *item; 2909 u32 old_left_nritems; 2910 u32 nr; 2911 int ret = 0; 2912 u32 this_item_size; 2913 u32 old_left_item_size; 2914 struct btrfs_map_token token; 2915 2916 if (empty) 2917 nr = min(right_nritems, max_slot); 2918 else 2919 nr = min(right_nritems - 1, max_slot); 2920 2921 for (i = 0; i < nr; i++) { 2922 item = btrfs_item_nr(i); 2923 2924 if (!empty && push_items > 0) { 2925 if (path->slots[0] < i) 2926 break; 2927 if (path->slots[0] == i) { 2928 int space = btrfs_leaf_free_space(right); 2929 2930 if (space + push_space * 2 > free_space) 2931 break; 2932 } 2933 } 2934 2935 if (path->slots[0] == i) 2936 push_space += data_size; 2937 2938 this_item_size = btrfs_item_size(right, item); 2939 if (this_item_size + sizeof(*item) + push_space > free_space) 2940 break; 2941 2942 push_items++; 2943 push_space += this_item_size + sizeof(*item); 2944 } 2945 2946 if (push_items == 0) { 2947 ret = 1; 2948 goto out; 2949 } 2950 WARN_ON(!empty && push_items == btrfs_header_nritems(right)); 2951 2952 /* push data from right to left */ 2953 copy_extent_buffer(left, right, 2954 btrfs_item_nr_offset(btrfs_header_nritems(left)), 2955 btrfs_item_nr_offset(0), 2956 push_items * sizeof(struct btrfs_item)); 2957 2958 push_space = BTRFS_LEAF_DATA_SIZE(fs_info) - 2959 btrfs_item_offset_nr(right, push_items - 1); 2960 2961 copy_extent_buffer(left, right, BTRFS_LEAF_DATA_OFFSET + 2962 leaf_data_end(left) - push_space, 2963 BTRFS_LEAF_DATA_OFFSET + 2964 btrfs_item_offset_nr(right, push_items - 1), 2965 push_space); 2966 old_left_nritems = btrfs_header_nritems(left); 2967 BUG_ON(old_left_nritems <= 0); 2968 2969 btrfs_init_map_token(&token, left); 2970 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1); 2971 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) { 2972 u32 ioff; 2973 2974 item = btrfs_item_nr(i); 2975 2976 ioff = btrfs_token_item_offset(&token, item); 2977 btrfs_set_token_item_offset(&token, item, 2978 ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size)); 2979 } 2980 btrfs_set_header_nritems(left, old_left_nritems + push_items); 2981 2982 /* fixup right node */ 2983 if (push_items > right_nritems) 2984 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items, 2985 right_nritems); 2986 2987 if (push_items < right_nritems) { 2988 push_space = btrfs_item_offset_nr(right, push_items - 1) - 2989 leaf_data_end(right); 2990 memmove_extent_buffer(right, BTRFS_LEAF_DATA_OFFSET + 2991 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space, 2992 BTRFS_LEAF_DATA_OFFSET + 2993 leaf_data_end(right), push_space); 2994 2995 memmove_extent_buffer(right, btrfs_item_nr_offset(0), 2996 btrfs_item_nr_offset(push_items), 2997 (btrfs_header_nritems(right) - push_items) * 2998 sizeof(struct btrfs_item)); 2999 } 3000 3001 btrfs_init_map_token(&token, right); 3002 right_nritems -= push_items; 3003 btrfs_set_header_nritems(right, right_nritems); 3004 push_space = BTRFS_LEAF_DATA_SIZE(fs_info); 3005 for (i = 0; i < right_nritems; i++) { 3006 item = btrfs_item_nr(i); 3007 3008 push_space = push_space - btrfs_token_item_size(&token, item); 3009 btrfs_set_token_item_offset(&token, item, push_space); 3010 } 3011 3012 btrfs_mark_buffer_dirty(left); 3013 if (right_nritems) 3014 btrfs_mark_buffer_dirty(right); 3015 else 3016 btrfs_clean_tree_block(right); 3017 3018 btrfs_item_key(right, &disk_key, 0); 3019 fixup_low_keys(path, &disk_key, 1); 3020 3021 /* then fixup the leaf pointer in the path */ 3022 if (path->slots[0] < push_items) { 3023 path->slots[0] += old_left_nritems; 3024 btrfs_tree_unlock(path->nodes[0]); 3025 free_extent_buffer(path->nodes[0]); 3026 path->nodes[0] = left; 3027 path->slots[1] -= 1; 3028 } else { 3029 btrfs_tree_unlock(left); 3030 free_extent_buffer(left); 3031 path->slots[0] -= push_items; 3032 } 3033 BUG_ON(path->slots[0] < 0); 3034 return ret; 3035 out: 3036 btrfs_tree_unlock(left); 3037 free_extent_buffer(left); 3038 return ret; 3039 } 3040 3041 /* 3042 * push some data in the path leaf to the left, trying to free up at 3043 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3044 * 3045 * max_slot can put a limit on how far into the leaf we'll push items. The 3046 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the 3047 * items 3048 */ 3049 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root 3050 *root, struct btrfs_path *path, int min_data_size, 3051 int data_size, int empty, u32 max_slot) 3052 { 3053 struct extent_buffer *right = path->nodes[0]; 3054 struct extent_buffer *left; 3055 int slot; 3056 int free_space; 3057 u32 right_nritems; 3058 int ret = 0; 3059 3060 slot = path->slots[1]; 3061 if (slot == 0) 3062 return 1; 3063 if (!path->nodes[1]) 3064 return 1; 3065 3066 right_nritems = btrfs_header_nritems(right); 3067 if (right_nritems == 0) 3068 return 1; 3069 3070 btrfs_assert_tree_write_locked(path->nodes[1]); 3071 3072 left = btrfs_read_node_slot(path->nodes[1], slot - 1); 3073 /* 3074 * slot - 1 is not valid or we fail to read the left node, 3075 * no big deal, just return. 3076 */ 3077 if (IS_ERR(left)) 3078 return 1; 3079 3080 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT); 3081 3082 free_space = btrfs_leaf_free_space(left); 3083 if (free_space < data_size) { 3084 ret = 1; 3085 goto out; 3086 } 3087 3088 /* cow and double check */ 3089 ret = btrfs_cow_block(trans, root, left, 3090 path->nodes[1], slot - 1, &left, 3091 BTRFS_NESTING_LEFT_COW); 3092 if (ret) { 3093 /* we hit -ENOSPC, but it isn't fatal here */ 3094 if (ret == -ENOSPC) 3095 ret = 1; 3096 goto out; 3097 } 3098 3099 free_space = btrfs_leaf_free_space(left); 3100 if (free_space < data_size) { 3101 ret = 1; 3102 goto out; 3103 } 3104 3105 if (check_sibling_keys(left, right)) { 3106 ret = -EUCLEAN; 3107 goto out; 3108 } 3109 return __push_leaf_left(path, min_data_size, 3110 empty, left, free_space, right_nritems, 3111 max_slot); 3112 out: 3113 btrfs_tree_unlock(left); 3114 free_extent_buffer(left); 3115 return ret; 3116 } 3117 3118 /* 3119 * split the path's leaf in two, making sure there is at least data_size 3120 * available for the resulting leaf level of the path. 3121 */ 3122 static noinline void copy_for_split(struct btrfs_trans_handle *trans, 3123 struct btrfs_path *path, 3124 struct extent_buffer *l, 3125 struct extent_buffer *right, 3126 int slot, int mid, int nritems) 3127 { 3128 struct btrfs_fs_info *fs_info = trans->fs_info; 3129 int data_copy_size; 3130 int rt_data_off; 3131 int i; 3132 struct btrfs_disk_key disk_key; 3133 struct btrfs_map_token token; 3134 3135 nritems = nritems - mid; 3136 btrfs_set_header_nritems(right, nritems); 3137 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(l); 3138 3139 copy_extent_buffer(right, l, btrfs_item_nr_offset(0), 3140 btrfs_item_nr_offset(mid), 3141 nritems * sizeof(struct btrfs_item)); 3142 3143 copy_extent_buffer(right, l, 3144 BTRFS_LEAF_DATA_OFFSET + BTRFS_LEAF_DATA_SIZE(fs_info) - 3145 data_copy_size, BTRFS_LEAF_DATA_OFFSET + 3146 leaf_data_end(l), data_copy_size); 3147 3148 rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid); 3149 3150 btrfs_init_map_token(&token, right); 3151 for (i = 0; i < nritems; i++) { 3152 struct btrfs_item *item = btrfs_item_nr(i); 3153 u32 ioff; 3154 3155 ioff = btrfs_token_item_offset(&token, item); 3156 btrfs_set_token_item_offset(&token, item, ioff + rt_data_off); 3157 } 3158 3159 btrfs_set_header_nritems(l, mid); 3160 btrfs_item_key(right, &disk_key, 0); 3161 insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1); 3162 3163 btrfs_mark_buffer_dirty(right); 3164 btrfs_mark_buffer_dirty(l); 3165 BUG_ON(path->slots[0] != slot); 3166 3167 if (mid <= slot) { 3168 btrfs_tree_unlock(path->nodes[0]); 3169 free_extent_buffer(path->nodes[0]); 3170 path->nodes[0] = right; 3171 path->slots[0] -= mid; 3172 path->slots[1] += 1; 3173 } else { 3174 btrfs_tree_unlock(right); 3175 free_extent_buffer(right); 3176 } 3177 3178 BUG_ON(path->slots[0] < 0); 3179 } 3180 3181 /* 3182 * double splits happen when we need to insert a big item in the middle 3183 * of a leaf. A double split can leave us with 3 mostly empty leaves: 3184 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ] 3185 * A B C 3186 * 3187 * We avoid this by trying to push the items on either side of our target 3188 * into the adjacent leaves. If all goes well we can avoid the double split 3189 * completely. 3190 */ 3191 static noinline int push_for_double_split(struct btrfs_trans_handle *trans, 3192 struct btrfs_root *root, 3193 struct btrfs_path *path, 3194 int data_size) 3195 { 3196 int ret; 3197 int progress = 0; 3198 int slot; 3199 u32 nritems; 3200 int space_needed = data_size; 3201 3202 slot = path->slots[0]; 3203 if (slot < btrfs_header_nritems(path->nodes[0])) 3204 space_needed -= btrfs_leaf_free_space(path->nodes[0]); 3205 3206 /* 3207 * try to push all the items after our slot into the 3208 * right leaf 3209 */ 3210 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot); 3211 if (ret < 0) 3212 return ret; 3213 3214 if (ret == 0) 3215 progress++; 3216 3217 nritems = btrfs_header_nritems(path->nodes[0]); 3218 /* 3219 * our goal is to get our slot at the start or end of a leaf. If 3220 * we've done so we're done 3221 */ 3222 if (path->slots[0] == 0 || path->slots[0] == nritems) 3223 return 0; 3224 3225 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size) 3226 return 0; 3227 3228 /* try to push all the items before our slot into the next leaf */ 3229 slot = path->slots[0]; 3230 space_needed = data_size; 3231 if (slot > 0) 3232 space_needed -= btrfs_leaf_free_space(path->nodes[0]); 3233 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot); 3234 if (ret < 0) 3235 return ret; 3236 3237 if (ret == 0) 3238 progress++; 3239 3240 if (progress) 3241 return 0; 3242 return 1; 3243 } 3244 3245 /* 3246 * split the path's leaf in two, making sure there is at least data_size 3247 * available for the resulting leaf level of the path. 3248 * 3249 * returns 0 if all went well and < 0 on failure. 3250 */ 3251 static noinline int split_leaf(struct btrfs_trans_handle *trans, 3252 struct btrfs_root *root, 3253 const struct btrfs_key *ins_key, 3254 struct btrfs_path *path, int data_size, 3255 int extend) 3256 { 3257 struct btrfs_disk_key disk_key; 3258 struct extent_buffer *l; 3259 u32 nritems; 3260 int mid; 3261 int slot; 3262 struct extent_buffer *right; 3263 struct btrfs_fs_info *fs_info = root->fs_info; 3264 int ret = 0; 3265 int wret; 3266 int split; 3267 int num_doubles = 0; 3268 int tried_avoid_double = 0; 3269 3270 l = path->nodes[0]; 3271 slot = path->slots[0]; 3272 if (extend && data_size + btrfs_item_size_nr(l, slot) + 3273 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info)) 3274 return -EOVERFLOW; 3275 3276 /* first try to make some room by pushing left and right */ 3277 if (data_size && path->nodes[1]) { 3278 int space_needed = data_size; 3279 3280 if (slot < btrfs_header_nritems(l)) 3281 space_needed -= btrfs_leaf_free_space(l); 3282 3283 wret = push_leaf_right(trans, root, path, space_needed, 3284 space_needed, 0, 0); 3285 if (wret < 0) 3286 return wret; 3287 if (wret) { 3288 space_needed = data_size; 3289 if (slot > 0) 3290 space_needed -= btrfs_leaf_free_space(l); 3291 wret = push_leaf_left(trans, root, path, space_needed, 3292 space_needed, 0, (u32)-1); 3293 if (wret < 0) 3294 return wret; 3295 } 3296 l = path->nodes[0]; 3297 3298 /* did the pushes work? */ 3299 if (btrfs_leaf_free_space(l) >= data_size) 3300 return 0; 3301 } 3302 3303 if (!path->nodes[1]) { 3304 ret = insert_new_root(trans, root, path, 1); 3305 if (ret) 3306 return ret; 3307 } 3308 again: 3309 split = 1; 3310 l = path->nodes[0]; 3311 slot = path->slots[0]; 3312 nritems = btrfs_header_nritems(l); 3313 mid = (nritems + 1) / 2; 3314 3315 if (mid <= slot) { 3316 if (nritems == 1 || 3317 leaf_space_used(l, mid, nritems - mid) + data_size > 3318 BTRFS_LEAF_DATA_SIZE(fs_info)) { 3319 if (slot >= nritems) { 3320 split = 0; 3321 } else { 3322 mid = slot; 3323 if (mid != nritems && 3324 leaf_space_used(l, mid, nritems - mid) + 3325 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) { 3326 if (data_size && !tried_avoid_double) 3327 goto push_for_double; 3328 split = 2; 3329 } 3330 } 3331 } 3332 } else { 3333 if (leaf_space_used(l, 0, mid) + data_size > 3334 BTRFS_LEAF_DATA_SIZE(fs_info)) { 3335 if (!extend && data_size && slot == 0) { 3336 split = 0; 3337 } else if ((extend || !data_size) && slot == 0) { 3338 mid = 1; 3339 } else { 3340 mid = slot; 3341 if (mid != nritems && 3342 leaf_space_used(l, mid, nritems - mid) + 3343 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) { 3344 if (data_size && !tried_avoid_double) 3345 goto push_for_double; 3346 split = 2; 3347 } 3348 } 3349 } 3350 } 3351 3352 if (split == 0) 3353 btrfs_cpu_key_to_disk(&disk_key, ins_key); 3354 else 3355 btrfs_item_key(l, &disk_key, mid); 3356 3357 /* 3358 * We have to about BTRFS_NESTING_NEW_ROOT here if we've done a double 3359 * split, because we're only allowed to have MAX_LOCKDEP_SUBCLASSES 3360 * subclasses, which is 8 at the time of this patch, and we've maxed it 3361 * out. In the future we could add a 3362 * BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just 3363 * use BTRFS_NESTING_NEW_ROOT. 3364 */ 3365 right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, 3366 &disk_key, 0, l->start, 0, 3367 num_doubles ? BTRFS_NESTING_NEW_ROOT : 3368 BTRFS_NESTING_SPLIT); 3369 if (IS_ERR(right)) 3370 return PTR_ERR(right); 3371 3372 root_add_used(root, fs_info->nodesize); 3373 3374 if (split == 0) { 3375 if (mid <= slot) { 3376 btrfs_set_header_nritems(right, 0); 3377 insert_ptr(trans, path, &disk_key, 3378 right->start, path->slots[1] + 1, 1); 3379 btrfs_tree_unlock(path->nodes[0]); 3380 free_extent_buffer(path->nodes[0]); 3381 path->nodes[0] = right; 3382 path->slots[0] = 0; 3383 path->slots[1] += 1; 3384 } else { 3385 btrfs_set_header_nritems(right, 0); 3386 insert_ptr(trans, path, &disk_key, 3387 right->start, path->slots[1], 1); 3388 btrfs_tree_unlock(path->nodes[0]); 3389 free_extent_buffer(path->nodes[0]); 3390 path->nodes[0] = right; 3391 path->slots[0] = 0; 3392 if (path->slots[1] == 0) 3393 fixup_low_keys(path, &disk_key, 1); 3394 } 3395 /* 3396 * We create a new leaf 'right' for the required ins_len and 3397 * we'll do btrfs_mark_buffer_dirty() on this leaf after copying 3398 * the content of ins_len to 'right'. 3399 */ 3400 return ret; 3401 } 3402 3403 copy_for_split(trans, path, l, right, slot, mid, nritems); 3404 3405 if (split == 2) { 3406 BUG_ON(num_doubles != 0); 3407 num_doubles++; 3408 goto again; 3409 } 3410 3411 return 0; 3412 3413 push_for_double: 3414 push_for_double_split(trans, root, path, data_size); 3415 tried_avoid_double = 1; 3416 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size) 3417 return 0; 3418 goto again; 3419 } 3420 3421 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans, 3422 struct btrfs_root *root, 3423 struct btrfs_path *path, int ins_len) 3424 { 3425 struct btrfs_key key; 3426 struct extent_buffer *leaf; 3427 struct btrfs_file_extent_item *fi; 3428 u64 extent_len = 0; 3429 u32 item_size; 3430 int ret; 3431 3432 leaf = path->nodes[0]; 3433 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3434 3435 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY && 3436 key.type != BTRFS_EXTENT_CSUM_KEY); 3437 3438 if (btrfs_leaf_free_space(leaf) >= ins_len) 3439 return 0; 3440 3441 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 3442 if (key.type == BTRFS_EXTENT_DATA_KEY) { 3443 fi = btrfs_item_ptr(leaf, path->slots[0], 3444 struct btrfs_file_extent_item); 3445 extent_len = btrfs_file_extent_num_bytes(leaf, fi); 3446 } 3447 btrfs_release_path(path); 3448 3449 path->keep_locks = 1; 3450 path->search_for_split = 1; 3451 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 3452 path->search_for_split = 0; 3453 if (ret > 0) 3454 ret = -EAGAIN; 3455 if (ret < 0) 3456 goto err; 3457 3458 ret = -EAGAIN; 3459 leaf = path->nodes[0]; 3460 /* if our item isn't there, return now */ 3461 if (item_size != btrfs_item_size_nr(leaf, path->slots[0])) 3462 goto err; 3463 3464 /* the leaf has changed, it now has room. return now */ 3465 if (btrfs_leaf_free_space(path->nodes[0]) >= ins_len) 3466 goto err; 3467 3468 if (key.type == BTRFS_EXTENT_DATA_KEY) { 3469 fi = btrfs_item_ptr(leaf, path->slots[0], 3470 struct btrfs_file_extent_item); 3471 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi)) 3472 goto err; 3473 } 3474 3475 ret = split_leaf(trans, root, &key, path, ins_len, 1); 3476 if (ret) 3477 goto err; 3478 3479 path->keep_locks = 0; 3480 btrfs_unlock_up_safe(path, 1); 3481 return 0; 3482 err: 3483 path->keep_locks = 0; 3484 return ret; 3485 } 3486 3487 static noinline int split_item(struct btrfs_path *path, 3488 const struct btrfs_key *new_key, 3489 unsigned long split_offset) 3490 { 3491 struct extent_buffer *leaf; 3492 struct btrfs_item *item; 3493 struct btrfs_item *new_item; 3494 int slot; 3495 char *buf; 3496 u32 nritems; 3497 u32 item_size; 3498 u32 orig_offset; 3499 struct btrfs_disk_key disk_key; 3500 3501 leaf = path->nodes[0]; 3502 BUG_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item)); 3503 3504 item = btrfs_item_nr(path->slots[0]); 3505 orig_offset = btrfs_item_offset(leaf, item); 3506 item_size = btrfs_item_size(leaf, item); 3507 3508 buf = kmalloc(item_size, GFP_NOFS); 3509 if (!buf) 3510 return -ENOMEM; 3511 3512 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, 3513 path->slots[0]), item_size); 3514 3515 slot = path->slots[0] + 1; 3516 nritems = btrfs_header_nritems(leaf); 3517 if (slot != nritems) { 3518 /* shift the items */ 3519 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1), 3520 btrfs_item_nr_offset(slot), 3521 (nritems - slot) * sizeof(struct btrfs_item)); 3522 } 3523 3524 btrfs_cpu_key_to_disk(&disk_key, new_key); 3525 btrfs_set_item_key(leaf, &disk_key, slot); 3526 3527 new_item = btrfs_item_nr(slot); 3528 3529 btrfs_set_item_offset(leaf, new_item, orig_offset); 3530 btrfs_set_item_size(leaf, new_item, item_size - split_offset); 3531 3532 btrfs_set_item_offset(leaf, item, 3533 orig_offset + item_size - split_offset); 3534 btrfs_set_item_size(leaf, item, split_offset); 3535 3536 btrfs_set_header_nritems(leaf, nritems + 1); 3537 3538 /* write the data for the start of the original item */ 3539 write_extent_buffer(leaf, buf, 3540 btrfs_item_ptr_offset(leaf, path->slots[0]), 3541 split_offset); 3542 3543 /* write the data for the new item */ 3544 write_extent_buffer(leaf, buf + split_offset, 3545 btrfs_item_ptr_offset(leaf, slot), 3546 item_size - split_offset); 3547 btrfs_mark_buffer_dirty(leaf); 3548 3549 BUG_ON(btrfs_leaf_free_space(leaf) < 0); 3550 kfree(buf); 3551 return 0; 3552 } 3553 3554 /* 3555 * This function splits a single item into two items, 3556 * giving 'new_key' to the new item and splitting the 3557 * old one at split_offset (from the start of the item). 3558 * 3559 * The path may be released by this operation. After 3560 * the split, the path is pointing to the old item. The 3561 * new item is going to be in the same node as the old one. 3562 * 3563 * Note, the item being split must be smaller enough to live alone on 3564 * a tree block with room for one extra struct btrfs_item 3565 * 3566 * This allows us to split the item in place, keeping a lock on the 3567 * leaf the entire time. 3568 */ 3569 int btrfs_split_item(struct btrfs_trans_handle *trans, 3570 struct btrfs_root *root, 3571 struct btrfs_path *path, 3572 const struct btrfs_key *new_key, 3573 unsigned long split_offset) 3574 { 3575 int ret; 3576 ret = setup_leaf_for_split(trans, root, path, 3577 sizeof(struct btrfs_item)); 3578 if (ret) 3579 return ret; 3580 3581 ret = split_item(path, new_key, split_offset); 3582 return ret; 3583 } 3584 3585 /* 3586 * make the item pointed to by the path smaller. new_size indicates 3587 * how small to make it, and from_end tells us if we just chop bytes 3588 * off the end of the item or if we shift the item to chop bytes off 3589 * the front. 3590 */ 3591 void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end) 3592 { 3593 int slot; 3594 struct extent_buffer *leaf; 3595 struct btrfs_item *item; 3596 u32 nritems; 3597 unsigned int data_end; 3598 unsigned int old_data_start; 3599 unsigned int old_size; 3600 unsigned int size_diff; 3601 int i; 3602 struct btrfs_map_token token; 3603 3604 leaf = path->nodes[0]; 3605 slot = path->slots[0]; 3606 3607 old_size = btrfs_item_size_nr(leaf, slot); 3608 if (old_size == new_size) 3609 return; 3610 3611 nritems = btrfs_header_nritems(leaf); 3612 data_end = leaf_data_end(leaf); 3613 3614 old_data_start = btrfs_item_offset_nr(leaf, slot); 3615 3616 size_diff = old_size - new_size; 3617 3618 BUG_ON(slot < 0); 3619 BUG_ON(slot >= nritems); 3620 3621 /* 3622 * item0..itemN ... dataN.offset..dataN.size .. data0.size 3623 */ 3624 /* first correct the data pointers */ 3625 btrfs_init_map_token(&token, leaf); 3626 for (i = slot; i < nritems; i++) { 3627 u32 ioff; 3628 item = btrfs_item_nr(i); 3629 3630 ioff = btrfs_token_item_offset(&token, item); 3631 btrfs_set_token_item_offset(&token, item, ioff + size_diff); 3632 } 3633 3634 /* shift the data */ 3635 if (from_end) { 3636 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET + 3637 data_end + size_diff, BTRFS_LEAF_DATA_OFFSET + 3638 data_end, old_data_start + new_size - data_end); 3639 } else { 3640 struct btrfs_disk_key disk_key; 3641 u64 offset; 3642 3643 btrfs_item_key(leaf, &disk_key, slot); 3644 3645 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) { 3646 unsigned long ptr; 3647 struct btrfs_file_extent_item *fi; 3648 3649 fi = btrfs_item_ptr(leaf, slot, 3650 struct btrfs_file_extent_item); 3651 fi = (struct btrfs_file_extent_item *)( 3652 (unsigned long)fi - size_diff); 3653 3654 if (btrfs_file_extent_type(leaf, fi) == 3655 BTRFS_FILE_EXTENT_INLINE) { 3656 ptr = btrfs_item_ptr_offset(leaf, slot); 3657 memmove_extent_buffer(leaf, ptr, 3658 (unsigned long)fi, 3659 BTRFS_FILE_EXTENT_INLINE_DATA_START); 3660 } 3661 } 3662 3663 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET + 3664 data_end + size_diff, BTRFS_LEAF_DATA_OFFSET + 3665 data_end, old_data_start - data_end); 3666 3667 offset = btrfs_disk_key_offset(&disk_key); 3668 btrfs_set_disk_key_offset(&disk_key, offset + size_diff); 3669 btrfs_set_item_key(leaf, &disk_key, slot); 3670 if (slot == 0) 3671 fixup_low_keys(path, &disk_key, 1); 3672 } 3673 3674 item = btrfs_item_nr(slot); 3675 btrfs_set_item_size(leaf, item, new_size); 3676 btrfs_mark_buffer_dirty(leaf); 3677 3678 if (btrfs_leaf_free_space(leaf) < 0) { 3679 btrfs_print_leaf(leaf); 3680 BUG(); 3681 } 3682 } 3683 3684 /* 3685 * make the item pointed to by the path bigger, data_size is the added size. 3686 */ 3687 void btrfs_extend_item(struct btrfs_path *path, u32 data_size) 3688 { 3689 int slot; 3690 struct extent_buffer *leaf; 3691 struct btrfs_item *item; 3692 u32 nritems; 3693 unsigned int data_end; 3694 unsigned int old_data; 3695 unsigned int old_size; 3696 int i; 3697 struct btrfs_map_token token; 3698 3699 leaf = path->nodes[0]; 3700 3701 nritems = btrfs_header_nritems(leaf); 3702 data_end = leaf_data_end(leaf); 3703 3704 if (btrfs_leaf_free_space(leaf) < data_size) { 3705 btrfs_print_leaf(leaf); 3706 BUG(); 3707 } 3708 slot = path->slots[0]; 3709 old_data = btrfs_item_end_nr(leaf, slot); 3710 3711 BUG_ON(slot < 0); 3712 if (slot >= nritems) { 3713 btrfs_print_leaf(leaf); 3714 btrfs_crit(leaf->fs_info, "slot %d too large, nritems %d", 3715 slot, nritems); 3716 BUG(); 3717 } 3718 3719 /* 3720 * item0..itemN ... dataN.offset..dataN.size .. data0.size 3721 */ 3722 /* first correct the data pointers */ 3723 btrfs_init_map_token(&token, leaf); 3724 for (i = slot; i < nritems; i++) { 3725 u32 ioff; 3726 item = btrfs_item_nr(i); 3727 3728 ioff = btrfs_token_item_offset(&token, item); 3729 btrfs_set_token_item_offset(&token, item, ioff - data_size); 3730 } 3731 3732 /* shift the data */ 3733 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET + 3734 data_end - data_size, BTRFS_LEAF_DATA_OFFSET + 3735 data_end, old_data - data_end); 3736 3737 data_end = old_data; 3738 old_size = btrfs_item_size_nr(leaf, slot); 3739 item = btrfs_item_nr(slot); 3740 btrfs_set_item_size(leaf, item, old_size + data_size); 3741 btrfs_mark_buffer_dirty(leaf); 3742 3743 if (btrfs_leaf_free_space(leaf) < 0) { 3744 btrfs_print_leaf(leaf); 3745 BUG(); 3746 } 3747 } 3748 3749 /** 3750 * setup_items_for_insert - Helper called before inserting one or more items 3751 * to a leaf. Main purpose is to save stack depth by doing the bulk of the work 3752 * in a function that doesn't call btrfs_search_slot 3753 * 3754 * @root: root we are inserting items to 3755 * @path: points to the leaf/slot where we are going to insert new items 3756 * @batch: information about the batch of items to insert 3757 */ 3758 static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path, 3759 const struct btrfs_item_batch *batch) 3760 { 3761 struct btrfs_fs_info *fs_info = root->fs_info; 3762 struct btrfs_item *item; 3763 int i; 3764 u32 nritems; 3765 unsigned int data_end; 3766 struct btrfs_disk_key disk_key; 3767 struct extent_buffer *leaf; 3768 int slot; 3769 struct btrfs_map_token token; 3770 u32 total_size; 3771 3772 /* 3773 * Before anything else, update keys in the parent and other ancestors 3774 * if needed, then release the write locks on them, so that other tasks 3775 * can use them while we modify the leaf. 3776 */ 3777 if (path->slots[0] == 0) { 3778 btrfs_cpu_key_to_disk(&disk_key, &batch->keys[0]); 3779 fixup_low_keys(path, &disk_key, 1); 3780 } 3781 btrfs_unlock_up_safe(path, 1); 3782 3783 leaf = path->nodes[0]; 3784 slot = path->slots[0]; 3785 3786 nritems = btrfs_header_nritems(leaf); 3787 data_end = leaf_data_end(leaf); 3788 total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item)); 3789 3790 if (btrfs_leaf_free_space(leaf) < total_size) { 3791 btrfs_print_leaf(leaf); 3792 btrfs_crit(fs_info, "not enough freespace need %u have %d", 3793 total_size, btrfs_leaf_free_space(leaf)); 3794 BUG(); 3795 } 3796 3797 btrfs_init_map_token(&token, leaf); 3798 if (slot != nritems) { 3799 unsigned int old_data = btrfs_item_end_nr(leaf, slot); 3800 3801 if (old_data < data_end) { 3802 btrfs_print_leaf(leaf); 3803 btrfs_crit(fs_info, 3804 "item at slot %d with data offset %u beyond data end of leaf %u", 3805 slot, old_data, data_end); 3806 BUG(); 3807 } 3808 /* 3809 * item0..itemN ... dataN.offset..dataN.size .. data0.size 3810 */ 3811 /* first correct the data pointers */ 3812 for (i = slot; i < nritems; i++) { 3813 u32 ioff; 3814 3815 item = btrfs_item_nr(i); 3816 ioff = btrfs_token_item_offset(&token, item); 3817 btrfs_set_token_item_offset(&token, item, 3818 ioff - batch->total_data_size); 3819 } 3820 /* shift the items */ 3821 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + batch->nr), 3822 btrfs_item_nr_offset(slot), 3823 (nritems - slot) * sizeof(struct btrfs_item)); 3824 3825 /* shift the data */ 3826 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET + 3827 data_end - batch->total_data_size, 3828 BTRFS_LEAF_DATA_OFFSET + data_end, 3829 old_data - data_end); 3830 data_end = old_data; 3831 } 3832 3833 /* setup the item for the new data */ 3834 for (i = 0; i < batch->nr; i++) { 3835 btrfs_cpu_key_to_disk(&disk_key, &batch->keys[i]); 3836 btrfs_set_item_key(leaf, &disk_key, slot + i); 3837 item = btrfs_item_nr(slot + i); 3838 data_end -= batch->data_sizes[i]; 3839 btrfs_set_token_item_offset(&token, item, data_end); 3840 btrfs_set_token_item_size(&token, item, batch->data_sizes[i]); 3841 } 3842 3843 btrfs_set_header_nritems(leaf, nritems + batch->nr); 3844 btrfs_mark_buffer_dirty(leaf); 3845 3846 if (btrfs_leaf_free_space(leaf) < 0) { 3847 btrfs_print_leaf(leaf); 3848 BUG(); 3849 } 3850 } 3851 3852 /* 3853 * Insert a new item into a leaf. 3854 * 3855 * @root: The root of the btree. 3856 * @path: A path pointing to the target leaf and slot. 3857 * @key: The key of the new item. 3858 * @data_size: The size of the data associated with the new key. 3859 */ 3860 void btrfs_setup_item_for_insert(struct btrfs_root *root, 3861 struct btrfs_path *path, 3862 const struct btrfs_key *key, 3863 u32 data_size) 3864 { 3865 struct btrfs_item_batch batch; 3866 3867 batch.keys = key; 3868 batch.data_sizes = &data_size; 3869 batch.total_data_size = data_size; 3870 batch.nr = 1; 3871 3872 setup_items_for_insert(root, path, &batch); 3873 } 3874 3875 /* 3876 * Given a key and some data, insert items into the tree. 3877 * This does all the path init required, making room in the tree if needed. 3878 */ 3879 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, 3880 struct btrfs_root *root, 3881 struct btrfs_path *path, 3882 const struct btrfs_item_batch *batch) 3883 { 3884 int ret = 0; 3885 int slot; 3886 u32 total_size; 3887 3888 total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item)); 3889 ret = btrfs_search_slot(trans, root, &batch->keys[0], path, total_size, 1); 3890 if (ret == 0) 3891 return -EEXIST; 3892 if (ret < 0) 3893 return ret; 3894 3895 slot = path->slots[0]; 3896 BUG_ON(slot < 0); 3897 3898 setup_items_for_insert(root, path, batch); 3899 return 0; 3900 } 3901 3902 /* 3903 * Given a key and some data, insert an item into the tree. 3904 * This does all the path init required, making room in the tree if needed. 3905 */ 3906 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, 3907 const struct btrfs_key *cpu_key, void *data, 3908 u32 data_size) 3909 { 3910 int ret = 0; 3911 struct btrfs_path *path; 3912 struct extent_buffer *leaf; 3913 unsigned long ptr; 3914 3915 path = btrfs_alloc_path(); 3916 if (!path) 3917 return -ENOMEM; 3918 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size); 3919 if (!ret) { 3920 leaf = path->nodes[0]; 3921 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 3922 write_extent_buffer(leaf, data, ptr, data_size); 3923 btrfs_mark_buffer_dirty(leaf); 3924 } 3925 btrfs_free_path(path); 3926 return ret; 3927 } 3928 3929 /* 3930 * This function duplicates an item, giving 'new_key' to the new item. 3931 * It guarantees both items live in the same tree leaf and the new item is 3932 * contiguous with the original item. 3933 * 3934 * This allows us to split a file extent in place, keeping a lock on the leaf 3935 * the entire time. 3936 */ 3937 int btrfs_duplicate_item(struct btrfs_trans_handle *trans, 3938 struct btrfs_root *root, 3939 struct btrfs_path *path, 3940 const struct btrfs_key *new_key) 3941 { 3942 struct extent_buffer *leaf; 3943 int ret; 3944 u32 item_size; 3945 3946 leaf = path->nodes[0]; 3947 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 3948 ret = setup_leaf_for_split(trans, root, path, 3949 item_size + sizeof(struct btrfs_item)); 3950 if (ret) 3951 return ret; 3952 3953 path->slots[0]++; 3954 btrfs_setup_item_for_insert(root, path, new_key, item_size); 3955 leaf = path->nodes[0]; 3956 memcpy_extent_buffer(leaf, 3957 btrfs_item_ptr_offset(leaf, path->slots[0]), 3958 btrfs_item_ptr_offset(leaf, path->slots[0] - 1), 3959 item_size); 3960 return 0; 3961 } 3962 3963 /* 3964 * delete the pointer from a given node. 3965 * 3966 * the tree should have been previously balanced so the deletion does not 3967 * empty a node. 3968 */ 3969 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path, 3970 int level, int slot) 3971 { 3972 struct extent_buffer *parent = path->nodes[level]; 3973 u32 nritems; 3974 int ret; 3975 3976 nritems = btrfs_header_nritems(parent); 3977 if (slot != nritems - 1) { 3978 if (level) { 3979 ret = btrfs_tree_mod_log_insert_move(parent, slot, 3980 slot + 1, nritems - slot - 1); 3981 BUG_ON(ret < 0); 3982 } 3983 memmove_extent_buffer(parent, 3984 btrfs_node_key_ptr_offset(slot), 3985 btrfs_node_key_ptr_offset(slot + 1), 3986 sizeof(struct btrfs_key_ptr) * 3987 (nritems - slot - 1)); 3988 } else if (level) { 3989 ret = btrfs_tree_mod_log_insert_key(parent, slot, 3990 BTRFS_MOD_LOG_KEY_REMOVE, GFP_NOFS); 3991 BUG_ON(ret < 0); 3992 } 3993 3994 nritems--; 3995 btrfs_set_header_nritems(parent, nritems); 3996 if (nritems == 0 && parent == root->node) { 3997 BUG_ON(btrfs_header_level(root->node) != 1); 3998 /* just turn the root into a leaf and break */ 3999 btrfs_set_header_level(root->node, 0); 4000 } else if (slot == 0) { 4001 struct btrfs_disk_key disk_key; 4002 4003 btrfs_node_key(parent, &disk_key, 0); 4004 fixup_low_keys(path, &disk_key, level + 1); 4005 } 4006 btrfs_mark_buffer_dirty(parent); 4007 } 4008 4009 /* 4010 * a helper function to delete the leaf pointed to by path->slots[1] and 4011 * path->nodes[1]. 4012 * 4013 * This deletes the pointer in path->nodes[1] and frees the leaf 4014 * block extent. zero is returned if it all worked out, < 0 otherwise. 4015 * 4016 * The path must have already been setup for deleting the leaf, including 4017 * all the proper balancing. path->nodes[1] must be locked. 4018 */ 4019 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans, 4020 struct btrfs_root *root, 4021 struct btrfs_path *path, 4022 struct extent_buffer *leaf) 4023 { 4024 WARN_ON(btrfs_header_generation(leaf) != trans->transid); 4025 del_ptr(root, path, 1, path->slots[1]); 4026 4027 /* 4028 * btrfs_free_extent is expensive, we want to make sure we 4029 * aren't holding any locks when we call it 4030 */ 4031 btrfs_unlock_up_safe(path, 0); 4032 4033 root_sub_used(root, leaf->len); 4034 4035 atomic_inc(&leaf->refs); 4036 btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1); 4037 free_extent_buffer_stale(leaf); 4038 } 4039 /* 4040 * delete the item at the leaf level in path. If that empties 4041 * the leaf, remove it from the tree 4042 */ 4043 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, 4044 struct btrfs_path *path, int slot, int nr) 4045 { 4046 struct btrfs_fs_info *fs_info = root->fs_info; 4047 struct extent_buffer *leaf; 4048 struct btrfs_item *item; 4049 u32 last_off; 4050 u32 dsize = 0; 4051 int ret = 0; 4052 int wret; 4053 int i; 4054 u32 nritems; 4055 4056 leaf = path->nodes[0]; 4057 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1); 4058 4059 for (i = 0; i < nr; i++) 4060 dsize += btrfs_item_size_nr(leaf, slot + i); 4061 4062 nritems = btrfs_header_nritems(leaf); 4063 4064 if (slot + nr != nritems) { 4065 int data_end = leaf_data_end(leaf); 4066 struct btrfs_map_token token; 4067 4068 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET + 4069 data_end + dsize, 4070 BTRFS_LEAF_DATA_OFFSET + data_end, 4071 last_off - data_end); 4072 4073 btrfs_init_map_token(&token, leaf); 4074 for (i = slot + nr; i < nritems; i++) { 4075 u32 ioff; 4076 4077 item = btrfs_item_nr(i); 4078 ioff = btrfs_token_item_offset(&token, item); 4079 btrfs_set_token_item_offset(&token, item, ioff + dsize); 4080 } 4081 4082 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot), 4083 btrfs_item_nr_offset(slot + nr), 4084 sizeof(struct btrfs_item) * 4085 (nritems - slot - nr)); 4086 } 4087 btrfs_set_header_nritems(leaf, nritems - nr); 4088 nritems -= nr; 4089 4090 /* delete the leaf if we've emptied it */ 4091 if (nritems == 0) { 4092 if (leaf == root->node) { 4093 btrfs_set_header_level(leaf, 0); 4094 } else { 4095 btrfs_clean_tree_block(leaf); 4096 btrfs_del_leaf(trans, root, path, leaf); 4097 } 4098 } else { 4099 int used = leaf_space_used(leaf, 0, nritems); 4100 if (slot == 0) { 4101 struct btrfs_disk_key disk_key; 4102 4103 btrfs_item_key(leaf, &disk_key, 0); 4104 fixup_low_keys(path, &disk_key, 1); 4105 } 4106 4107 /* delete the leaf if it is mostly empty */ 4108 if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) { 4109 /* push_leaf_left fixes the path. 4110 * make sure the path still points to our leaf 4111 * for possible call to del_ptr below 4112 */ 4113 slot = path->slots[1]; 4114 atomic_inc(&leaf->refs); 4115 4116 wret = push_leaf_left(trans, root, path, 1, 1, 4117 1, (u32)-1); 4118 if (wret < 0 && wret != -ENOSPC) 4119 ret = wret; 4120 4121 if (path->nodes[0] == leaf && 4122 btrfs_header_nritems(leaf)) { 4123 wret = push_leaf_right(trans, root, path, 1, 4124 1, 1, 0); 4125 if (wret < 0 && wret != -ENOSPC) 4126 ret = wret; 4127 } 4128 4129 if (btrfs_header_nritems(leaf) == 0) { 4130 path->slots[1] = slot; 4131 btrfs_del_leaf(trans, root, path, leaf); 4132 free_extent_buffer(leaf); 4133 ret = 0; 4134 } else { 4135 /* if we're still in the path, make sure 4136 * we're dirty. Otherwise, one of the 4137 * push_leaf functions must have already 4138 * dirtied this buffer 4139 */ 4140 if (path->nodes[0] == leaf) 4141 btrfs_mark_buffer_dirty(leaf); 4142 free_extent_buffer(leaf); 4143 } 4144 } else { 4145 btrfs_mark_buffer_dirty(leaf); 4146 } 4147 } 4148 return ret; 4149 } 4150 4151 /* 4152 * search the tree again to find a leaf with lesser keys 4153 * returns 0 if it found something or 1 if there are no lesser leaves. 4154 * returns < 0 on io errors. 4155 * 4156 * This may release the path, and so you may lose any locks held at the 4157 * time you call it. 4158 */ 4159 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path) 4160 { 4161 struct btrfs_key key; 4162 struct btrfs_disk_key found_key; 4163 int ret; 4164 4165 btrfs_item_key_to_cpu(path->nodes[0], &key, 0); 4166 4167 if (key.offset > 0) { 4168 key.offset--; 4169 } else if (key.type > 0) { 4170 key.type--; 4171 key.offset = (u64)-1; 4172 } else if (key.objectid > 0) { 4173 key.objectid--; 4174 key.type = (u8)-1; 4175 key.offset = (u64)-1; 4176 } else { 4177 return 1; 4178 } 4179 4180 btrfs_release_path(path); 4181 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4182 if (ret < 0) 4183 return ret; 4184 btrfs_item_key(path->nodes[0], &found_key, 0); 4185 ret = comp_keys(&found_key, &key); 4186 /* 4187 * We might have had an item with the previous key in the tree right 4188 * before we released our path. And after we released our path, that 4189 * item might have been pushed to the first slot (0) of the leaf we 4190 * were holding due to a tree balance. Alternatively, an item with the 4191 * previous key can exist as the only element of a leaf (big fat item). 4192 * Therefore account for these 2 cases, so that our callers (like 4193 * btrfs_previous_item) don't miss an existing item with a key matching 4194 * the previous key we computed above. 4195 */ 4196 if (ret <= 0) 4197 return 0; 4198 return 1; 4199 } 4200 4201 /* 4202 * A helper function to walk down the tree starting at min_key, and looking 4203 * for nodes or leaves that are have a minimum transaction id. 4204 * This is used by the btree defrag code, and tree logging 4205 * 4206 * This does not cow, but it does stuff the starting key it finds back 4207 * into min_key, so you can call btrfs_search_slot with cow=1 on the 4208 * key and get a writable path. 4209 * 4210 * This honors path->lowest_level to prevent descent past a given level 4211 * of the tree. 4212 * 4213 * min_trans indicates the oldest transaction that you are interested 4214 * in walking through. Any nodes or leaves older than min_trans are 4215 * skipped over (without reading them). 4216 * 4217 * returns zero if something useful was found, < 0 on error and 1 if there 4218 * was nothing in the tree that matched the search criteria. 4219 */ 4220 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key, 4221 struct btrfs_path *path, 4222 u64 min_trans) 4223 { 4224 struct extent_buffer *cur; 4225 struct btrfs_key found_key; 4226 int slot; 4227 int sret; 4228 u32 nritems; 4229 int level; 4230 int ret = 1; 4231 int keep_locks = path->keep_locks; 4232 4233 path->keep_locks = 1; 4234 again: 4235 cur = btrfs_read_lock_root_node(root); 4236 level = btrfs_header_level(cur); 4237 WARN_ON(path->nodes[level]); 4238 path->nodes[level] = cur; 4239 path->locks[level] = BTRFS_READ_LOCK; 4240 4241 if (btrfs_header_generation(cur) < min_trans) { 4242 ret = 1; 4243 goto out; 4244 } 4245 while (1) { 4246 nritems = btrfs_header_nritems(cur); 4247 level = btrfs_header_level(cur); 4248 sret = btrfs_bin_search(cur, min_key, &slot); 4249 if (sret < 0) { 4250 ret = sret; 4251 goto out; 4252 } 4253 4254 /* at the lowest level, we're done, setup the path and exit */ 4255 if (level == path->lowest_level) { 4256 if (slot >= nritems) 4257 goto find_next_key; 4258 ret = 0; 4259 path->slots[level] = slot; 4260 btrfs_item_key_to_cpu(cur, &found_key, slot); 4261 goto out; 4262 } 4263 if (sret && slot > 0) 4264 slot--; 4265 /* 4266 * check this node pointer against the min_trans parameters. 4267 * If it is too old, skip to the next one. 4268 */ 4269 while (slot < nritems) { 4270 u64 gen; 4271 4272 gen = btrfs_node_ptr_generation(cur, slot); 4273 if (gen < min_trans) { 4274 slot++; 4275 continue; 4276 } 4277 break; 4278 } 4279 find_next_key: 4280 /* 4281 * we didn't find a candidate key in this node, walk forward 4282 * and find another one 4283 */ 4284 if (slot >= nritems) { 4285 path->slots[level] = slot; 4286 sret = btrfs_find_next_key(root, path, min_key, level, 4287 min_trans); 4288 if (sret == 0) { 4289 btrfs_release_path(path); 4290 goto again; 4291 } else { 4292 goto out; 4293 } 4294 } 4295 /* save our key for returning back */ 4296 btrfs_node_key_to_cpu(cur, &found_key, slot); 4297 path->slots[level] = slot; 4298 if (level == path->lowest_level) { 4299 ret = 0; 4300 goto out; 4301 } 4302 cur = btrfs_read_node_slot(cur, slot); 4303 if (IS_ERR(cur)) { 4304 ret = PTR_ERR(cur); 4305 goto out; 4306 } 4307 4308 btrfs_tree_read_lock(cur); 4309 4310 path->locks[level - 1] = BTRFS_READ_LOCK; 4311 path->nodes[level - 1] = cur; 4312 unlock_up(path, level, 1, 0, NULL); 4313 } 4314 out: 4315 path->keep_locks = keep_locks; 4316 if (ret == 0) { 4317 btrfs_unlock_up_safe(path, path->lowest_level + 1); 4318 memcpy(min_key, &found_key, sizeof(found_key)); 4319 } 4320 return ret; 4321 } 4322 4323 /* 4324 * this is similar to btrfs_next_leaf, but does not try to preserve 4325 * and fixup the path. It looks for and returns the next key in the 4326 * tree based on the current path and the min_trans parameters. 4327 * 4328 * 0 is returned if another key is found, < 0 if there are any errors 4329 * and 1 is returned if there are no higher keys in the tree 4330 * 4331 * path->keep_locks should be set to 1 on the search made before 4332 * calling this function. 4333 */ 4334 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, 4335 struct btrfs_key *key, int level, u64 min_trans) 4336 { 4337 int slot; 4338 struct extent_buffer *c; 4339 4340 WARN_ON(!path->keep_locks && !path->skip_locking); 4341 while (level < BTRFS_MAX_LEVEL) { 4342 if (!path->nodes[level]) 4343 return 1; 4344 4345 slot = path->slots[level] + 1; 4346 c = path->nodes[level]; 4347 next: 4348 if (slot >= btrfs_header_nritems(c)) { 4349 int ret; 4350 int orig_lowest; 4351 struct btrfs_key cur_key; 4352 if (level + 1 >= BTRFS_MAX_LEVEL || 4353 !path->nodes[level + 1]) 4354 return 1; 4355 4356 if (path->locks[level + 1] || path->skip_locking) { 4357 level++; 4358 continue; 4359 } 4360 4361 slot = btrfs_header_nritems(c) - 1; 4362 if (level == 0) 4363 btrfs_item_key_to_cpu(c, &cur_key, slot); 4364 else 4365 btrfs_node_key_to_cpu(c, &cur_key, slot); 4366 4367 orig_lowest = path->lowest_level; 4368 btrfs_release_path(path); 4369 path->lowest_level = level; 4370 ret = btrfs_search_slot(NULL, root, &cur_key, path, 4371 0, 0); 4372 path->lowest_level = orig_lowest; 4373 if (ret < 0) 4374 return ret; 4375 4376 c = path->nodes[level]; 4377 slot = path->slots[level]; 4378 if (ret == 0) 4379 slot++; 4380 goto next; 4381 } 4382 4383 if (level == 0) 4384 btrfs_item_key_to_cpu(c, key, slot); 4385 else { 4386 u64 gen = btrfs_node_ptr_generation(c, slot); 4387 4388 if (gen < min_trans) { 4389 slot++; 4390 goto next; 4391 } 4392 btrfs_node_key_to_cpu(c, key, slot); 4393 } 4394 return 0; 4395 } 4396 return 1; 4397 } 4398 4399 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, 4400 u64 time_seq) 4401 { 4402 int slot; 4403 int level; 4404 struct extent_buffer *c; 4405 struct extent_buffer *next; 4406 struct btrfs_key key; 4407 u32 nritems; 4408 int ret; 4409 int i; 4410 4411 nritems = btrfs_header_nritems(path->nodes[0]); 4412 if (nritems == 0) 4413 return 1; 4414 4415 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1); 4416 again: 4417 level = 1; 4418 next = NULL; 4419 btrfs_release_path(path); 4420 4421 path->keep_locks = 1; 4422 4423 if (time_seq) 4424 ret = btrfs_search_old_slot(root, &key, path, time_seq); 4425 else 4426 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4427 path->keep_locks = 0; 4428 4429 if (ret < 0) 4430 return ret; 4431 4432 nritems = btrfs_header_nritems(path->nodes[0]); 4433 /* 4434 * by releasing the path above we dropped all our locks. A balance 4435 * could have added more items next to the key that used to be 4436 * at the very end of the block. So, check again here and 4437 * advance the path if there are now more items available. 4438 */ 4439 if (nritems > 0 && path->slots[0] < nritems - 1) { 4440 if (ret == 0) 4441 path->slots[0]++; 4442 ret = 0; 4443 goto done; 4444 } 4445 /* 4446 * So the above check misses one case: 4447 * - after releasing the path above, someone has removed the item that 4448 * used to be at the very end of the block, and balance between leafs 4449 * gets another one with bigger key.offset to replace it. 4450 * 4451 * This one should be returned as well, or we can get leaf corruption 4452 * later(esp. in __btrfs_drop_extents()). 4453 * 4454 * And a bit more explanation about this check, 4455 * with ret > 0, the key isn't found, the path points to the slot 4456 * where it should be inserted, so the path->slots[0] item must be the 4457 * bigger one. 4458 */ 4459 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) { 4460 ret = 0; 4461 goto done; 4462 } 4463 4464 while (level < BTRFS_MAX_LEVEL) { 4465 if (!path->nodes[level]) { 4466 ret = 1; 4467 goto done; 4468 } 4469 4470 slot = path->slots[level] + 1; 4471 c = path->nodes[level]; 4472 if (slot >= btrfs_header_nritems(c)) { 4473 level++; 4474 if (level == BTRFS_MAX_LEVEL) { 4475 ret = 1; 4476 goto done; 4477 } 4478 continue; 4479 } 4480 4481 4482 /* 4483 * Our current level is where we're going to start from, and to 4484 * make sure lockdep doesn't complain we need to drop our locks 4485 * and nodes from 0 to our current level. 4486 */ 4487 for (i = 0; i < level; i++) { 4488 if (path->locks[level]) { 4489 btrfs_tree_read_unlock(path->nodes[i]); 4490 path->locks[i] = 0; 4491 } 4492 free_extent_buffer(path->nodes[i]); 4493 path->nodes[i] = NULL; 4494 } 4495 4496 next = c; 4497 ret = read_block_for_search(root, path, &next, level, 4498 slot, &key); 4499 if (ret == -EAGAIN) 4500 goto again; 4501 4502 if (ret < 0) { 4503 btrfs_release_path(path); 4504 goto done; 4505 } 4506 4507 if (!path->skip_locking) { 4508 ret = btrfs_try_tree_read_lock(next); 4509 if (!ret && time_seq) { 4510 /* 4511 * If we don't get the lock, we may be racing 4512 * with push_leaf_left, holding that lock while 4513 * itself waiting for the leaf we've currently 4514 * locked. To solve this situation, we give up 4515 * on our lock and cycle. 4516 */ 4517 free_extent_buffer(next); 4518 btrfs_release_path(path); 4519 cond_resched(); 4520 goto again; 4521 } 4522 if (!ret) 4523 btrfs_tree_read_lock(next); 4524 } 4525 break; 4526 } 4527 path->slots[level] = slot; 4528 while (1) { 4529 level--; 4530 path->nodes[level] = next; 4531 path->slots[level] = 0; 4532 if (!path->skip_locking) 4533 path->locks[level] = BTRFS_READ_LOCK; 4534 if (!level) 4535 break; 4536 4537 ret = read_block_for_search(root, path, &next, level, 4538 0, &key); 4539 if (ret == -EAGAIN) 4540 goto again; 4541 4542 if (ret < 0) { 4543 btrfs_release_path(path); 4544 goto done; 4545 } 4546 4547 if (!path->skip_locking) 4548 btrfs_tree_read_lock(next); 4549 } 4550 ret = 0; 4551 done: 4552 unlock_up(path, 0, 1, 0, NULL); 4553 4554 return ret; 4555 } 4556 4557 /* 4558 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps 4559 * searching until it gets past min_objectid or finds an item of 'type' 4560 * 4561 * returns 0 if something is found, 1 if nothing was found and < 0 on error 4562 */ 4563 int btrfs_previous_item(struct btrfs_root *root, 4564 struct btrfs_path *path, u64 min_objectid, 4565 int type) 4566 { 4567 struct btrfs_key found_key; 4568 struct extent_buffer *leaf; 4569 u32 nritems; 4570 int ret; 4571 4572 while (1) { 4573 if (path->slots[0] == 0) { 4574 ret = btrfs_prev_leaf(root, path); 4575 if (ret != 0) 4576 return ret; 4577 } else { 4578 path->slots[0]--; 4579 } 4580 leaf = path->nodes[0]; 4581 nritems = btrfs_header_nritems(leaf); 4582 if (nritems == 0) 4583 return 1; 4584 if (path->slots[0] == nritems) 4585 path->slots[0]--; 4586 4587 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 4588 if (found_key.objectid < min_objectid) 4589 break; 4590 if (found_key.type == type) 4591 return 0; 4592 if (found_key.objectid == min_objectid && 4593 found_key.type < type) 4594 break; 4595 } 4596 return 1; 4597 } 4598 4599 /* 4600 * search in extent tree to find a previous Metadata/Data extent item with 4601 * min objecitd. 4602 * 4603 * returns 0 if something is found, 1 if nothing was found and < 0 on error 4604 */ 4605 int btrfs_previous_extent_item(struct btrfs_root *root, 4606 struct btrfs_path *path, u64 min_objectid) 4607 { 4608 struct btrfs_key found_key; 4609 struct extent_buffer *leaf; 4610 u32 nritems; 4611 int ret; 4612 4613 while (1) { 4614 if (path->slots[0] == 0) { 4615 ret = btrfs_prev_leaf(root, path); 4616 if (ret != 0) 4617 return ret; 4618 } else { 4619 path->slots[0]--; 4620 } 4621 leaf = path->nodes[0]; 4622 nritems = btrfs_header_nritems(leaf); 4623 if (nritems == 0) 4624 return 1; 4625 if (path->slots[0] == nritems) 4626 path->slots[0]--; 4627 4628 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 4629 if (found_key.objectid < min_objectid) 4630 break; 4631 if (found_key.type == BTRFS_EXTENT_ITEM_KEY || 4632 found_key.type == BTRFS_METADATA_ITEM_KEY) 4633 return 0; 4634 if (found_key.objectid == min_objectid && 4635 found_key.type < BTRFS_EXTENT_ITEM_KEY) 4636 break; 4637 } 4638 return 1; 4639 } 4640