// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mm.h>
#include <linux/error-injection.h>
#include "messages.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"
#include "tree-mod-log.h"
#include "tree-checker.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "relocation.h"
#include "file-item.h"

static struct kmem_cache *btrfs_path_cachep;

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *ins_key, struct btrfs_path *path,
		      int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);

static const struct btrfs_csums {
	u16		size;
	const char	name[10];
	const char	driver[12];
} btrfs_csums[] = {
	[BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
	[BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" },
	[BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
	[BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
				     .driver = "blake2b-256" },
};

/*
 * The leaf data grows from end to front in the node.  This returns the address
 * of the start of the last item, which is the stop of the leaf data stack.
 */
static unsigned int leaf_data_end(const struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);

	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(leaf->fs_info);
	return btrfs_item_offset(leaf, nr - 1);
}

/*
 * Move data in a @leaf (using memmove, safe for overlapping ranges).
 *
 * @leaf:	leaf that we're doing a memmove on
 * @dst_offset:	item data offset we're moving to
 * @src_offset:	item data offset we're moving from
 * @len:	length of the data we're moving
 *
 * Wrapper around memmove_extent_buffer() that takes into account the header on
 * the leaf.  The btrfs_item offsets start directly after the header, so we
 * have to adjust any offsets to account for the header in the leaf.  This
 * handles that math to simplify the callers.
 */
static inline void memmove_leaf_data(const struct extent_buffer *leaf,
				     unsigned long dst_offset,
				     unsigned long src_offset,
				     unsigned long len)
{
	memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, 0) + dst_offset,
			      btrfs_item_nr_offset(leaf, 0) + src_offset, len);
}
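/*
 * Illustrative sketch (hypothetical, not a caller in this file): making room
 * for @new_size bytes of item data at the front of the data area.  Real
 * callers only move the data of the items that come after the insertion
 * slot; this simplified form corresponds to inserting before all of them:
 *
 *	u32 data_end = leaf_data_end(leaf);
 *
 *	memmove_leaf_data(leaf, data_end - new_size, data_end,
 *			  BTRFS_LEAF_DATA_SIZE(leaf->fs_info) - data_end);
 *
 * The wrapper adds btrfs_item_nr_offset(leaf, 0) to both offsets, so callers
 * never need to account for the leaf header themselves.
 */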
/*
 * Copy item data from @src into @dst at the given @offset.
 *
 * @dst:	destination leaf that we're copying into
 * @src:	source leaf that we're copying from
 * @dst_offset:	item data offset we're copying to
 * @src_offset:	item data offset we're copying from
 * @len:	length of the data we're copying
 *
 * Wrapper around copy_extent_buffer() that takes into account the header on
 * the leaf.  The btrfs_item offsets start directly after the header, so we
 * have to adjust any offsets to account for the header in the leaf.  This
 * handles that math to simplify the callers.
 */
static inline void copy_leaf_data(const struct extent_buffer *dst,
				  const struct extent_buffer *src,
				  unsigned long dst_offset,
				  unsigned long src_offset, unsigned long len)
{
	copy_extent_buffer(dst, src, btrfs_item_nr_offset(dst, 0) + dst_offset,
			   btrfs_item_nr_offset(src, 0) + src_offset, len);
}

/*
 * Move items in a @leaf (using memmove).
 *
 * @dst:	destination leaf for the items
 * @dst_item:	the item nr we're copying into
 * @src_item:	the item nr we're copying from
 * @nr_items:	the number of items to copy
 *
 * Wrapper around memmove_extent_buffer() that does the math to get the
 * appropriate offsets into the leaf from the item numbers.
 */
static inline void memmove_leaf_items(const struct extent_buffer *leaf,
				      int dst_item, int src_item, int nr_items)
{
	memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, dst_item),
			      btrfs_item_nr_offset(leaf, src_item),
			      nr_items * sizeof(struct btrfs_item));
}

/*
 * Copy items from @src into @dst at the given @offset.
 *
 * @dst:	destination leaf for the items
 * @src:	source leaf for the items
 * @dst_item:	the item nr we're copying into
 * @src_item:	the item nr we're copying from
 * @nr_items:	the number of items to copy
 *
 * Wrapper around copy_extent_buffer() that does the math to get the
 * appropriate offsets into the leaf from the item numbers.
 */
static inline void copy_leaf_items(const struct extent_buffer *dst,
				   const struct extent_buffer *src,
				   int dst_item, int src_item, int nr_items)
{
	copy_extent_buffer(dst, src, btrfs_item_nr_offset(dst, dst_item),
			   btrfs_item_nr_offset(src, src_item),
			   nr_items * sizeof(struct btrfs_item));
}

/* This exists for btrfs-progs usages. */
u16 btrfs_csum_type_size(u16 type)
{
	return btrfs_csums[type].size;
}

int btrfs_super_csum_size(const struct btrfs_super_block *s)
{
	u16 t = btrfs_super_csum_type(s);

	/*
	 * csum type is validated at mount time
	 */
	return btrfs_csum_type_size(t);
}

const char *btrfs_super_csum_name(u16 csum_type)
{
	/* csum type is validated at mount time */
	return btrfs_csums[csum_type].name;
}

/*
 * Return the driver name if defined, otherwise the name, which is also a
 * valid driver name.
 */
const char *btrfs_super_csum_driver(u16 csum_type)
{
	/* csum type is validated at mount time */
	return btrfs_csums[csum_type].driver[0] ?
		btrfs_csums[csum_type].driver :
		btrfs_csums[csum_type].name;
}

size_t __attribute_const__ btrfs_get_num_csums(void)
{
	return ARRAY_SIZE(btrfs_csums);
}

struct btrfs_path *btrfs_alloc_path(void)
{
	might_sleep();

	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}

/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}
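/*
 * Illustrative sketch of the usual path lifecycle (hypothetical caller, not
 * part of this file): allocate a path, search, read the result, free it.
 * The inode number @ino is made up for the example:
 *
 *	struct btrfs_path *path;
 *	struct btrfs_key key = { .objectid = ino,
 *				 .type = BTRFS_INODE_ITEM_KEY,
 *				 .offset = 0 };
 *	int ret;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
 *	btrfs_free_path(path);	// releases locks and buffer refs too
 *
 * btrfs_free_path() calls btrfs_release_path() internally, so an explicit
 * release is only needed when the path is going to be reused.
 */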
/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that hold no locks or extent buffers.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}

/*
 * We want the transaction abort to print a stack trace only for errors where
 * the cause could be a bug, e.g. due to ENOSPC, and not for common errors
 * that are caused by external factors.
 */
bool __cold abort_should_print_stack(int errno)
{
	switch (errno) {
	case -EIO:
	case -EROFS:
	case -ENOMEM:
		return false;
	}
	return true;
}

/*
 * Safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was COWed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}

/*
 * Cowonly roots (not-shareable trees, everything not a subvolume or reloc
 * root) just get put onto a simple dirty list.  The transaction walks this
 * list to make sure they get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
		return;

	spin_lock(&fs_info->trans_lock);
	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
		/* Want the extent tree to be the last on the list */
		if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID)
			list_move_tail(&root->dirty_list,
				       &fs_info->dirty_cowonly_roots);
		else
			list_move(&root->dirty_list,
				  &fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&fs_info->trans_lock);
}
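/*
 * Illustrative sketch (hypothetical caller, not from this file): because
 * btrfs_root_node() only takes a reference and no lock, the returned buffer
 * may stop being the root at any moment.  A lockless peek therefore looks
 * like this:
 *
 *	struct extent_buffer *eb;
 *	int level;
 *
 *	eb = btrfs_root_node(root);
 *	level = btrfs_header_level(eb);	// may already be stale
 *	free_extent_buffer(eb);		// drop the reference we took
 *
 * Callers that need a stable root must instead use btrfs_lock_root_node(),
 * which loops until the node it locked is still the root.
 */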
/*
 * Used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this function returns zero on success or a negative error
 * code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root,
		    struct extent_buffer *buf,
		    struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
				     &disk_key, level, buf->start, 0,
				     BTRFS_NESTING_NEW_ROOT);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);
	if (ret) {
		btrfs_tree_unlock(cow);
		free_extent_buffer(cow);
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in shareable trees and tree roots are never shared.
	 * If a block was allocated after the last snapshot and the block was
	 * not allocated by tree relocation, we know the block is not shared.
	 */
	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;

	return 0;
}
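/*
 * Worked example for the check above (numbers made up): if a subvolume's
 * root_item records last_snapshot == 100 and a block in that tree has header
 * generation 90, the block already existed when the snapshot was taken and
 * may be referenced by both trees, so it can be shared.  A block with
 * generation 120 was COWed after the snapshot and is exclusive to the source
 * tree, unless it carries BTRFS_HEADER_FLAG_RELOC from relocation.
 */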
static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree blocks
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is that some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags);
		if (ret)
			return ret;
		if (unlikely(refs == 0)) {
			btrfs_crit(fs_info,
		"found 0 references for tree block at bytenr %llu level %d root %llu",
				   buf->start, btrfs_header_level(buf),
				   btrfs_root_id(root));
			ret = -EUCLEAN;
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1);
			if (ret)
				return ret;

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0);
				if (ret)
					return ret;
				ret = btrfs_inc_ref(trans, root, cow, 1);
				if (ret)
					return ret;
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
		}
		if (new_flags != 0) {
			ret = btrfs_set_disk_extent_flags(trans, buf, new_flags);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
			ret = btrfs_dec_ref(trans, root, buf, 1);
			if (ret)
				return ret;
		}
		btrfs_clear_buffer_dirty(trans, buf);
		*last_ref = 1;
	}
	return 0;
}
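/*
 * Worked example for the rules above (hypothetical numbers): suppose buf is
 * shared between a subvolume and one snapshot (refs == 2), is still owned by
 * the subvolume, and does not have FULL_BACKREF set.  COWing it in the owner
 * tree takes the first branch: the old block gets a full backref added for
 * the snapshot's benefit (btrfs_inc_ref(..., buf, 1)) and FULL_BACKREF is
 * set on its extent flags, while the new copy stays referenced only by the
 * owner tree.  If instead refs == 1, the block is no longer shared: the else
 * branch marks it clean and *last_ref tells the caller the extent can be
 * freed.
 */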
/*
 * Does the dirty work of COWing a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is
 * marked dirty and returned locked.  If you modify the block it needs to be
 * marked dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct extent_buffer *buf,
				      struct extent_buffer *parent, int parent_slot,
				      struct extent_buffer **cow_ret,
				      u64 search_start, u64 empty_size,
				      enum btrfs_lock_nesting nest)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start = 0;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_write_locked(buf);

	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
		parent_start = parent->start;

	cow = btrfs_alloc_tree_block(trans, root, parent_start,
				     root->root_key.objectid, &disk_key, level,
				     search_start, empty_size, nest);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_tree_unlock(cow);
		free_extent_buffer(cow);
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
		if (ret) {
			btrfs_tree_unlock(cow);
			free_extent_buffer(cow);
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
	}

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;

		ret = btrfs_tree_mod_log_insert_root(root->node, cow, true);
		if (ret < 0) {
			btrfs_tree_unlock(cow);
			free_extent_buffer(cow);
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
		atomic_inc(&cow->refs);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
				      parent_start, last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		WARN_ON(trans->transid != btrfs_header_generation(parent));
		ret = btrfs_tree_mod_log_insert_key(parent, parent_slot,
						    BTRFS_MOD_LOG_KEY_REPLACE);
		if (ret) {
			btrfs_tree_unlock(cow);
			free_extent_buffer(cow);
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		if (last_ref) {
			ret = btrfs_tree_mod_log_free_eb(buf);
			if (ret) {
				btrfs_tree_unlock(cow);
				free_extent_buffer(cow);
				btrfs_abort_transaction(trans, ret);
				return ret;
			}
		}
		btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
				      parent_start, last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	if (btrfs_is_testing(root->fs_info))
		return 0;

	/* Ensure we can see the FORCE_COW bit */
	smp_mb__before_atomic();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create snapshot during committing the transaction,
	 *    after we've finished copying src root, we must COW the shared
	 *    block to ensure the metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
		return 0;
	return 1;
}

/*
 * COWs a single block, see __btrfs_cow_block() for the real work.
 * This version of it has extra checks so that a block isn't COWed more than
 * once per transaction, as long as it hasn't been written yet.
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret,
		    enum btrfs_lock_nesting nest)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 search_start;
	int ret;

	if (test_bit(BTRFS_ROOT_DELETING, &root->state))
		btrfs_err(fs_info,
			"COW'ing blocks on a fs root that's being dropped");

	if (trans->transaction != fs_info->running_transaction)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid,
		       fs_info->running_transaction->transid);

	if (trans->transid != fs_info->generation)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid, fs_info->generation);

	if (!should_cow_block(trans, root, buf)) {
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)SZ_1G - 1);

	/*
	 * Before CoWing this block for later modification, check if it's
	 * the subtree root and do the delayed subtree trace if needed.
	 *
	 * Also we don't care about the error, as it's handled internally.
	 */
	btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0, nest);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}
ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO);
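/*
 * Illustrative sketch (hypothetical caller, not from this file): any code
 * about to modify a write-locked tree block COWs it first, and from then on
 * only touches the buffer returned through cow_ret, which may or may not be
 * the original buffer:
 *
 *	struct extent_buffer *eb = path->nodes[level];
 *
 *	ret = btrfs_cow_block(trans, root, eb, path->nodes[level + 1],
 *			      path->slots[level + 1], &eb, BTRFS_NESTING_COW);
 *	if (ret)
 *		return ret;
 *	path->nodes[level] = eb;	// eb is the writable copy now
 *
 * If the block was already COWed in this transaction and not yet written,
 * should_cow_block() makes this a cheap no-op returning the same buffer.
 */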
/*
 * Helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by.
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}

#ifdef __LITTLE_ENDIAN

/*
 * Compare two keys, on little-endian the disk order is the same as the CPU
 * order and we can avoid the conversion.
 */
static int comp_keys(const struct btrfs_disk_key *disk_key,
		     const struct btrfs_key *k2)
{
	const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key;

	return btrfs_comp_cpu_keys(k1, k2);
}

#else

/*
 * Compare two keys in a memcmp fashion.
 */
static int comp_keys(const struct btrfs_disk_key *disk,
		     const struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}
#endif

/*
 * Same as comp_keys, only with two btrfs_keys.
 */
int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
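/*
 * Example of the resulting key order (values made up): keys sort first by
 * objectid, then type, then offset, so
 *
 *	{ .objectid = 256, .type = BTRFS_INODE_ITEM_KEY, .offset = 0 }
 *
 * sorts before
 *
 *	{ .objectid = 256, .type = BTRFS_INODE_REF_KEY, .offset = 256 }
 *
 * because BTRFS_INODE_ITEM_KEY (1) < BTRFS_INODE_REF_KEY (12), and both sort
 * before any key with objectid 257.  This total order is what the binary
 * search in btrfs_bin_search() below relies on.
 */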
/*
 * This is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order.
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cur;
	u64 blocknr;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	WARN_ON(trans->transaction != fs_info->running_transaction);
	WARN_ON(trans->transid != fs_info->generation);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = fs_info->nodesize;
	end_slot = parent_nritems - 1;

	if (parent_nritems <= 1)
		return 0;

	for (i = start_slot; i <= end_slot; i++) {
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = btrfs_read_node_slot(parent, i);
		if (IS_ERR(cur))
			return PTR_ERR(cur);
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize),
					BTRFS_NESTING_COW);
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}

/*
 * Search for a key in the given extent_buffer.
 *
 * The lower boundary for the search is specified by the slot number @first_slot.
 * Use a value of 0 to search over the whole extent buffer.  Works for both
 * leaves and nodes.
 *
 * The slot in the extent buffer is returned via @slot.  If the key exists in the
 * extent buffer, then @slot will point to the slot where the key is, otherwise
 * it points to the slot where you would insert the key.
 *
 * Slot may point to the total number of items (i.e. one position beyond the last
 * key) if the key is bigger than the last key in the extent buffer.
 */
int btrfs_bin_search(struct extent_buffer *eb, int first_slot,
		     const struct btrfs_key *key, int *slot)
{
	unsigned long p;
	int item_size;
	/*
	 * Use unsigned types for the low and high slots, so that we get a more
	 * efficient division in the search loop below.
	 */
	u32 low = first_slot;
	u32 high = btrfs_header_nritems(eb);
	int ret;
	const int key_size = sizeof(struct btrfs_disk_key);

	if (unlikely(low > high)) {
		btrfs_err(eb->fs_info,
		 "%s: low (%u) > high (%u) eb %llu owner %llu level %d",
			  __func__, low, high, eb->start,
			  btrfs_header_owner(eb), btrfs_header_level(eb));
		return -EINVAL;
	}

	if (btrfs_header_level(eb) == 0) {
		p = offsetof(struct btrfs_leaf, items);
		item_size = sizeof(struct btrfs_item);
	} else {
		p = offsetof(struct btrfs_node, ptrs);
		item_size = sizeof(struct btrfs_key_ptr);
	}

	while (low < high) {
		unsigned long oip;
		unsigned long offset;
		struct btrfs_disk_key *tmp;
		struct btrfs_disk_key unaligned;
		int mid;

		mid = (low + high) / 2;
		offset = p + mid * item_size;
		oip = offset_in_page(offset);

		if (oip + key_size <= PAGE_SIZE) {
			const unsigned long idx = get_eb_page_index(offset);
			char *kaddr = page_address(eb->pages[idx]);

			oip = get_eb_offset_in_page(eb, offset);
			tmp = (struct btrfs_disk_key *)(kaddr + oip);
		} else {
			read_extent_buffer(eb, &unaligned, offset, key_size);
			tmp = &unaligned;
		}

		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}
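/*
 * Illustrative sketch (hypothetical caller, not from this file): the return
 * value and @slot together tell the caller whether the key exists and where
 * an insertion would go:
 *
 *	struct btrfs_key key = { .objectid = 256,
 *				 .type = BTRFS_INODE_ITEM_KEY,
 *				 .offset = 0 };
 *	int slot;
 *	int ret;
 *
 *	ret = btrfs_bin_search(eb, 0, &key, &slot);
 *	if (ret == 0)
 *		;	// exact match, the key lives at @slot
 *	else if (ret == 1)
 *		;	// not found, @slot is the insert position
 *	else
 *		;	// ret < 0, e.g. -EINVAL for an invalid slot range
 */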
static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}

/*
 * Given a node and slot number, this reads the block it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 */
struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
					   int slot)
{
	int level = btrfs_header_level(parent);
	struct btrfs_tree_parent_check check = { 0 };
	struct extent_buffer *eb;

	if (slot < 0 || slot >= btrfs_header_nritems(parent))
		return ERR_PTR(-ENOENT);

	ASSERT(level);

	check.level = level - 1;
	check.transid = btrfs_node_ptr_generation(parent, slot);
	check.owner_root = btrfs_header_owner(parent);
	check.has_first_key = true;
	btrfs_node_key_to_cpu(parent, &check.first_key, slot);

	eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot),
			     &check);
	if (IS_ERR(eb))
		return eb;
	if (!extent_buffer_uptodate(eb)) {
		free_extent_buffer(eb);
		return ERR_PTR(-EIO);
	}

	return eb;
}

/*
 * Node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	ASSERT(level > 0);

	mid = path->nodes[level];

	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = btrfs_read_node_slot(mid, 0);
		if (IS_ERR(child)) {
			ret = PTR_ERR(child);
			goto out;
		}

		btrfs_tree_lock(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child,
				      BTRFS_NESTING_COW);
		if (ret) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			goto out;
		}

		ret = btrfs_tree_mod_log_insert_root(root->node, child, true);
		if (ret < 0) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
		rcu_assign_pointer(root->node, child);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		btrfs_clear_buffer_dirty(trans, mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);

		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
		/* once for the root ptr */
		free_extent_buffer_stale(mid);
		return 0;
	}
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
		return 0;

	if (pslot) {
		left = btrfs_read_node_slot(parent, pslot - 1);
		if (IS_ERR(left)) {
			ret = PTR_ERR(left);
			left = NULL;
			goto out;
		}

		__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left,
				       BTRFS_NESTING_LEFT_COW);
		if (wret) {
			ret = wret;
			goto out;
		}
	}

	if (pslot + 1 < btrfs_header_nritems(parent)) {
		right = btrfs_read_node_slot(parent, pslot + 1);
		if (IS_ERR(right)) {
			ret = PTR_ERR(right);
			right = NULL;
			goto out;
		}

		__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right,
				       BTRFS_NESTING_RIGHT_COW);
		if (wret) {
			ret = wret;
			goto out;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, left, mid, 1);
		if (wret < 0)
			ret = wret;
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			btrfs_clear_buffer_dirty(trans, right);
			btrfs_tree_unlock(right);
			btrfs_del_ptr(root, path, level + 1, pslot + 1);
			root_sub_used(root, right->len);
			btrfs_free_tree_block(trans, btrfs_root_id(root), right,
					      0, 1);
			free_extent_buffer_stale(right);
			right = NULL;
		} else {
			struct btrfs_disk_key right_key;

			btrfs_node_key(right, &right_key, 0);
			ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
					BTRFS_MOD_LOG_KEY_REPLACE);
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete.  A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right.
		 */
		if (unlikely(!left)) {
			btrfs_crit(fs_info,
"missing left child when middle child only has 1 item, parent bytenr %llu level %d mid bytenr %llu root %llu",
				   parent->start, btrfs_header_level(parent),
				   mid->start, btrfs_root_id(root));
			ret = -EUCLEAN;
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
		wret = balance_node_right(trans, mid, left);
		if (wret < 0) {
			ret = wret;
			goto out;
		}
		if (wret == 1) {
			wret = push_node_left(trans, left, mid, 1);
			if (wret < 0)
				ret = wret;
		}
		BUG_ON(wret == 1);
	}
	if (btrfs_header_nritems(mid) == 0) {
		btrfs_clear_buffer_dirty(trans, mid);
		btrfs_tree_unlock(mid);
		btrfs_del_ptr(root, path, level + 1, pslot);
		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
		free_extent_buffer_stale(mid);
		mid = NULL;
	} else {
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;

		btrfs_node_key(mid, &mid_key, 0);
		ret = btrfs_tree_mod_log_insert_key(parent, pslot,
						    BTRFS_MOD_LOG_KEY_REPLACE);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(parent);
	}

	/* update the path */
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			atomic_inc(&left->refs);
			/* left was locked after cow */
			path->nodes[level] = left;
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
			if (mid) {
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			}
		} else {
			orig_slot -= btrfs_header_nritems(left);
			path->slots[level] = orig_slot;
		}
	}
	/* double check we haven't messed things up */
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
out:
	if (right) {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	if (left) {
		if (path->nodes[level] != left)
			btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	return ret;
}
/*
 * Node balancing for insertion.  Here we only split or push nodes around
 * when they are completely full.  This is also done top down, so we
 * have to be pessimistic.
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];

	if (level == 0)
		return 1;

	mid = path->nodes[level];
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	if (!parent)
		return 1;

	/* first, try to make some room in the middle buffer */
	if (pslot) {
		u32 left_nr;

		left = btrfs_read_node_slot(parent, pslot - 1);
		if (IS_ERR(left))
			return PTR_ERR(left);

		__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);

		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, left, parent,
					      pslot - 1, &left,
					      BTRFS_NESTING_LEFT_COW);
			if (ret)
				wret = 1;
			else {
				wret = push_node_left(trans, left, mid, 0);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			ret = btrfs_tree_mod_log_insert_key(parent, pslot,
					BTRFS_MOD_LOG_KEY_REPLACE);
			if (ret < 0) {
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
				btrfs_abort_transaction(trans, ret);
				return ret;
			}
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(parent);
			if (btrfs_header_nritems(left) > orig_slot) {
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				orig_slot -= btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
			}
			return 0;
		}
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (pslot + 1 < btrfs_header_nritems(parent)) {
		u32 right_nr;

		right = btrfs_read_node_slot(parent, pslot + 1);
		if (IS_ERR(right))
			return PTR_ERR(right);

		__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);

		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, right,
					      parent, pslot + 1,
					      &right, BTRFS_NESTING_RIGHT_COW);
			if (ret)
				wret = 1;
			else {
				wret = balance_node_right(trans, right, mid);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_node_key(right, &disk_key, 0);
			ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
					BTRFS_MOD_LOG_KEY_REPLACE);
			if (ret < 0) {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
				btrfs_abort_transaction(trans, ret);
				return ret;
			}
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);

			if (btrfs_header_nritems(mid) <= orig_slot) {
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
			}
			return 0;
		}
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 1;
}

/*
 * readahead one full node of leaves, finding things that are close
 * to the block in 'slot', and triggering ra on them.
 */
static void reada_for_search(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path,
			     int level, int slot, u64 objectid)
{
	struct extent_buffer *node;
	struct btrfs_disk_key disk_key;
	u32 nritems;
	u64 search;
	u64 target;
	u64 nread = 0;
	u64 nread_max;
	u32 nr;
	u32 blocksize;
	u32 nscan = 0;

	if (level != 1 && path->reada != READA_FORWARD_ALWAYS)
		return;

	if (!path->nodes[level])
		return;

	node = path->nodes[level];

	/*
	 * Since the time between visiting leaves is much shorter than the time
	 * between visiting nodes, limit read ahead of nodes to 1, to avoid too
	 * much IO at once (possibly random).
	 */
	if (path->reada == READA_FORWARD_ALWAYS) {
		if (level > 1)
			nread_max = node->fs_info->nodesize;
		else
			nread_max = SZ_128K;
	} else {
		nread_max = SZ_64K;
	}

	search = btrfs_node_blockptr(node, slot);
	blocksize = fs_info->nodesize;
	if (path->reada != READA_FORWARD_ALWAYS) {
		struct extent_buffer *eb;

		eb = find_extent_buffer(fs_info, search);
		if (eb) {
			free_extent_buffer(eb);
			return;
		}
	}

	target = search;

	nritems = btrfs_header_nritems(node);
	nr = slot;

	while (1) {
		if (path->reada == READA_BACK) {
			if (nr == 0)
				break;
			nr--;
		} else if (path->reada == READA_FORWARD ||
			   path->reada == READA_FORWARD_ALWAYS) {
			nr++;
			if (nr >= nritems)
				break;
		}
		if (path->reada == READA_BACK && objectid) {
			btrfs_node_key(node, &disk_key, nr);
			if (btrfs_disk_key_objectid(&disk_key) != objectid)
				break;
		}
		search = btrfs_node_blockptr(node, nr);
		if (path->reada == READA_FORWARD_ALWAYS ||
		    (search <= target && target - search <= 65536) ||
		    (search > target && search - target <= 65536)) {
			btrfs_readahead_node_child(node, nr);
			nread += blocksize;
		}
		nscan++;
		if (nread > nread_max || nscan > 32)
			break;
	}
}

static noinline void reada_for_balance(struct btrfs_path *path, int level)
{
	struct extent_buffer *parent;
	int slot;
	int nritems;

	parent = path->nodes[level + 1];
	if (!parent)
		return;

	nritems = btrfs_header_nritems(parent);
	slot = path->slots[level + 1];

	if (slot > 0)
		btrfs_readahead_node_child(parent, slot - 1);
	if (slot + 1 < nritems)
		btrfs_readahead_node_child(parent, slot + 1);
}
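/*
 * Illustrative sketch (hypothetical caller, not from this file): readahead is
 * opt-in per path.  A sequential scan over many leaves would typically set
 * the mode before searching, and reada_for_search() above then prefetches
 * neighbouring leaves as the search walks level 1:
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *
 *	if (!path)
 *		return -ENOMEM;
 *	path->reada = READA_FORWARD;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	...
 *	btrfs_free_path(path);
 */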
/*
 * when we walk down the tree, it is usually safe to unlock the higher layers
 * in the tree.  The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * tree.
 *
 * callers might also have set path->keep_locks, which tells this code to keep
 * the lock if the path points to the last slot in the block.  This is part of
 * walking through the tree, and selecting the next slot in the higher block.
 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  So
 * if lowest_unlock is 1, level 0 won't be unlocked.
 */
static noinline void unlock_up(struct btrfs_path *path, int level,
			       int lowest_unlock, int min_write_lock_level,
			       int *write_lock_level)
{
	int i;
	int skip_level = level;
	bool check_skip = true;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			break;
		if (!path->locks[i])
			break;

		if (check_skip) {
			if (path->slots[i] == 0) {
				skip_level = i + 1;
				continue;
			}

			if (path->keep_locks) {
				u32 nritems;

				nritems = btrfs_header_nritems(path->nodes[i]);
				if (nritems < 1 || path->slots[i] >= nritems - 1) {
					skip_level = i + 1;
					continue;
				}
			}
		}

		if (i >= lowest_unlock && i > skip_level) {
			check_skip = false;
			btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
			path->locks[i] = 0;
			if (write_lock_level &&
			    i > min_write_lock_level &&
			    i <= *write_lock_level) {
				*write_lock_level = i - 1;
			}
		}
	}
}
/*
 * Helper function for btrfs_search_slot() and other functions that do a search
 * on a btree.  The goal is to find a tree block in the cache (the radix tree at
 * fs_info->buffer_radix), but if we can't find it, or it's not up to date, read
 * its pages from disk.
 *
 * Returns -EAGAIN, with the path unlocked, if the caller needs to repeat the
 * whole btree search, starting again from the current root node.
 */
static int
read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
		      struct extent_buffer **eb_ret, int level, int slot,
		      const struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_tree_parent_check check = { 0 };
	u64 blocknr;
	u64 gen;
	struct extent_buffer *tmp;
	int ret;
	int parent_level;
	bool unlock_up;

	unlock_up = ((level + 1 < BTRFS_MAX_LEVEL) && p->locks[level + 1]);
	blocknr = btrfs_node_blockptr(*eb_ret, slot);
	gen = btrfs_node_ptr_generation(*eb_ret, slot);
	parent_level = btrfs_header_level(*eb_ret);
	btrfs_node_key_to_cpu(*eb_ret, &check.first_key, slot);
	check.has_first_key = true;
	check.level = parent_level - 1;
	check.transid = gen;
	check.owner_root = root->root_key.objectid;

	/*
	 * If we need to read an extent buffer from disk and we are holding locks
	 * on upper level nodes, we unlock all the upper nodes before reading the
	 * extent buffer, and then return -EAGAIN to the caller as it needs to
	 * restart the search.  We don't release the lock on the current level
	 * because we need to walk this node to figure out which blocks to read.
	 */
	tmp = find_extent_buffer(fs_info, blocknr);
	if (tmp) {
		if (p->reada == READA_FORWARD_ALWAYS)
			reada_for_search(fs_info, p, level, slot, key->objectid);

		/* first we do an atomic uptodate check */
		if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
			/*
			 * Do extra check for first_key, eb can be stale due to
			 * being cached, read from scrub, or have multiple
			 * parents (shared tree blocks).
			 */
			if (btrfs_verify_level_key(tmp,
					parent_level - 1, &check.first_key, gen)) {
				free_extent_buffer(tmp);
				return -EUCLEAN;
			}
			*eb_ret = tmp;
			return 0;
		}

		if (p->nowait) {
			free_extent_buffer(tmp);
			return -EAGAIN;
		}

		if (unlock_up)
			btrfs_unlock_up_safe(p, level + 1);

		/* now we're allowed to do a blocking uptodate check */
		ret = btrfs_read_extent_buffer(tmp, &check);
		if (ret) {
			free_extent_buffer(tmp);
			btrfs_release_path(p);
			return -EIO;
		}
		if (btrfs_check_eb_owner(tmp, root->root_key.objectid)) {
			free_extent_buffer(tmp);
			btrfs_release_path(p);
			return -EUCLEAN;
		}

		if (unlock_up)
			ret = -EAGAIN;

		goto out;
	} else if (p->nowait) {
		return -EAGAIN;
	}

	if (unlock_up) {
		btrfs_unlock_up_safe(p, level + 1);
		ret = -EAGAIN;
	} else {
		ret = 0;
	}

	if (p->reada != READA_NONE)
		reada_for_search(fs_info, p, level, slot, key->objectid);

	tmp = read_tree_block(fs_info, blocknr, &check);
	if (IS_ERR(tmp)) {
		btrfs_release_path(p);
		return PTR_ERR(tmp);
	}
	/*
	 * If the read above didn't mark this buffer up to date,
	 * it will never end up being up to date.  Set ret to EIO now
	 * and give up so that our caller doesn't loop forever
	 * on our EAGAINs.
	 */
	if (!extent_buffer_uptodate(tmp))
		ret = -EIO;

out:
	if (ret == 0) {
		*eb_ret = tmp;
	} else {
		free_extent_buffer(tmp);
		btrfs_release_path(p);
	}

	return ret;
}
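/*
 * Illustrative sketch of the -EAGAIN contract above (simplified from the
 * loop in btrfs_search_slot(), not a literal copy): the caller restarts the
 * whole walk from the root whenever the path had to be unlocked for IO.
 *
 * again:
 *	b = btrfs_search_slot_get_root(root, p, write_lock_level);
 *	while (b) {
 *		...
 *		err = read_block_for_search(root, p, &b, level, slot, key);
 *		if (err == -EAGAIN)
 *			goto again;	// path was released, start over
 *		if (err)
 *			break;
 *		...
 *	}
 */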
/*
 * helper function for btrfs_search_slot.  This does all of the checks
 * for node-level blocks and does any balancing required based on
 * the ins_len.
 *
 * If no extra work was required, zero is returned.  If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must
 * start over.
 */
static int
setup_nodes_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer *b, int level, int ins_len,
		       int *write_lock_level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			return -EAGAIN;
		}

		reada_for_balance(p, level);
		ret = split_node(trans, root, p, level);

		b = p->nodes[level];
	} else if (ins_len < 0 && btrfs_header_nritems(b) <
		   BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			return -EAGAIN;
		}

		reada_for_balance(p, level);
		ret = balance_level(trans, root, p, level);
		if (ret)
			return ret;

		b = p->nodes[level];
		if (!b) {
			btrfs_release_path(p);
			return -EAGAIN;
		}
		BUG_ON(btrfs_header_nritems(b) == 1);
	}
	return ret;
}

int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
		    u64 iobjectid, u64 ioff, u8 key_type,
		    struct btrfs_key *found_key)
{
	int ret;
	struct btrfs_key key;
	struct extent_buffer *eb;

	ASSERT(path);
	ASSERT(found_key);

	key.type = key_type;
	key.objectid = iobjectid;
	key.offset = ioff;

	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	eb = path->nodes[0];
	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
		ret = btrfs_next_leaf(fs_root, path);
		if (ret)
			return ret;
		eb = path->nodes[0];
	}

	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
	if (found_key->type != key.type ||
	    found_key->objectid != key.objectid)
		return 1;

	return 0;
}
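/*
 * Illustrative sketch (hypothetical caller, not from this file): looking up
 * an item with btrfs_find_item().  It returns 0 and fills @found_key when an
 * item with the given objectid and type exists, 1 when it doesn't, and a
 * negative errno on error.  The offset is only a starting point; only
 * objectid and type have to match for a hit:
 *
 *	struct btrfs_key found;
 *	struct btrfs_path *path = btrfs_alloc_path();
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_find_item(fs_root, path, objectid, 0,
 *			      BTRFS_ROOT_ITEM_KEY, &found);
 *	if (ret == 0)
 *		;	// found, path->nodes[0]/slots[0] point at the item
 *	btrfs_free_path(path);
 */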
static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
							struct btrfs_path *p,
							int write_lock_level)
{
	struct extent_buffer *b;
	int root_lock = 0;
	int level = 0;

	if (p->search_commit_root) {
		b = root->commit_root;
		atomic_inc(&b->refs);
		level = btrfs_header_level(b);
		/*
		 * Ensure that all callers have set skip_locking when
		 * p->search_commit_root = 1.
		 */
		ASSERT(p->skip_locking == 1);

		goto out;
	}

	if (p->skip_locking) {
		b = btrfs_root_node(root);
		level = btrfs_header_level(b);
		goto out;
	}

	/* We try very hard to do read locks on the root */
	root_lock = BTRFS_READ_LOCK;

	/*
	 * If the level is set to maximum, we can skip trying to get the read
	 * lock.
	 */
	if (write_lock_level < BTRFS_MAX_LEVEL) {
		/*
		 * We don't know the level of the root node until we actually
		 * have it read locked.
		 */
		if (p->nowait) {
			b = btrfs_try_read_lock_root_node(root);
			if (IS_ERR(b))
				return b;
		} else {
			b = btrfs_read_lock_root_node(root);
		}
		level = btrfs_header_level(b);
		if (level > write_lock_level)
			goto out;

		/* Whoops, must trade for write lock */
		btrfs_tree_read_unlock(b);
		free_extent_buffer(b);
	}

	b = btrfs_lock_root_node(root);
	root_lock = BTRFS_WRITE_LOCK;

	/* The level might have changed, check again */
	level = btrfs_header_level(b);

out:
	/*
	 * The root may have failed to write out at some point, and thus is no
	 * longer valid, return an error in this case.
	 */
	if (!extent_buffer_uptodate(b)) {
		if (root_lock)
			btrfs_tree_unlock_rw(b, root_lock);
		free_extent_buffer(b);
		return ERR_PTR(-EIO);
	}

	p->nodes[level] = b;
	if (!p->skip_locking)
		p->locks[level] = root_lock;
	/*
	 * Callers are responsible for dropping b's references.
	 */
	return b;
}

/*
 * Replace the extent buffer at the lowest level of the path with a cloned
 * version.  The purpose is to be able to use it safely, after releasing the
 * commit root semaphore, even if relocation is happening in parallel, the
 * transaction used for relocation is committed and the extent buffer is
 * reallocated in the next transaction.
 *
 * This is used in a context where the caller does not prevent transaction
 * commits from happening, either by holding a transaction handle or holding
 * some lock, while it's doing searches through a commit root.
 * At the moment it's only used for send operations.
 */
static int finish_need_commit_sem_search(struct btrfs_path *path)
{
	const int i = path->lowest_level;
	const int slot = path->slots[i];
	struct extent_buffer *lowest = path->nodes[i];
	struct extent_buffer *clone;

	ASSERT(path->need_commit_sem);

	if (!lowest)
		return 0;

	lockdep_assert_held_read(&lowest->fs_info->commit_root_sem);

	clone = btrfs_clone_extent_buffer(lowest);
	if (!clone)
		return -ENOMEM;

	btrfs_release_path(path);
	path->nodes[i] = clone;
	path->slots[i] = slot;

	return 0;
}
static inline int search_for_key_slot(struct extent_buffer *eb,
				      int search_low_slot,
				      const struct btrfs_key *key,
				      int prev_cmp,
				      int *slot)
{
	/*
	 * If a previous call to btrfs_bin_search() on a parent node returned an
	 * exact match (prev_cmp == 0), we can safely assume the target key will
	 * always be at slot 0 on lower levels, since each key pointer
	 * (struct btrfs_key_ptr) refers to the lowest key accessible from the
	 * subtree it points to.  Thus we can skip searching lower levels.
	 */
	if (prev_cmp == 0) {
		*slot = 0;
		return 0;
	}

	return btrfs_bin_search(eb, search_low_slot, key, slot);
}

static int search_leaf(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       const struct btrfs_key *key,
		       struct btrfs_path *path,
		       int ins_len,
		       int prev_cmp)
{
	struct extent_buffer *leaf = path->nodes[0];
	int leaf_free_space = -1;
	int search_low_slot = 0;
	int ret;
	bool do_bin_search = true;

	/*
	 * If we are doing an insertion, the leaf has enough free space and the
	 * destination slot for the key is not slot 0, then we can unlock our
	 * write lock on the parent, and any other upper nodes, before doing the
	 * binary search on the leaf (with search_for_key_slot()), allowing other
	 * tasks to lock the parent and any other upper nodes.
	 */
	if (ins_len > 0) {
		/*
		 * Cache the leaf free space, since we will need it later and it
		 * will not change until then.
		 */
		leaf_free_space = btrfs_leaf_free_space(leaf);

		/*
		 * !path->locks[1] means we have a single node tree, the leaf is
		 * the root of the tree.
		 */
		if (path->locks[1] && leaf_free_space >= ins_len) {
			struct btrfs_disk_key first_key;

			ASSERT(btrfs_header_nritems(leaf) > 0);
			btrfs_item_key(leaf, &first_key, 0);

			/*
			 * Doing the extra comparison with the first key is cheap,
			 * taking into account that the first key is very likely
			 * already in a cache line because it immediately follows
			 * the extent buffer's header and we have recently accessed
			 * the header's level field.
			 */
			ret = comp_keys(&first_key, key);
			if (ret < 0) {
				/*
				 * The first key is smaller than the key we want
				 * to insert, so we are safe to unlock all upper
				 * nodes and we have to do the binary search.
				 *
				 * We do use btrfs_unlock_up_safe() and not
				 * unlock_up() because the latter does not unlock
				 * nodes with a slot of 0 - we can safely unlock
				 * any node even if its slot is 0 since in this
				 * case the key does not end up at slot 0 of the
				 * leaf and there's no need to split the leaf.
				 */
				btrfs_unlock_up_safe(path, 1);
				search_low_slot = 1;
			} else {
				/*
				 * The first key is greater than or equal to the
				 * key we want to insert, so we can skip the
				 * binary search as the target key will be at
				 * slot 0.
				 *
				 * We can not unlock upper nodes when the key is
				 * less than the first key, because we will need
				 * to update the key at slot 0 of the parent node
				 * and possibly of other upper nodes too.
				 * If the key matches the first key, then we can
				 * unlock all the upper nodes, using
				 * btrfs_unlock_up_safe() instead of unlock_up()
				 * as stated above.
				 */
				if (ret == 0)
					btrfs_unlock_up_safe(path, 1);
				/*
				 * ret is already 0 or 1, matching the result of
				 * a btrfs_bin_search() call, so there is no need
				 * to adjust it.
				 */
				do_bin_search = false;
				path->slots[0] = 0;
			}
		}
	}

	if (do_bin_search) {
		ret = search_for_key_slot(leaf, search_low_slot, key,
					  prev_cmp, &path->slots[0]);
		if (ret < 0)
			return ret;
	}

	if (ins_len > 0) {
		/*
		 * Item key already exists.  In this case, if we are allowed to
		 * insert the item (for example, in dir_item case, item key
		 * collision is allowed), it will be merged with the original
		 * item.
Only the item size grows, no new btrfs item will be 2019 * added. If search_for_extension is not set, ins_len already 2020 * accounts for the size of a struct btrfs_item, so deduct it 2021 * here to make the leaf space check correct. 2022 */ 2023 if (ret == 0 && !path->search_for_extension) { 2024 ASSERT(ins_len >= sizeof(struct btrfs_item)); 2025 ins_len -= sizeof(struct btrfs_item); 2026 } 2027 2028 ASSERT(leaf_free_space >= 0); 2029 2030 if (leaf_free_space < ins_len) { 2031 int err; 2032 2033 err = split_leaf(trans, root, key, path, ins_len, 2034 (ret == 0)); 2035 ASSERT(err <= 0); 2036 if (WARN_ON(err > 0)) 2037 err = -EUCLEAN; 2038 if (err) 2039 ret = err; 2040 } 2041 } 2042 2043 return ret; 2044 } 2045 2046 /* 2047 * btrfs_search_slot - look for a key in a tree and perform necessary 2048 * modifications to preserve tree invariants. 2049 * 2050 * @trans: Handle of transaction, used when modifying the tree 2051 * @p: Holds all btree nodes along the search path 2052 * @root: The root node of the tree 2053 * @key: The key we are looking for 2054 * @ins_len: Indicates purpose of search: 2055 * >0 for inserts, it is the size of the item being inserted (*) 2056 * <0 for deletions 2057 * 0 for plain searches, not modifying the tree 2058 * 2059 * (*) If size of item inserted doesn't include 2060 * sizeof(struct btrfs_item), then p->search_for_extension must 2061 * be set. 2062 * @cow: boolean indicating whether CoW operations should be performed. 2063 * Must always be 1 when modifying the tree. 2064 * 2065 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree. 2066 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible) 2067 * 2068 * If @key is found, 0 is returned and you can find the item in the leaf level 2069 * of the path (level 0) 2070 * 2071 * If @key isn't found, 1 is returned and the leaf level of the path (level 0) 2072 * points to the slot where it should be inserted 2073 * 2074 * If an error is encountered while searching the tree a negative error number 2075 * is returned 2076 */ 2077 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root, 2078 const struct btrfs_key *key, struct btrfs_path *p, 2079 int ins_len, int cow) 2080 { 2081 struct btrfs_fs_info *fs_info = root->fs_info; 2082 struct extent_buffer *b; 2083 int slot; 2084 int ret; 2085 int err; 2086 int level; 2087 int lowest_unlock = 1; 2088 /* everything at write_lock_level or lower must be write locked */ 2089 int write_lock_level = 0; 2090 u8 lowest_level = 0; 2091 int min_write_lock_level; 2092 int prev_cmp; 2093 2094 might_sleep(); 2095 2096 lowest_level = p->lowest_level; 2097 WARN_ON(lowest_level && ins_len > 0); 2098 WARN_ON(p->nodes[0] != NULL); 2099 BUG_ON(!cow && ins_len); 2100 2101 /* 2102 * For now nowait is only allowed for read-only operations. There's no 2103 * strict reason why we can't allow it elsewhere, we just only need it 2104 * for reads, so it's only implemented for reads.
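 * Hence the assertion right below: a nowait search implies cow == 0.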
2105 */ 2106 ASSERT(!p->nowait || !cow); 2107 2108 if (ins_len < 0) { 2109 lowest_unlock = 2; 2110 2111 /* when we are removing items, we might have to go up to level 2112 * two as we update tree pointers. Make sure we keep write 2113 * locks on those levels as well 2114 */ 2115 write_lock_level = 2; 2116 } else if (ins_len > 0) { 2117 /* 2118 * for inserting items, make sure we have a write lock on 2119 * level 1 so we can update keys 2120 */ 2121 write_lock_level = 1; 2122 } 2123 2124 if (!cow) 2125 write_lock_level = -1; 2126 2127 if (cow && (p->keep_locks || p->lowest_level)) 2128 write_lock_level = BTRFS_MAX_LEVEL; 2129 2130 min_write_lock_level = write_lock_level; 2131 2132 if (p->need_commit_sem) { 2133 ASSERT(p->search_commit_root); 2134 if (p->nowait) { 2135 if (!down_read_trylock(&fs_info->commit_root_sem)) 2136 return -EAGAIN; 2137 } else { 2138 down_read(&fs_info->commit_root_sem); 2139 } 2140 } 2141 2142 again: 2143 prev_cmp = -1; 2144 b = btrfs_search_slot_get_root(root, p, write_lock_level); 2145 if (IS_ERR(b)) { 2146 ret = PTR_ERR(b); 2147 goto done; 2148 } 2149 2150 while (b) { 2151 int dec = 0; 2152 2153 level = btrfs_header_level(b); 2154 2155 if (cow) { 2156 bool last_level = (level == (BTRFS_MAX_LEVEL - 1)); 2157 2158 /* 2159 * if we don't really need to cow this block 2160 * then we don't want to set the path blocking, 2161 * so we test it here 2162 */ 2163 if (!should_cow_block(trans, root, b)) 2164 goto cow_done; 2165 2166 /* 2167 * must have write locks on this node and the 2168 * parent 2169 */ 2170 if (level > write_lock_level || 2171 (level + 1 > write_lock_level && 2172 level + 1 < BTRFS_MAX_LEVEL && 2173 p->nodes[level + 1])) { 2174 write_lock_level = level + 1; 2175 btrfs_release_path(p); 2176 goto again; 2177 } 2178 2179 if (last_level) 2180 err = btrfs_cow_block(trans, root, b, NULL, 0, 2181 &b, 2182 BTRFS_NESTING_COW); 2183 else 2184 err = btrfs_cow_block(trans, root, b, 2185 p->nodes[level + 1], 2186 p->slots[level + 1], &b, 2187 BTRFS_NESTING_COW); 2188 if (err) { 2189 ret = err; 2190 goto done; 2191 } 2192 } 2193 cow_done: 2194 p->nodes[level] = b; 2195 2196 /* 2197 * we have a lock on b and as long as we aren't changing 2198 * the tree, there is no way for the items in b to change. 2199 * It is safe to drop the lock on our parent before we 2200 * go through the expensive btree search on b. 2201 * 2202 * If we're inserting or deleting (ins_len != 0), then we might 2203 * be changing slot zero, which may require changing the parent. 2204 * So, we can't drop the lock until after we know which slot 2205 * we're operating on.
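 * (For example, an insertion that lands in slot 0 of the leaf requires
 * updating the key at slot 0 of the parent node, and possibly of upper
 * nodes too.)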
2206 */ 2207 if (!ins_len && !p->keep_locks) { 2208 int u = level + 1; 2209 2210 if (u < BTRFS_MAX_LEVEL && p->locks[u]) { 2211 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]); 2212 p->locks[u] = 0; 2213 } 2214 } 2215 2216 if (level == 0) { 2217 if (ins_len > 0) 2218 ASSERT(write_lock_level >= 1); 2219 2220 ret = search_leaf(trans, root, key, p, ins_len, prev_cmp); 2221 if (!p->search_for_split) 2222 unlock_up(p, level, lowest_unlock, 2223 min_write_lock_level, NULL); 2224 goto done; 2225 } 2226 2227 ret = search_for_key_slot(b, 0, key, prev_cmp, &slot); 2228 if (ret < 0) 2229 goto done; 2230 prev_cmp = ret; 2231 2232 if (ret && slot > 0) { 2233 dec = 1; 2234 slot--; 2235 } 2236 p->slots[level] = slot; 2237 err = setup_nodes_for_search(trans, root, p, b, level, ins_len, 2238 &write_lock_level); 2239 if (err == -EAGAIN) 2240 goto again; 2241 if (err) { 2242 ret = err; 2243 goto done; 2244 } 2245 b = p->nodes[level]; 2246 slot = p->slots[level]; 2247 2248 /* 2249 * Slot 0 is special, if we change the key we have to update 2250 * the parent pointer which means we must have a write lock on 2251 * the parent 2252 */ 2253 if (slot == 0 && ins_len && write_lock_level < level + 1) { 2254 write_lock_level = level + 1; 2255 btrfs_release_path(p); 2256 goto again; 2257 } 2258 2259 unlock_up(p, level, lowest_unlock, min_write_lock_level, 2260 &write_lock_level); 2261 2262 if (level == lowest_level) { 2263 if (dec) 2264 p->slots[level]++; 2265 goto done; 2266 } 2267 2268 err = read_block_for_search(root, p, &b, level, slot, key); 2269 if (err == -EAGAIN) 2270 goto again; 2271 if (err) { 2272 ret = err; 2273 goto done; 2274 } 2275 2276 if (!p->skip_locking) { 2277 level = btrfs_header_level(b); 2278 2279 btrfs_maybe_reset_lockdep_class(root, b); 2280 2281 if (level <= write_lock_level) { 2282 btrfs_tree_lock(b); 2283 p->locks[level] = BTRFS_WRITE_LOCK; 2284 } else { 2285 if (p->nowait) { 2286 if (!btrfs_try_tree_read_lock(b)) { 2287 free_extent_buffer(b); 2288 ret = -EAGAIN; 2289 goto done; 2290 } 2291 } else { 2292 btrfs_tree_read_lock(b); 2293 } 2294 p->locks[level] = BTRFS_READ_LOCK; 2295 } 2296 p->nodes[level] = b; 2297 } 2298 } 2299 ret = 1; 2300 done: 2301 if (ret < 0 && !p->skip_release_on_error) 2302 btrfs_release_path(p); 2303 2304 if (p->need_commit_sem) { 2305 int ret2; 2306 2307 ret2 = finish_need_commit_sem_search(p); 2308 up_read(&fs_info->commit_root_sem); 2309 if (ret2) 2310 ret = ret2; 2311 } 2312 2313 return ret; 2314 } 2315 ALLOW_ERROR_INJECTION(btrfs_search_slot, ERRNO); 2316 2317 /* 2318 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the 2319 * current state of the tree together with the operations recorded in the tree 2320 * modification log to search for the key in a previous version of this tree, as 2321 * denoted by the time_seq parameter. 2322 * 2323 * Naturally, there is no support for insert, delete or cow operations. 2324 * 2325 * The resulting path and return value will be set up as if we called 2326 * btrfs_search_slot at that point in time with ins_len and cow both set to 0. 
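 *
 * Illustrative call pattern (a sketch, not code from this file;
 * process_item() and handle_missing() are hypothetical placeholders):
 *
 *	ret = btrfs_search_old_slot(root, &key, path, time_seq);
 *	if (ret < 0)
 *		return ret;		/- error -/
 *	else if (ret == 0)
 *		process_item(path);	/- found at path->nodes[0], path->slots[0] -/
 *	else
 *		handle_missing(path);	/- ret == 1, slot is the insert position -/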
2327 */ 2328 int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key, 2329 struct btrfs_path *p, u64 time_seq) 2330 { 2331 struct btrfs_fs_info *fs_info = root->fs_info; 2332 struct extent_buffer *b; 2333 int slot; 2334 int ret; 2335 int err; 2336 int level; 2337 int lowest_unlock = 1; 2338 u8 lowest_level = 0; 2339 2340 lowest_level = p->lowest_level; 2341 WARN_ON(p->nodes[0] != NULL); 2342 ASSERT(!p->nowait); 2343 2344 if (p->search_commit_root) { 2345 BUG_ON(time_seq); 2346 return btrfs_search_slot(NULL, root, key, p, 0, 0); 2347 } 2348 2349 again: 2350 b = btrfs_get_old_root(root, time_seq); 2351 if (!b) { 2352 ret = -EIO; 2353 goto done; 2354 } 2355 level = btrfs_header_level(b); 2356 p->locks[level] = BTRFS_READ_LOCK; 2357 2358 while (b) { 2359 int dec = 0; 2360 2361 level = btrfs_header_level(b); 2362 p->nodes[level] = b; 2363 2364 /* 2365 * we have a lock on b and as long as we aren't changing 2366 * the tree, there is no way for the items in b to change. 2367 * It is safe to drop the lock on our parent before we 2368 * go through the expensive btree search on b. 2369 */ 2370 btrfs_unlock_up_safe(p, level + 1); 2371 2372 ret = btrfs_bin_search(b, 0, key, &slot); 2373 if (ret < 0) 2374 goto done; 2375 2376 if (level == 0) { 2377 p->slots[level] = slot; 2378 unlock_up(p, level, lowest_unlock, 0, NULL); 2379 goto done; 2380 } 2381 2382 if (ret && slot > 0) { 2383 dec = 1; 2384 slot--; 2385 } 2386 p->slots[level] = slot; 2387 unlock_up(p, level, lowest_unlock, 0, NULL); 2388 2389 if (level == lowest_level) { 2390 if (dec) 2391 p->slots[level]++; 2392 goto done; 2393 } 2394 2395 err = read_block_for_search(root, p, &b, level, slot, key); 2396 if (err == -EAGAIN) 2397 goto again; 2398 if (err) { 2399 ret = err; 2400 goto done; 2401 } 2402 2403 level = btrfs_header_level(b); 2404 btrfs_tree_read_lock(b); 2405 b = btrfs_tree_mod_log_rewind(fs_info, p, b, time_seq); 2406 if (!b) { 2407 ret = -ENOMEM; 2408 goto done; 2409 } 2410 p->locks[level] = BTRFS_READ_LOCK; 2411 p->nodes[level] = b; 2412 } 2413 ret = 1; 2414 done: 2415 if (ret < 0) 2416 btrfs_release_path(p); 2417 2418 return ret; 2419 } 2420 2421 /* 2422 * Search the tree again to find a leaf with smaller keys. 2423 * Returns 0 if it found something. 2424 * Returns 1 if there are no smaller keys. 2425 * Returns < 0 on error. 2426 * 2427 * This may release the path, and so you may lose any locks held at the 2428 * time you call it. 2429 */ 2430 static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path) 2431 { 2432 struct btrfs_key key; 2433 struct btrfs_key orig_key; 2434 struct btrfs_disk_key found_key; 2435 int ret; 2436 2437 btrfs_item_key_to_cpu(path->nodes[0], &key, 0); 2438 orig_key = key; 2439 2440 if (key.offset > 0) { 2441 key.offset--; 2442 } else if (key.type > 0) { 2443 key.type--; 2444 key.offset = (u64)-1; 2445 } else if (key.objectid > 0) { 2446 key.objectid--; 2447 key.type = (u8)-1; 2448 key.offset = (u64)-1; 2449 } else { 2450 return 1; 2451 } 2452 2453 btrfs_release_path(path); 2454 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2455 if (ret <= 0) 2456 return ret; 2457 2458 /* 2459 * Previous key not found.
Even if we were at slot 0 of the leaf we had 2460 * before releasing the path and calling btrfs_search_slot(), we now may 2461 * be in a slot pointing to the same original key - this can happen if 2462 * after we released the path, one or more items were moved from a 2463 * sibling leaf into the front of the leaf we had due to an insertion 2464 * (see push_leaf_right()). 2465 * If we hit this case and our slot is > 0, just decrement the slot 2466 * so that the caller does not process the same key again, which may or 2467 * may not break the caller, depending on its logic. 2468 */ 2469 if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) { 2470 btrfs_item_key(path->nodes[0], &found_key, path->slots[0]); 2471 ret = comp_keys(&found_key, &orig_key); 2472 if (ret == 0) { 2473 if (path->slots[0] > 0) { 2474 path->slots[0]--; 2475 return 0; 2476 } 2477 /* 2478 * At slot 0, same key as before, it means orig_key is 2479 * the lowest, leftmost, key in the tree. We're done. 2480 */ 2481 return 1; 2482 } 2483 } 2484 2485 btrfs_item_key(path->nodes[0], &found_key, 0); 2486 ret = comp_keys(&found_key, &key); 2487 /* 2488 * We might have had an item with the previous key in the tree right 2489 * before we released our path. And after we released our path, that 2490 * item might have been pushed to the first slot (0) of the leaf we 2491 * were holding due to a tree balance. Alternatively, an item with the 2492 * previous key can exist as the only element of a leaf (big fat item). 2493 * Therefore account for these 2 cases, so that our callers (like 2494 * btrfs_previous_item) don't miss an existing item with a key matching 2495 * the previous key we computed above. 2496 */ 2497 if (ret <= 0) 2498 return 0; 2499 return 1; 2500 } 2501 2502 /* 2503 * helper to use instead of btrfs_search_slot() if no exact match is needed but 2504 * instead the next or previous item should be returned. 2505 * When find_higher is true, the next higher item is returned, the next lower 2506 * otherwise. 2507 * When return_any and find_higher are both true, and no higher item is found, 2508 * return the next lower instead. 2509 * When return_any is true and find_higher is false, and no lower item is found, 2510 * return the next higher instead. 2511 * It returns 0 if any item is found, 1 if none is found (tree empty), and 2512 * < 0 on error 2513 */ 2514 int btrfs_search_slot_for_read(struct btrfs_root *root, 2515 const struct btrfs_key *key, 2516 struct btrfs_path *p, int find_higher, 2517 int return_any) 2518 { 2519 int ret; 2520 struct extent_buffer *leaf; 2521 2522 again: 2523 ret = btrfs_search_slot(NULL, root, key, p, 0, 0); 2524 if (ret <= 0) 2525 return ret; 2526 /* 2527 * a return value of 1 means the path is at the position where the 2528 * item should be inserted. Normally this is the next bigger item, 2529 * but in case the previous item is the last in a leaf, path points 2530 * to the first free slot in the previous leaf, i.e. at an invalid 2531 * item.
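 * (E.g. if @key is greater than every key in the tree, the returned slot
 * equals the last leaf's nritems, one past its last valid item.)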
2532 */ 2533 leaf = p->nodes[0]; 2534 2535 if (find_higher) { 2536 if (p->slots[0] >= btrfs_header_nritems(leaf)) { 2537 ret = btrfs_next_leaf(root, p); 2538 if (ret <= 0) 2539 return ret; 2540 if (!return_any) 2541 return 1; 2542 /* 2543 * no higher item found, return the next 2544 * lower instead 2545 */ 2546 return_any = 0; 2547 find_higher = 0; 2548 btrfs_release_path(p); 2549 goto again; 2550 } 2551 } else { 2552 if (p->slots[0] == 0) { 2553 ret = btrfs_prev_leaf(root, p); 2554 if (ret < 0) 2555 return ret; 2556 if (!ret) { 2557 leaf = p->nodes[0]; 2558 if (p->slots[0] == btrfs_header_nritems(leaf)) 2559 p->slots[0]--; 2560 return 0; 2561 } 2562 if (!return_any) 2563 return 1; 2564 /* 2565 * no lower item found, return the next 2566 * higher instead 2567 */ 2568 return_any = 0; 2569 find_higher = 1; 2570 btrfs_release_path(p); 2571 goto again; 2572 } else { 2573 --p->slots[0]; 2574 } 2575 } 2576 return 0; 2577 } 2578 2579 /* 2580 * Execute search and call btrfs_previous_item to traverse backwards if the item 2581 * was not found. 2582 * 2583 * Return 0 if found, 1 if not found and < 0 if error. 2584 */ 2585 int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key, 2586 struct btrfs_path *path) 2587 { 2588 int ret; 2589 2590 ret = btrfs_search_slot(NULL, root, key, path, 0, 0); 2591 if (ret > 0) 2592 ret = btrfs_previous_item(root, path, key->objectid, key->type); 2593 2594 if (ret == 0) 2595 btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]); 2596 2597 return ret; 2598 } 2599 2600 /* 2601 * Search for a valid slot for the given path. 2602 * 2603 * @root: The root node of the tree. 2604 * @key: Will contain the key of a valid item if found. 2605 * @path: The starting point to validate the slot. 2606 * 2607 * Return: 0 if the item is valid 2608 * 1 if not found 2609 * <0 if error. 2610 */ 2611 int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key, 2612 struct btrfs_path *path) 2613 { 2614 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { 2615 int ret; 2616 2617 ret = btrfs_next_leaf(root, path); 2618 if (ret) 2619 return ret; 2620 } 2621 2622 btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]); 2623 return 0; 2624 } 2625 2626 /* 2627 * adjust the pointers going up the tree, starting at @level, 2628 * making sure the right key of each node points to 'key'. 2629 * This is used after shifting pointers to the left, so it stops 2630 * fixing up pointers when a given leaf/node is not in slot 0 of the 2631 * higher levels 2632 * 2633 */ 2634 static void fixup_low_keys(struct btrfs_path *path, 2635 struct btrfs_disk_key *key, int level) 2636 { 2637 int i; 2638 struct extent_buffer *t; 2639 int ret; 2640 2641 for (i = level; i < BTRFS_MAX_LEVEL; i++) { 2642 int tslot = path->slots[i]; 2643 2644 if (!path->nodes[i]) 2645 break; 2646 t = path->nodes[i]; 2647 ret = btrfs_tree_mod_log_insert_key(t, tslot, 2648 BTRFS_MOD_LOG_KEY_REPLACE); 2649 BUG_ON(ret < 0); 2650 btrfs_set_node_key(t, key, tslot); 2651 btrfs_mark_buffer_dirty(path->nodes[i]); 2652 if (tslot != 0) 2653 break; 2654 } 2655 } 2656 2657 /* 2658 * update item key. 2659 * 2660 * This function isn't completely safe.
It's the caller's responsibility 2661 * to ensure that the new key won't break the key order 2662 */ 2663 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info, 2664 struct btrfs_path *path, 2665 const struct btrfs_key *new_key) 2666 { 2667 struct btrfs_disk_key disk_key; 2668 struct extent_buffer *eb; 2669 int slot; 2670 2671 eb = path->nodes[0]; 2672 slot = path->slots[0]; 2673 if (slot > 0) { 2674 btrfs_item_key(eb, &disk_key, slot - 1); 2675 if (unlikely(comp_keys(&disk_key, new_key) >= 0)) { 2676 btrfs_print_leaf(eb); 2677 btrfs_crit(fs_info, 2678 "slot %u key (%llu %u %llu) new key (%llu %u %llu)", 2679 slot, btrfs_disk_key_objectid(&disk_key), 2680 btrfs_disk_key_type(&disk_key), 2681 btrfs_disk_key_offset(&disk_key), 2682 new_key->objectid, new_key->type, 2683 new_key->offset); 2684 BUG(); 2685 } 2686 } 2687 if (slot < btrfs_header_nritems(eb) - 1) { 2688 btrfs_item_key(eb, &disk_key, slot + 1); 2689 if (unlikely(comp_keys(&disk_key, new_key) <= 0)) { 2690 btrfs_print_leaf(eb); 2691 btrfs_crit(fs_info, 2692 "slot %u key (%llu %u %llu) new key (%llu %u %llu)", 2693 slot, btrfs_disk_key_objectid(&disk_key), 2694 btrfs_disk_key_type(&disk_key), 2695 btrfs_disk_key_offset(&disk_key), 2696 new_key->objectid, new_key->type, 2697 new_key->offset); 2698 BUG(); 2699 } 2700 } 2701 2702 btrfs_cpu_key_to_disk(&disk_key, new_key); 2703 btrfs_set_item_key(eb, &disk_key, slot); 2704 btrfs_mark_buffer_dirty(eb); 2705 if (slot == 0) 2706 fixup_low_keys(path, &disk_key, 1); 2707 } 2708 2709 /* 2710 * Check key order of two sibling extent buffers. 2711 * 2712 * Return true if something is wrong. 2713 * Return false if everything is fine. 2714 * 2715 * Tree-checker only works inside one tree block, thus the following 2716 * corruption cannot be detected by tree-checker: 2717 * 2718 * Leaf @left | Leaf @right 2719 * -------------------------------------------------------------- 2720 * | 1 | 2 | 3 | 4 | 5 | f6 | | 7 | 8 | 2721 * 2722 * Key f6 in leaf @left is itself valid, but it is not valid when the next 2723 * key in leaf @right is 7. 2724 * This can only be checked at tree block merge time. 2725 * And since the tree checker has ensured all key order in each tree block 2726 * is correct, we only need to check the last key of @left and the first 2727 * key of @right.
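 *
 * In this file the check is done by the merge/push helpers below:
 * push_node_left(), balance_node_right(), push_leaf_right() and
 * push_leaf_left().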
2728 */ 2729 static bool check_sibling_keys(struct extent_buffer *left, 2730 struct extent_buffer *right) 2731 { 2732 struct btrfs_key left_last; 2733 struct btrfs_key right_first; 2734 int level = btrfs_header_level(left); 2735 int nr_left = btrfs_header_nritems(left); 2736 int nr_right = btrfs_header_nritems(right); 2737 2738 /* No key to check in one of the tree blocks */ 2739 if (!nr_left || !nr_right) 2740 return false; 2741 2742 if (level) { 2743 btrfs_node_key_to_cpu(left, &left_last, nr_left - 1); 2744 btrfs_node_key_to_cpu(right, &right_first, 0); 2745 } else { 2746 btrfs_item_key_to_cpu(left, &left_last, nr_left - 1); 2747 btrfs_item_key_to_cpu(right, &right_first, 0); 2748 } 2749 2750 if (unlikely(btrfs_comp_cpu_keys(&left_last, &right_first) >= 0)) { 2751 btrfs_crit(left->fs_info, "left extent buffer:"); 2752 btrfs_print_tree(left, false); 2753 btrfs_crit(left->fs_info, "right extent buffer:"); 2754 btrfs_print_tree(right, false); 2755 btrfs_crit(left->fs_info, 2756 "bad key order, sibling blocks, left last (%llu %u %llu) right first (%llu %u %llu)", 2757 left_last.objectid, left_last.type, 2758 left_last.offset, right_first.objectid, 2759 right_first.type, right_first.offset); 2760 return true; 2761 } 2762 return false; 2763 } 2764 2765 /* 2766 * try to push data from one node into the next node left in the 2767 * tree. 2768 * 2769 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible 2770 * error, and > 0 if there was no room in the left hand block. 2771 */ 2772 static int push_node_left(struct btrfs_trans_handle *trans, 2773 struct extent_buffer *dst, 2774 struct extent_buffer *src, int empty) 2775 { 2776 struct btrfs_fs_info *fs_info = trans->fs_info; 2777 int push_items = 0; 2778 int src_nritems; 2779 int dst_nritems; 2780 int ret = 0; 2781 2782 src_nritems = btrfs_header_nritems(src); 2783 dst_nritems = btrfs_header_nritems(dst); 2784 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems; 2785 WARN_ON(btrfs_header_generation(src) != trans->transid); 2786 WARN_ON(btrfs_header_generation(dst) != trans->transid); 2787 2788 if (!empty && src_nritems <= 8) 2789 return 1; 2790 2791 if (push_items <= 0) 2792 return 1; 2793 2794 if (empty) { 2795 push_items = min(src_nritems, push_items); 2796 if (push_items < src_nritems) { 2797 /* leave at least 8 pointers in the node if 2798 * we aren't going to empty it 2799 */ 2800 if (src_nritems - push_items < 8) { 2801 if (push_items <= 8) 2802 return 1; 2803 push_items -= 8; 2804 } 2805 } 2806 } else 2807 push_items = min(src_nritems - 8, push_items); 2808 2809 /* dst is the left eb, src is the middle eb */ 2810 if (check_sibling_keys(dst, src)) { 2811 ret = -EUCLEAN; 2812 btrfs_abort_transaction(trans, ret); 2813 return ret; 2814 } 2815 ret = btrfs_tree_mod_log_eb_copy(dst, src, dst_nritems, 0, push_items); 2816 if (ret) { 2817 btrfs_abort_transaction(trans, ret); 2818 return ret; 2819 } 2820 copy_extent_buffer(dst, src, 2821 btrfs_node_key_ptr_offset(dst, dst_nritems), 2822 btrfs_node_key_ptr_offset(src, 0), 2823 push_items * sizeof(struct btrfs_key_ptr)); 2824 2825 if (push_items < src_nritems) { 2826 /* 2827 * btrfs_tree_mod_log_eb_copy handles logging the move, so we 2828 * don't need to do an explicit tree mod log operation for it. 
2829 */ 2830 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(src, 0), 2831 btrfs_node_key_ptr_offset(src, push_items), 2832 (src_nritems - push_items) * 2833 sizeof(struct btrfs_key_ptr)); 2834 } 2835 btrfs_set_header_nritems(src, src_nritems - push_items); 2836 btrfs_set_header_nritems(dst, dst_nritems + push_items); 2837 btrfs_mark_buffer_dirty(src); 2838 btrfs_mark_buffer_dirty(dst); 2839 2840 return ret; 2841 } 2842 2843 /* 2844 * try to push data from one node into the next node right in the 2845 * tree. 2846 * 2847 * returns 0 if some ptrs were pushed, < 0 if there was some horrible 2848 * error, and > 0 if there was no room in the right hand block. 2849 * 2850 * this will only push up to 1/2 the contents of the left node over 2851 */ 2852 static int balance_node_right(struct btrfs_trans_handle *trans, 2853 struct extent_buffer *dst, 2854 struct extent_buffer *src) 2855 { 2856 struct btrfs_fs_info *fs_info = trans->fs_info; 2857 int push_items = 0; 2858 int max_push; 2859 int src_nritems; 2860 int dst_nritems; 2861 int ret = 0; 2862 2863 WARN_ON(btrfs_header_generation(src) != trans->transid); 2864 WARN_ON(btrfs_header_generation(dst) != trans->transid); 2865 2866 src_nritems = btrfs_header_nritems(src); 2867 dst_nritems = btrfs_header_nritems(dst); 2868 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems; 2869 if (push_items <= 0) 2870 return 1; 2871 2872 if (src_nritems < 4) 2873 return 1; 2874 2875 max_push = src_nritems / 2 + 1; 2876 /* don't try to empty the node */ 2877 if (max_push >= src_nritems) 2878 return 1; 2879 2880 if (max_push < push_items) 2881 push_items = max_push; 2882 2883 /* dst is the right eb, src is the middle eb */ 2884 if (check_sibling_keys(src, dst)) { 2885 ret = -EUCLEAN; 2886 btrfs_abort_transaction(trans, ret); 2887 return ret; 2888 } 2889 2890 /* 2891 * btrfs_tree_mod_log_eb_copy handles logging the move, so we don't 2892 * need to do an explicit tree mod log operation for it. 2893 */ 2894 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(dst, push_items), 2895 btrfs_node_key_ptr_offset(dst, 0), 2896 (dst_nritems) * 2897 sizeof(struct btrfs_key_ptr)); 2898 2899 ret = btrfs_tree_mod_log_eb_copy(dst, src, 0, src_nritems - push_items, 2900 push_items); 2901 if (ret) { 2902 btrfs_abort_transaction(trans, ret); 2903 return ret; 2904 } 2905 copy_extent_buffer(dst, src, 2906 btrfs_node_key_ptr_offset(dst, 0), 2907 btrfs_node_key_ptr_offset(src, src_nritems - push_items), 2908 push_items * sizeof(struct btrfs_key_ptr)); 2909 2910 btrfs_set_header_nritems(src, src_nritems - push_items); 2911 btrfs_set_header_nritems(dst, dst_nritems + push_items); 2912 2913 btrfs_mark_buffer_dirty(src); 2914 btrfs_mark_buffer_dirty(dst); 2915 2916 return ret; 2917 } 2918 2919 /* 2920 * helper function to insert a new root level in the tree. 2921 * A new node is allocated, and a single item is inserted to 2922 * point to the existing root 2923 * 2924 * returns zero on success or < 0 on failure. 
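 *
 * Illustrative sketch (not a diagram from this file): the old root is the
 * block at @level - 1; the new root c is allocated at @level with a single
 * key pointer:
 *
 *	new root c (@level, nritems == 1)
 *	    |
 *	    old root "lower" (@level - 1, contents unchanged)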
2925 */ 2926 static noinline int insert_new_root(struct btrfs_trans_handle *trans, 2927 struct btrfs_root *root, 2928 struct btrfs_path *path, int level) 2929 { 2930 struct btrfs_fs_info *fs_info = root->fs_info; 2931 u64 lower_gen; 2932 struct extent_buffer *lower; 2933 struct extent_buffer *c; 2934 struct extent_buffer *old; 2935 struct btrfs_disk_key lower_key; 2936 int ret; 2937 2938 BUG_ON(path->nodes[level]); 2939 BUG_ON(path->nodes[level-1] != root->node); 2940 2941 lower = path->nodes[level-1]; 2942 if (level == 1) 2943 btrfs_item_key(lower, &lower_key, 0); 2944 else 2945 btrfs_node_key(lower, &lower_key, 0); 2946 2947 c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, 2948 &lower_key, level, root->node->start, 0, 2949 BTRFS_NESTING_NEW_ROOT); 2950 if (IS_ERR(c)) 2951 return PTR_ERR(c); 2952 2953 root_add_used(root, fs_info->nodesize); 2954 2955 btrfs_set_header_nritems(c, 1); 2956 btrfs_set_node_key(c, &lower_key, 0); 2957 btrfs_set_node_blockptr(c, 0, lower->start); 2958 lower_gen = btrfs_header_generation(lower); 2959 WARN_ON(lower_gen != trans->transid); 2960 2961 btrfs_set_node_ptr_generation(c, 0, lower_gen); 2962 2963 btrfs_mark_buffer_dirty(c); 2964 2965 old = root->node; 2966 ret = btrfs_tree_mod_log_insert_root(root->node, c, false); 2967 BUG_ON(ret < 0); 2968 rcu_assign_pointer(root->node, c); 2969 2970 /* the super has an extra ref to root->node */ 2971 free_extent_buffer(old); 2972 2973 add_root_to_dirty_list(root); 2974 atomic_inc(&c->refs); 2975 path->nodes[level] = c; 2976 path->locks[level] = BTRFS_WRITE_LOCK; 2977 path->slots[level] = 0; 2978 return 0; 2979 } 2980 2981 /* 2982 * worker function to insert a single pointer in a node. 2983 * The node should have enough room for the pointer already. 2984 * 2985 * slot and level indicate where you want the key to go, and 2986 * bytenr is the block the key points to. 2987 */ 2988 static void insert_ptr(struct btrfs_trans_handle *trans, 2989 struct btrfs_path *path, 2990 struct btrfs_disk_key *key, u64 bytenr, 2991 int slot, int level) 2992 { 2993 struct extent_buffer *lower; 2994 int nritems; 2995 int ret; 2996 2997 BUG_ON(!path->nodes[level]); 2998 btrfs_assert_tree_write_locked(path->nodes[level]); 2999 lower = path->nodes[level]; 3000 nritems = btrfs_header_nritems(lower); 3001 BUG_ON(slot > nritems); 3002 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(trans->fs_info)); 3003 if (slot != nritems) { 3004 if (level) { 3005 ret = btrfs_tree_mod_log_insert_move(lower, slot + 1, 3006 slot, nritems - slot); 3007 BUG_ON(ret < 0); 3008 } 3009 memmove_extent_buffer(lower, 3010 btrfs_node_key_ptr_offset(lower, slot + 1), 3011 btrfs_node_key_ptr_offset(lower, slot), 3012 (nritems - slot) * sizeof(struct btrfs_key_ptr)); 3013 } 3014 if (level) { 3015 ret = btrfs_tree_mod_log_insert_key(lower, slot, 3016 BTRFS_MOD_LOG_KEY_ADD); 3017 BUG_ON(ret < 0); 3018 } 3019 btrfs_set_node_key(lower, key, slot); 3020 btrfs_set_node_blockptr(lower, slot, bytenr); 3021 WARN_ON(trans->transid == 0); 3022 btrfs_set_node_ptr_generation(lower, slot, trans->transid); 3023 btrfs_set_header_nritems(lower, nritems + 1); 3024 btrfs_mark_buffer_dirty(lower); 3025 } 3026 3027 /* 3028 * split the node at the specified level in path in two. 3029 * The path is corrected to point to the appropriate node after the split 3030 * 3031 * Before splitting this tries to make some room in the node by pushing 3032 * left and right, if either one works, it returns right away.
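 *
 * Illustrative sketch (not a diagram from this file), with
 * mid = (c_nritems + 1) / 2 and c_nritems == 6:
 *
 *	before:  c     = [ p0 p1 p2 p3 p4 p5 ]
 *	after:   c     = [ p0 p1 p2 ]
 *	         split = [ p3 p4 p5 ]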
3033 * 3034 * returns 0 on success and < 0 on failure 3035 */ 3036 static noinline int split_node(struct btrfs_trans_handle *trans, 3037 struct btrfs_root *root, 3038 struct btrfs_path *path, int level) 3039 { 3040 struct btrfs_fs_info *fs_info = root->fs_info; 3041 struct extent_buffer *c; 3042 struct extent_buffer *split; 3043 struct btrfs_disk_key disk_key; 3044 int mid; 3045 int ret; 3046 u32 c_nritems; 3047 3048 c = path->nodes[level]; 3049 WARN_ON(btrfs_header_generation(c) != trans->transid); 3050 if (c == root->node) { 3051 /* 3052 * trying to split the root, let's make a new one 3053 * 3054 * tree mod log: we don't log removal of the old root in 3055 * insert_new_root(), because that root buffer will be kept as a 3056 * normal node. We are going to log removal of half of the 3057 * elements below with btrfs_tree_mod_log_eb_copy(). We're 3058 * holding a tree lock on the buffer, which is why we cannot 3059 * race with other tree_mod_log users. 3060 */ 3061 ret = insert_new_root(trans, root, path, level + 1); 3062 if (ret) 3063 return ret; 3064 } else { 3065 ret = push_nodes_for_insert(trans, root, path, level); 3066 c = path->nodes[level]; 3067 if (!ret && btrfs_header_nritems(c) < 3068 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) 3069 return 0; 3070 if (ret < 0) 3071 return ret; 3072 } 3073 3074 c_nritems = btrfs_header_nritems(c); 3075 mid = (c_nritems + 1) / 2; 3076 btrfs_node_key(c, &disk_key, mid); 3077 3078 split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, 3079 &disk_key, level, c->start, 0, 3080 BTRFS_NESTING_SPLIT); 3081 if (IS_ERR(split)) 3082 return PTR_ERR(split); 3083 3084 root_add_used(root, fs_info->nodesize); 3085 ASSERT(btrfs_header_level(c) == level); 3086 3087 ret = btrfs_tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid); 3088 if (ret) { 3089 btrfs_tree_unlock(split); 3090 free_extent_buffer(split); 3091 btrfs_abort_transaction(trans, ret); 3092 return ret; 3093 } 3094 copy_extent_buffer(split, c, 3095 btrfs_node_key_ptr_offset(split, 0), 3096 btrfs_node_key_ptr_offset(c, mid), 3097 (c_nritems - mid) * sizeof(struct btrfs_key_ptr)); 3098 btrfs_set_header_nritems(split, c_nritems - mid); 3099 btrfs_set_header_nritems(c, mid); 3100 3101 btrfs_mark_buffer_dirty(c); 3102 btrfs_mark_buffer_dirty(split); 3103 3104 insert_ptr(trans, path, &disk_key, split->start, 3105 path->slots[level + 1] + 1, level + 1); 3106 3107 if (path->slots[level] >= mid) { 3108 path->slots[level] -= mid; 3109 btrfs_tree_unlock(c); 3110 free_extent_buffer(c); 3111 path->nodes[level] = split; 3112 path->slots[level + 1] += 1; 3113 } else { 3114 btrfs_tree_unlock(split); 3115 free_extent_buffer(split); 3116 } 3117 return 0; 3118 } 3119 3120 /* 3121 * how many bytes are required to store the items in a leaf. start 3122 * and nr indicate which items in the leaf to check. This totals up the 3123 * space used both by the item structs and the item data 3124 */ 3125 static int leaf_space_used(const struct extent_buffer *l, int start, int nr) 3126 { 3127 int data_len; 3128 int nritems = btrfs_header_nritems(l); 3129 int end = min(nritems, start + nr) - 1; 3130 3131 if (!nr) 3132 return 0; 3133 data_len = btrfs_item_offset(l, start) + btrfs_item_size(l, start); 3134 data_len = data_len - btrfs_item_offset(l, end); 3135 data_len += sizeof(struct btrfs_item) * nr; 3136 WARN_ON(data_len < 0); 3137 return data_len; 3138 } 3139 3140 /* 3141 * The space between the end of the leaf items and 3142 * the start of the leaf data.
IOW, how much room 3143 * the leaf has left for both items and data 3144 */ 3145 int btrfs_leaf_free_space(const struct extent_buffer *leaf) 3146 { 3147 struct btrfs_fs_info *fs_info = leaf->fs_info; 3148 int nritems = btrfs_header_nritems(leaf); 3149 int ret; 3150 3151 ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems); 3152 if (ret < 0) { 3153 btrfs_crit(fs_info, 3154 "leaf free space ret %d, leaf data size %lu, used %d nritems %d", 3155 ret, 3156 (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info), 3157 leaf_space_used(leaf, 0, nritems), nritems); 3158 } 3159 return ret; 3160 } 3161 3162 /* 3163 * min slot controls the lowest index we're willing to push to the 3164 * right. We'll push up to and including min_slot, but no lower 3165 */ 3166 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans, 3167 struct btrfs_path *path, 3168 int data_size, int empty, 3169 struct extent_buffer *right, 3170 int free_space, u32 left_nritems, 3171 u32 min_slot) 3172 { 3173 struct btrfs_fs_info *fs_info = right->fs_info; 3174 struct extent_buffer *left = path->nodes[0]; 3175 struct extent_buffer *upper = path->nodes[1]; 3176 struct btrfs_map_token token; 3177 struct btrfs_disk_key disk_key; 3178 int slot; 3179 u32 i; 3180 int push_space = 0; 3181 int push_items = 0; 3182 u32 nr; 3183 u32 right_nritems; 3184 u32 data_end; 3185 u32 this_item_size; 3186 3187 if (empty) 3188 nr = 0; 3189 else 3190 nr = max_t(u32, 1, min_slot); 3191 3192 if (path->slots[0] >= left_nritems) 3193 push_space += data_size; 3194 3195 slot = path->slots[1]; 3196 i = left_nritems - 1; 3197 while (i >= nr) { 3198 if (!empty && push_items > 0) { 3199 if (path->slots[0] > i) 3200 break; 3201 if (path->slots[0] == i) { 3202 int space = btrfs_leaf_free_space(left); 3203 3204 if (space + push_space * 2 > free_space) 3205 break; 3206 } 3207 } 3208 3209 if (path->slots[0] == i) 3210 push_space += data_size; 3211 3212 this_item_size = btrfs_item_size(left, i); 3213 if (this_item_size + sizeof(struct btrfs_item) + 3214 push_space > free_space) 3215 break; 3216 3217 push_items++; 3218 push_space += this_item_size + sizeof(struct btrfs_item); 3219 if (i == 0) 3220 break; 3221 i--; 3222 } 3223 3224 if (push_items == 0) 3225 goto out_unlock; 3226 3227 WARN_ON(!empty && push_items == left_nritems); 3228 3229 /* push left to right */ 3230 right_nritems = btrfs_header_nritems(right); 3231 3232 push_space = btrfs_item_data_end(left, left_nritems - push_items); 3233 push_space -= leaf_data_end(left); 3234 3235 /* make room in the right data area */ 3236 data_end = leaf_data_end(right); 3237 memmove_leaf_data(right, data_end - push_space, data_end, 3238 BTRFS_LEAF_DATA_SIZE(fs_info) - data_end); 3239 3240 /* copy from the left data area */ 3241 copy_leaf_data(right, left, BTRFS_LEAF_DATA_SIZE(fs_info) - push_space, 3242 leaf_data_end(left), push_space); 3243 3244 memmove_leaf_items(right, push_items, 0, right_nritems); 3245 3246 /* copy the items from left to right */ 3247 copy_leaf_items(right, left, 0, left_nritems - push_items, push_items); 3248 3249 /* update the item pointers */ 3250 btrfs_init_map_token(&token, right); 3251 right_nritems += push_items; 3252 btrfs_set_header_nritems(right, right_nritems); 3253 push_space = BTRFS_LEAF_DATA_SIZE(fs_info); 3254 for (i = 0; i < right_nritems; i++) { 3255 push_space -= btrfs_token_item_size(&token, i); 3256 btrfs_set_token_item_offset(&token, i, push_space); 3257 } 3258 3259 left_nritems -= push_items; 3260 btrfs_set_header_nritems(left, left_nritems); 3261 3262 if 
(left_nritems) 3263 btrfs_mark_buffer_dirty(left); 3264 else 3265 btrfs_clear_buffer_dirty(trans, left); 3266 3267 btrfs_mark_buffer_dirty(right); 3268 3269 btrfs_item_key(right, &disk_key, 0); 3270 btrfs_set_node_key(upper, &disk_key, slot + 1); 3271 btrfs_mark_buffer_dirty(upper); 3272 3273 /* then fixup the leaf pointer in the path */ 3274 if (path->slots[0] >= left_nritems) { 3275 path->slots[0] -= left_nritems; 3276 if (btrfs_header_nritems(path->nodes[0]) == 0) 3277 btrfs_clear_buffer_dirty(trans, path->nodes[0]); 3278 btrfs_tree_unlock(path->nodes[0]); 3279 free_extent_buffer(path->nodes[0]); 3280 path->nodes[0] = right; 3281 path->slots[1] += 1; 3282 } else { 3283 btrfs_tree_unlock(right); 3284 free_extent_buffer(right); 3285 } 3286 return 0; 3287 3288 out_unlock: 3289 btrfs_tree_unlock(right); 3290 free_extent_buffer(right); 3291 return 1; 3292 } 3293 3294 /* 3295 * push some data in the path leaf to the right, trying to free up at 3296 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3297 * 3298 * returns 1 if the push failed because the other node didn't have enough 3299 * room, 0 if everything worked out and < 0 if there were major errors. 3300 * 3301 * this will push starting from min_slot to the end of the leaf. It won't 3302 * push any slot lower than min_slot 3303 */ 3304 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root 3305 *root, struct btrfs_path *path, 3306 int min_data_size, int data_size, 3307 int empty, u32 min_slot) 3308 { 3309 struct extent_buffer *left = path->nodes[0]; 3310 struct extent_buffer *right; 3311 struct extent_buffer *upper; 3312 int slot; 3313 int free_space; 3314 u32 left_nritems; 3315 int ret; 3316 3317 if (!path->nodes[1]) 3318 return 1; 3319 3320 slot = path->slots[1]; 3321 upper = path->nodes[1]; 3322 if (slot >= btrfs_header_nritems(upper) - 1) 3323 return 1; 3324 3325 btrfs_assert_tree_write_locked(path->nodes[1]); 3326 3327 right = btrfs_read_node_slot(upper, slot + 1); 3328 if (IS_ERR(right)) 3329 return PTR_ERR(right); 3330 3331 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT); 3332 3333 free_space = btrfs_leaf_free_space(right); 3334 if (free_space < data_size) 3335 goto out_unlock; 3336 3337 ret = btrfs_cow_block(trans, root, right, upper, 3338 slot + 1, &right, BTRFS_NESTING_RIGHT_COW); 3339 if (ret) 3340 goto out_unlock; 3341 3342 left_nritems = btrfs_header_nritems(left); 3343 if (left_nritems == 0) 3344 goto out_unlock; 3345 3346 if (check_sibling_keys(left, right)) { 3347 ret = -EUCLEAN; 3348 btrfs_abort_transaction(trans, ret); 3349 btrfs_tree_unlock(right); 3350 free_extent_buffer(right); 3351 return ret; 3352 } 3353 if (path->slots[0] == left_nritems && !empty) { 3354 /* Key greater than all keys in the leaf, right neighbor has 3355 * enough room for it and we're not emptying our leaf to delete 3356 * it, therefore use right neighbor to insert the new item and 3357 * no need to touch/dirty our left leaf. */ 3358 btrfs_tree_unlock(left); 3359 free_extent_buffer(left); 3360 path->nodes[0] = right; 3361 path->slots[0] = 0; 3362 path->slots[1]++; 3363 return 0; 3364 } 3365 3366 return __push_leaf_right(trans, path, min_data_size, empty, right, 3367 free_space, left_nritems, min_slot); 3368 out_unlock: 3369 btrfs_tree_unlock(right); 3370 free_extent_buffer(right); 3371 return 1; 3372 } 3373 3374 /* 3375 * push some data in the path leaf to the left, trying to free up at 3376 * least data_size bytes. 
returns zero if the push worked, nonzero otherwise 3377 * 3378 * max_slot can put a limit on how far into the leaf we'll push items. The 3379 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the 3380 * items 3381 */ 3382 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans, 3383 struct btrfs_path *path, int data_size, 3384 int empty, struct extent_buffer *left, 3385 int free_space, u32 right_nritems, 3386 u32 max_slot) 3387 { 3388 struct btrfs_fs_info *fs_info = left->fs_info; 3389 struct btrfs_disk_key disk_key; 3390 struct extent_buffer *right = path->nodes[0]; 3391 int i; 3392 int push_space = 0; 3393 int push_items = 0; 3394 u32 old_left_nritems; 3395 u32 nr; 3396 int ret = 0; 3397 u32 this_item_size; 3398 u32 old_left_item_size; 3399 struct btrfs_map_token token; 3400 3401 if (empty) 3402 nr = min(right_nritems, max_slot); 3403 else 3404 nr = min(right_nritems - 1, max_slot); 3405 3406 for (i = 0; i < nr; i++) { 3407 if (!empty && push_items > 0) { 3408 if (path->slots[0] < i) 3409 break; 3410 if (path->slots[0] == i) { 3411 int space = btrfs_leaf_free_space(right); 3412 3413 if (space + push_space * 2 > free_space) 3414 break; 3415 } 3416 } 3417 3418 if (path->slots[0] == i) 3419 push_space += data_size; 3420 3421 this_item_size = btrfs_item_size(right, i); 3422 if (this_item_size + sizeof(struct btrfs_item) + push_space > 3423 free_space) 3424 break; 3425 3426 push_items++; 3427 push_space += this_item_size + sizeof(struct btrfs_item); 3428 } 3429 3430 if (push_items == 0) { 3431 ret = 1; 3432 goto out; 3433 } 3434 WARN_ON(!empty && push_items == btrfs_header_nritems(right)); 3435 3436 /* push data from right to left */ 3437 copy_leaf_items(left, right, btrfs_header_nritems(left), 0, push_items); 3438 3439 push_space = BTRFS_LEAF_DATA_SIZE(fs_info) - 3440 btrfs_item_offset(right, push_items - 1); 3441 3442 copy_leaf_data(left, right, leaf_data_end(left) - push_space, 3443 btrfs_item_offset(right, push_items - 1), push_space); 3444 old_left_nritems = btrfs_header_nritems(left); 3445 BUG_ON(old_left_nritems <= 0); 3446 3447 btrfs_init_map_token(&token, left); 3448 old_left_item_size = btrfs_item_offset(left, old_left_nritems - 1); 3449 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) { 3450 u32 ioff; 3451 3452 ioff = btrfs_token_item_offset(&token, i); 3453 btrfs_set_token_item_offset(&token, i, 3454 ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size)); 3455 } 3456 btrfs_set_header_nritems(left, old_left_nritems + push_items); 3457 3458 /* fixup right node */ 3459 if (push_items > right_nritems) 3460 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items, 3461 right_nritems); 3462 3463 if (push_items < right_nritems) { 3464 push_space = btrfs_item_offset(right, push_items - 1) - 3465 leaf_data_end(right); 3466 memmove_leaf_data(right, 3467 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space, 3468 leaf_data_end(right), push_space); 3469 3470 memmove_leaf_items(right, 0, push_items, 3471 btrfs_header_nritems(right) - push_items); 3472 } 3473 3474 btrfs_init_map_token(&token, right); 3475 right_nritems -= push_items; 3476 btrfs_set_header_nritems(right, right_nritems); 3477 push_space = BTRFS_LEAF_DATA_SIZE(fs_info); 3478 for (i = 0; i < right_nritems; i++) { 3479 push_space = push_space - btrfs_token_item_size(&token, i); 3480 btrfs_set_token_item_offset(&token, i, push_space); 3481 } 3482 3483 btrfs_mark_buffer_dirty(left); 3484 if (right_nritems) 3485 btrfs_mark_buffer_dirty(right); 3486 else 3487 btrfs_clear_buffer_dirty(trans,
right); 3488 3489 btrfs_item_key(right, &disk_key, 0); 3490 fixup_low_keys(path, &disk_key, 1); 3491 3492 /* then fixup the leaf pointer in the path */ 3493 if (path->slots[0] < push_items) { 3494 path->slots[0] += old_left_nritems; 3495 btrfs_tree_unlock(path->nodes[0]); 3496 free_extent_buffer(path->nodes[0]); 3497 path->nodes[0] = left; 3498 path->slots[1] -= 1; 3499 } else { 3500 btrfs_tree_unlock(left); 3501 free_extent_buffer(left); 3502 path->slots[0] -= push_items; 3503 } 3504 BUG_ON(path->slots[0] < 0); 3505 return ret; 3506 out: 3507 btrfs_tree_unlock(left); 3508 free_extent_buffer(left); 3509 return ret; 3510 } 3511 3512 /* 3513 * push some data in the path leaf to the left, trying to free up at 3514 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3515 * 3516 * max_slot can put a limit on how far into the leaf we'll push items. The 3517 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the 3518 * items 3519 */ 3520 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root 3521 *root, struct btrfs_path *path, int min_data_size, 3522 int data_size, int empty, u32 max_slot) 3523 { 3524 struct extent_buffer *right = path->nodes[0]; 3525 struct extent_buffer *left; 3526 int slot; 3527 int free_space; 3528 u32 right_nritems; 3529 int ret = 0; 3530 3531 slot = path->slots[1]; 3532 if (slot == 0) 3533 return 1; 3534 if (!path->nodes[1]) 3535 return 1; 3536 3537 right_nritems = btrfs_header_nritems(right); 3538 if (right_nritems == 0) 3539 return 1; 3540 3541 btrfs_assert_tree_write_locked(path->nodes[1]); 3542 3543 left = btrfs_read_node_slot(path->nodes[1], slot - 1); 3544 if (IS_ERR(left)) 3545 return PTR_ERR(left); 3546 3547 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT); 3548 3549 free_space = btrfs_leaf_free_space(left); 3550 if (free_space < data_size) { 3551 ret = 1; 3552 goto out; 3553 } 3554 3555 ret = btrfs_cow_block(trans, root, left, 3556 path->nodes[1], slot - 1, &left, 3557 BTRFS_NESTING_LEFT_COW); 3558 if (ret) { 3559 /* we hit -ENOSPC, but it isn't fatal here */ 3560 if (ret == -ENOSPC) 3561 ret = 1; 3562 goto out; 3563 } 3564 3565 if (check_sibling_keys(left, right)) { 3566 ret = -EUCLEAN; 3567 btrfs_abort_transaction(trans, ret); 3568 goto out; 3569 } 3570 return __push_leaf_left(trans, path, min_data_size, empty, left, 3571 free_space, right_nritems, max_slot); 3572 out: 3573 btrfs_tree_unlock(left); 3574 free_extent_buffer(left); 3575 return ret; 3576 } 3577 3578 /* 3579 * split the path's leaf in two, making sure there is at least data_size 3580 * available for the resulting leaf level of the path. 
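 *
 * This helper performs the copy half of the leaf split: items
 * [mid .. nritems - 1] and their data move from @l into the new leaf
 * @right, and the path is updated to point at whichever leaf ended up
 * holding the target slot.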
3581 */ 3582 static noinline void copy_for_split(struct btrfs_trans_handle *trans, 3583 struct btrfs_path *path, 3584 struct extent_buffer *l, 3585 struct extent_buffer *right, 3586 int slot, int mid, int nritems) 3587 { 3588 struct btrfs_fs_info *fs_info = trans->fs_info; 3589 int data_copy_size; 3590 int rt_data_off; 3591 int i; 3592 struct btrfs_disk_key disk_key; 3593 struct btrfs_map_token token; 3594 3595 nritems = nritems - mid; 3596 btrfs_set_header_nritems(right, nritems); 3597 data_copy_size = btrfs_item_data_end(l, mid) - leaf_data_end(l); 3598 3599 copy_leaf_items(right, l, 0, mid, nritems); 3600 3601 copy_leaf_data(right, l, BTRFS_LEAF_DATA_SIZE(fs_info) - data_copy_size, 3602 leaf_data_end(l), data_copy_size); 3603 3604 rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_data_end(l, mid); 3605 3606 btrfs_init_map_token(&token, right); 3607 for (i = 0; i < nritems; i++) { 3608 u32 ioff; 3609 3610 ioff = btrfs_token_item_offset(&token, i); 3611 btrfs_set_token_item_offset(&token, i, ioff + rt_data_off); 3612 } 3613 3614 btrfs_set_header_nritems(l, mid); 3615 btrfs_item_key(right, &disk_key, 0); 3616 insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1); 3617 3618 btrfs_mark_buffer_dirty(right); 3619 btrfs_mark_buffer_dirty(l); 3620 BUG_ON(path->slots[0] != slot); 3621 3622 if (mid <= slot) { 3623 btrfs_tree_unlock(path->nodes[0]); 3624 free_extent_buffer(path->nodes[0]); 3625 path->nodes[0] = right; 3626 path->slots[0] -= mid; 3627 path->slots[1] += 1; 3628 } else { 3629 btrfs_tree_unlock(right); 3630 free_extent_buffer(right); 3631 } 3632 3633 BUG_ON(path->slots[0] < 0); 3634 } 3635 3636 /* 3637 * double splits happen when we need to insert a big item in the middle 3638 * of a leaf. A double split can leave us with 3 mostly empty leaves: 3639 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ] 3640 * A B C 3641 * 3642 * We avoid this by trying to push the items on either side of our target 3643 * into the adjacent leaves. If all goes well we can avoid the double split 3644 * completely. 3645 */ 3646 static noinline int push_for_double_split(struct btrfs_trans_handle *trans, 3647 struct btrfs_root *root, 3648 struct btrfs_path *path, 3649 int data_size) 3650 { 3651 int ret; 3652 int progress = 0; 3653 int slot; 3654 u32 nritems; 3655 int space_needed = data_size; 3656 3657 slot = path->slots[0]; 3658 if (slot < btrfs_header_nritems(path->nodes[0])) 3659 space_needed -= btrfs_leaf_free_space(path->nodes[0]); 3660 3661 /* 3662 * try to push all the items after our slot into the 3663 * right leaf 3664 */ 3665 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot); 3666 if (ret < 0) 3667 return ret; 3668 3669 if (ret == 0) 3670 progress++; 3671 3672 nritems = btrfs_header_nritems(path->nodes[0]); 3673 /* 3674 * our goal is to get our slot at the start or end of a leaf. 
If 3675 * we've done so, we're done 3676 */ 3677 if (path->slots[0] == 0 || path->slots[0] == nritems) 3678 return 0; 3679 3680 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size) 3681 return 0; 3682 3683 /* try to push all the items before our slot into the left leaf */ 3684 slot = path->slots[0]; 3685 space_needed = data_size; 3686 if (slot > 0) 3687 space_needed -= btrfs_leaf_free_space(path->nodes[0]); 3688 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot); 3689 if (ret < 0) 3690 return ret; 3691 3692 if (ret == 0) 3693 progress++; 3694 3695 if (progress) 3696 return 0; 3697 return 1; 3698 } 3699 3700 /* 3701 * split the path's leaf in two, making sure there is at least data_size 3702 * available for the resulting leaf level of the path. 3703 * 3704 * returns 0 if all went well and < 0 on failure. 3705 */ 3706 static noinline int split_leaf(struct btrfs_trans_handle *trans, 3707 struct btrfs_root *root, 3708 const struct btrfs_key *ins_key, 3709 struct btrfs_path *path, int data_size, 3710 int extend) 3711 { 3712 struct btrfs_disk_key disk_key; 3713 struct extent_buffer *l; 3714 u32 nritems; 3715 int mid; 3716 int slot; 3717 struct extent_buffer *right; 3718 struct btrfs_fs_info *fs_info = root->fs_info; 3719 int ret = 0; 3720 int wret; 3721 int split; 3722 int num_doubles = 0; 3723 int tried_avoid_double = 0; 3724 3725 l = path->nodes[0]; 3726 slot = path->slots[0]; 3727 if (extend && data_size + btrfs_item_size(l, slot) + 3728 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info)) 3729 return -EOVERFLOW; 3730 3731 /* first try to make some room by pushing left and right */ 3732 if (data_size && path->nodes[1]) { 3733 int space_needed = data_size; 3734 3735 if (slot < btrfs_header_nritems(l)) 3736 space_needed -= btrfs_leaf_free_space(l); 3737 3738 wret = push_leaf_right(trans, root, path, space_needed, 3739 space_needed, 0, 0); 3740 if (wret < 0) 3741 return wret; 3742 if (wret) { 3743 space_needed = data_size; 3744 if (slot > 0) 3745 space_needed -= btrfs_leaf_free_space(l); 3746 wret = push_leaf_left(trans, root, path, space_needed, 3747 space_needed, 0, (u32)-1); 3748 if (wret < 0) 3749 return wret; 3750 } 3751 l = path->nodes[0]; 3752 3753 /* did the pushes work?
*/ 3754 if (btrfs_leaf_free_space(l) >= data_size) 3755 return 0; 3756 } 3757 3758 if (!path->nodes[1]) { 3759 ret = insert_new_root(trans, root, path, 1); 3760 if (ret) 3761 return ret; 3762 } 3763 again: 3764 split = 1; 3765 l = path->nodes[0]; 3766 slot = path->slots[0]; 3767 nritems = btrfs_header_nritems(l); 3768 mid = (nritems + 1) / 2; 3769 3770 if (mid <= slot) { 3771 if (nritems == 1 || 3772 leaf_space_used(l, mid, nritems - mid) + data_size > 3773 BTRFS_LEAF_DATA_SIZE(fs_info)) { 3774 if (slot >= nritems) { 3775 split = 0; 3776 } else { 3777 mid = slot; 3778 if (mid != nritems && 3779 leaf_space_used(l, mid, nritems - mid) + 3780 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) { 3781 if (data_size && !tried_avoid_double) 3782 goto push_for_double; 3783 split = 2; 3784 } 3785 } 3786 } 3787 } else { 3788 if (leaf_space_used(l, 0, mid) + data_size > 3789 BTRFS_LEAF_DATA_SIZE(fs_info)) { 3790 if (!extend && data_size && slot == 0) { 3791 split = 0; 3792 } else if ((extend || !data_size) && slot == 0) { 3793 mid = 1; 3794 } else { 3795 mid = slot; 3796 if (mid != nritems && 3797 leaf_space_used(l, mid, nritems - mid) + 3798 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) { 3799 if (data_size && !tried_avoid_double) 3800 goto push_for_double; 3801 split = 2; 3802 } 3803 } 3804 } 3805 } 3806 3807 if (split == 0) 3808 btrfs_cpu_key_to_disk(&disk_key, ins_key); 3809 else 3810 btrfs_item_key(l, &disk_key, mid); 3811 3812 /* 3813 * We have to use BTRFS_NESTING_NEW_ROOT here if we've done a double 3814 * split, because we're only allowed to have MAX_LOCKDEP_SUBCLASSES 3815 * subclasses, which is 8 at the time of this patch, and we've maxed it 3816 * out. In the future we could add a 3817 * BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just 3818 * use BTRFS_NESTING_NEW_ROOT. 3819 */ 3820 right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, 3821 &disk_key, 0, l->start, 0, 3822 num_doubles ? BTRFS_NESTING_NEW_ROOT : 3823 BTRFS_NESTING_SPLIT); 3824 if (IS_ERR(right)) 3825 return PTR_ERR(right); 3826 3827 root_add_used(root, fs_info->nodesize); 3828 3829 if (split == 0) { 3830 if (mid <= slot) { 3831 btrfs_set_header_nritems(right, 0); 3832 insert_ptr(trans, path, &disk_key, 3833 right->start, path->slots[1] + 1, 1); 3834 btrfs_tree_unlock(path->nodes[0]); 3835 free_extent_buffer(path->nodes[0]); 3836 path->nodes[0] = right; 3837 path->slots[0] = 0; 3838 path->slots[1] += 1; 3839 } else { 3840 btrfs_set_header_nritems(right, 0); 3841 insert_ptr(trans, path, &disk_key, 3842 right->start, path->slots[1], 1); 3843 btrfs_tree_unlock(path->nodes[0]); 3844 free_extent_buffer(path->nodes[0]); 3845 path->nodes[0] = right; 3846 path->slots[0] = 0; 3847 if (path->slots[1] == 0) 3848 fixup_low_keys(path, &disk_key, 1); 3849 } 3850 /* 3851 * We create a new leaf 'right' for the required ins_len and 3852 * we'll do btrfs_mark_buffer_dirty() on this leaf after copying 3853 * the content (of size ins_len) to 'right'.
3854 */ 3855 return ret; 3856 } 3857 3858 copy_for_split(trans, path, l, right, slot, mid, nritems); 3859 3860 if (split == 2) { 3861 BUG_ON(num_doubles != 0); 3862 num_doubles++; 3863 goto again; 3864 } 3865 3866 return 0; 3867 3868 push_for_double: 3869 push_for_double_split(trans, root, path, data_size); 3870 tried_avoid_double = 1; 3871 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size) 3872 return 0; 3873 goto again; 3874 } 3875 3876 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans, 3877 struct btrfs_root *root, 3878 struct btrfs_path *path, int ins_len) 3879 { 3880 struct btrfs_key key; 3881 struct extent_buffer *leaf; 3882 struct btrfs_file_extent_item *fi; 3883 u64 extent_len = 0; 3884 u32 item_size; 3885 int ret; 3886 3887 leaf = path->nodes[0]; 3888 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3889 3890 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY && 3891 key.type != BTRFS_EXTENT_CSUM_KEY); 3892 3893 if (btrfs_leaf_free_space(leaf) >= ins_len) 3894 return 0; 3895 3896 item_size = btrfs_item_size(leaf, path->slots[0]); 3897 if (key.type == BTRFS_EXTENT_DATA_KEY) { 3898 fi = btrfs_item_ptr(leaf, path->slots[0], 3899 struct btrfs_file_extent_item); 3900 extent_len = btrfs_file_extent_num_bytes(leaf, fi); 3901 } 3902 btrfs_release_path(path); 3903 3904 path->keep_locks = 1; 3905 path->search_for_split = 1; 3906 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 3907 path->search_for_split = 0; 3908 if (ret > 0) 3909 ret = -EAGAIN; 3910 if (ret < 0) 3911 goto err; 3912 3913 ret = -EAGAIN; 3914 leaf = path->nodes[0]; 3915 /* if our item isn't there, return now */ 3916 if (item_size != btrfs_item_size(leaf, path->slots[0])) 3917 goto err; 3918 3919 /* the leaf has changed, it now has room. return now */ 3920 if (btrfs_leaf_free_space(path->nodes[0]) >= ins_len) 3921 goto err; 3922 3923 if (key.type == BTRFS_EXTENT_DATA_KEY) { 3924 fi = btrfs_item_ptr(leaf, path->slots[0], 3925 struct btrfs_file_extent_item); 3926 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi)) 3927 goto err; 3928 } 3929 3930 ret = split_leaf(trans, root, &key, path, ins_len, 1); 3931 if (ret) 3932 goto err; 3933 3934 path->keep_locks = 0; 3935 btrfs_unlock_up_safe(path, 1); 3936 return 0; 3937 err: 3938 path->keep_locks = 0; 3939 return ret; 3940 } 3941 3942 static noinline int split_item(struct btrfs_path *path, 3943 const struct btrfs_key *new_key, 3944 unsigned long split_offset) 3945 { 3946 struct extent_buffer *leaf; 3947 int orig_slot, slot; 3948 char *buf; 3949 u32 nritems; 3950 u32 item_size; 3951 u32 orig_offset; 3952 struct btrfs_disk_key disk_key; 3953 3954 leaf = path->nodes[0]; 3955 BUG_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item)); 3956 3957 orig_slot = path->slots[0]; 3958 orig_offset = btrfs_item_offset(leaf, path->slots[0]); 3959 item_size = btrfs_item_size(leaf, path->slots[0]); 3960 3961 buf = kmalloc(item_size, GFP_NOFS); 3962 if (!buf) 3963 return -ENOMEM; 3964 3965 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, 3966 path->slots[0]), item_size); 3967 3968 slot = path->slots[0] + 1; 3969 nritems = btrfs_header_nritems(leaf); 3970 if (slot != nritems) { 3971 /* shift the items */ 3972 memmove_leaf_items(leaf, slot + 1, slot, nritems - slot); 3973 } 3974 3975 btrfs_cpu_key_to_disk(&disk_key, new_key); 3976 btrfs_set_item_key(leaf, &disk_key, slot); 3977 3978 btrfs_set_item_offset(leaf, slot, orig_offset); 3979 btrfs_set_item_size(leaf, slot, item_size - split_offset); 3980 3981 btrfs_set_item_offset(leaf, orig_slot, 3982 orig_offset 

/*
 * make the item pointed to by the path smaller.  new_size indicates
 * how small to make it, and from_end tells us if we just chop bytes
 * off the end of the item or if we shift the item to chop bytes off
 * the front.
 */
void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
{
	int slot;
	struct extent_buffer *leaf;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data_start;
	unsigned int old_size;
	unsigned int size_diff;
	int i;
	struct btrfs_map_token token;

	leaf = path->nodes[0];
	slot = path->slots[0];

	old_size = btrfs_item_size(leaf, slot);
	if (old_size == new_size)
		return;

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(leaf);

	old_data_start = btrfs_item_offset(leaf, slot);

	size_diff = old_size - new_size;

	BUG_ON(slot < 0);
	BUG_ON(slot >= nritems);

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers */
	btrfs_init_map_token(&token, leaf);
	for (i = slot; i < nritems; i++) {
		u32 ioff;

		ioff = btrfs_token_item_offset(&token, i);
		btrfs_set_token_item_offset(&token, i, ioff + size_diff);
	}

	/* shift the data */
	if (from_end) {
		memmove_leaf_data(leaf, data_end + size_diff, data_end,
				  old_data_start + new_size - data_end);
	} else {
		struct btrfs_disk_key disk_key;
		u64 offset;

		btrfs_item_key(leaf, &disk_key, slot);

		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
			unsigned long ptr;
			struct btrfs_file_extent_item *fi;

			fi = btrfs_item_ptr(leaf, slot,
					    struct btrfs_file_extent_item);
			fi = (struct btrfs_file_extent_item *)(
			     (unsigned long)fi - size_diff);

			if (btrfs_file_extent_type(leaf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE) {
				ptr = btrfs_item_ptr_offset(leaf, slot);
				memmove_extent_buffer(leaf, ptr,
				      (unsigned long)fi,
				      BTRFS_FILE_EXTENT_INLINE_DATA_START);
			}
		}

		memmove_leaf_data(leaf, data_end + size_diff, data_end,
				  old_data_start - data_end);

		offset = btrfs_disk_key_offset(&disk_key);
		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
		btrfs_set_item_key(leaf, &disk_key, slot);
		if (slot == 0)
			fixup_low_keys(path, &disk_key, 1);
	}

	btrfs_set_item_size(leaf, slot, new_size);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(leaf) < 0) {
		btrfs_print_leaf(leaf);
		BUG();
	}
}
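
/*
 * Example (illustrative sketch, not part of the original source): chopping
 * 'drop_bytes' off the end of the item the path points at; 'drop_bytes' is
 * a hypothetical local.  Passing from_end == 0 instead would drop the bytes
 * from the front and bump the item's key offset accordingly.
 *
 *	u32 new_size = btrfs_item_size(path->nodes[0], path->slots[0]) -
 *		       drop_bytes;
 *
 *	btrfs_truncate_item(path, new_size, 1);
 */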

/*
 * make the item pointed to by the path bigger, data_size is the added size.
 */
void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
{
	int slot;
	struct extent_buffer *leaf;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data;
	unsigned int old_size;
	int i;
	struct btrfs_map_token token;

	leaf = path->nodes[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(leaf);

	if (btrfs_leaf_free_space(leaf) < data_size) {
		btrfs_print_leaf(leaf);
		BUG();
	}
	slot = path->slots[0];
	old_data = btrfs_item_data_end(leaf, slot);

	BUG_ON(slot < 0);
	if (slot >= nritems) {
		btrfs_print_leaf(leaf);
		btrfs_crit(leaf->fs_info, "slot %d too large, nritems %d",
			   slot, nritems);
		BUG();
	}

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers */
	btrfs_init_map_token(&token, leaf);
	for (i = slot; i < nritems; i++) {
		u32 ioff;

		ioff = btrfs_token_item_offset(&token, i);
		btrfs_set_token_item_offset(&token, i, ioff - data_size);
	}

	/* shift the data */
	memmove_leaf_data(leaf, data_end - data_size, data_end,
			  old_data - data_end);

	data_end = old_data;
	old_size = btrfs_item_size(leaf, slot);
	btrfs_set_item_size(leaf, slot, old_size + data_size);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(leaf) < 0) {
		btrfs_print_leaf(leaf);
		BUG();
	}
}
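
/*
 * Example (illustrative sketch, not part of the original source): growing
 * the current item by 'add_bytes' and appending payload after its old end.
 * The caller must already hold a leaf with enough free space (typically by
 * passing a large enough ins_len to btrfs_search_slot); btrfs_extend_item()
 * BUG()s otherwise.  'add_bytes' and 'src' are hypothetical.
 *
 *	struct extent_buffer *leaf = path->nodes[0];
 *	u32 old_size = btrfs_item_size(leaf, path->slots[0]);
 *
 *	btrfs_extend_item(path, add_bytes);
 *	write_extent_buffer(leaf, src,
 *			    btrfs_item_ptr_offset(leaf, path->slots[0]) +
 *			    old_size, add_bytes);
 */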

/*
 * Make space in the leaf before inserting one or more items.
 *
 * @root:	root we are inserting items to
 * @path:	points to the leaf/slot where we are going to insert new items
 * @batch:	information about the batch of items to insert
 *
 * Main purpose is to save stack depth by doing the bulk of the work in a
 * function that doesn't call btrfs_search_slot
 */
static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
				   const struct btrfs_item_batch *batch)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	u32 nritems;
	unsigned int data_end;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *leaf;
	int slot;
	struct btrfs_map_token token;
	u32 total_size;

	/*
	 * Before anything else, update keys in the parent and other ancestors
	 * if needed, then release the write locks on them, so that other tasks
	 * can use them while we modify the leaf.
	 */
	if (path->slots[0] == 0) {
		btrfs_cpu_key_to_disk(&disk_key, &batch->keys[0]);
		fixup_low_keys(path, &disk_key, 1);
	}
	btrfs_unlock_up_safe(path, 1);

	leaf = path->nodes[0];
	slot = path->slots[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(leaf);
	total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item));

	if (btrfs_leaf_free_space(leaf) < total_size) {
		btrfs_print_leaf(leaf);
		btrfs_crit(fs_info, "not enough freespace need %u have %d",
			   total_size, btrfs_leaf_free_space(leaf));
		BUG();
	}

	btrfs_init_map_token(&token, leaf);
	if (slot != nritems) {
		unsigned int old_data = btrfs_item_data_end(leaf, slot);

		if (old_data < data_end) {
			btrfs_print_leaf(leaf);
			btrfs_crit(fs_info,
		"item at slot %d with data offset %u beyond data end of leaf %u",
				   slot, old_data, data_end);
			BUG();
		}
		/*
		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
		 */
		/* first correct the data pointers */
		for (i = slot; i < nritems; i++) {
			u32 ioff;

			ioff = btrfs_token_item_offset(&token, i);
			btrfs_set_token_item_offset(&token, i,
						    ioff - batch->total_data_size);
		}
		/* shift the items */
		memmove_leaf_items(leaf, slot + batch->nr, slot, nritems - slot);

		/* shift the data */
		memmove_leaf_data(leaf, data_end - batch->total_data_size,
				  data_end, old_data - data_end);
		data_end = old_data;
	}

	/* setup the item for the new data */
	for (i = 0; i < batch->nr; i++) {
		btrfs_cpu_key_to_disk(&disk_key, &batch->keys[i]);
		btrfs_set_item_key(leaf, &disk_key, slot + i);
		data_end -= batch->data_sizes[i];
		btrfs_set_token_item_offset(&token, slot + i, data_end);
		btrfs_set_token_item_size(&token, slot + i, batch->data_sizes[i]);
	}

	btrfs_set_header_nritems(leaf, nritems + batch->nr);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(leaf) < 0) {
		btrfs_print_leaf(leaf);
		BUG();
	}
}

/*
 * Insert a new item into a leaf.
 *
 * @root:      The root of the btree.
 * @path:      A path pointing to the target leaf and slot.
 * @key:       The key of the new item.
 * @data_size: The size of the data associated with the new key.
 */
void btrfs_setup_item_for_insert(struct btrfs_root *root,
				 struct btrfs_path *path,
				 const struct btrfs_key *key,
				 u32 data_size)
{
	struct btrfs_item_batch batch;

	batch.keys = key;
	batch.data_sizes = &data_size;
	batch.total_data_size = data_size;
	batch.nr = 1;

	setup_items_for_insert(root, path, &batch);
}

/*
 * Given a key and some data, insert items into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     const struct btrfs_item_batch *batch)
{
	int ret = 0;
	int slot;
	u32 total_size;

	total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item));
	ret = btrfs_search_slot(trans, root, &batch->keys[0], path, total_size, 1);
	if (ret == 0)
		return -EEXIST;
	if (ret < 0)
		return ret;

	slot = path->slots[0];
	BUG_ON(slot < 0);

	setup_items_for_insert(root, path, batch);
	return 0;
}
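
/*
 * Example (illustrative sketch, not part of the original source): inserting
 * two empty items in one pass.  The keys must be in ascending order and all
 * items land in the same leaf; the caller then writes each payload.  The
 * key values and sizes are hypothetical.
 *
 *	struct btrfs_item_batch batch;
 *	struct btrfs_key keys[2];
 *	u32 sizes[2] = { first_size, second_size };
 *
 *	keys[0] = first_key;
 *	keys[1] = second_key;
 *	batch.keys = keys;
 *	batch.data_sizes = sizes;
 *	batch.total_data_size = sizes[0] + sizes[1];
 *	batch.nr = 2;
 *
 *	ret = btrfs_insert_empty_items(trans, root, path, &batch);
 *	if (ret)	(-EEXIST if keys[0] is already present)
 *		return ret;
 *	(write the payloads via write_extent_buffer(), then
 *	 btrfs_mark_buffer_dirty(path->nodes[0]))
 */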

/*
 * Given a key and some data, insert an item into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *cpu_key, void *data,
		      u32 data_size)
{
	int ret = 0;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
	if (!ret) {
		leaf = path->nodes[0];
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		write_extent_buffer(leaf, data, ptr, data_size);
		btrfs_mark_buffer_dirty(leaf);
	}
	btrfs_free_path(path);
	return ret;
}
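
/*
 * Example (illustrative sketch, not part of the original source): the
 * simplest insertion path, for a caller that has the item's payload in a
 * plain buffer.  'my_item' and the key field values are hypothetical.
 *
 *	struct btrfs_key key = {
 *		.objectid = objectid,
 *		.type = BTRFS_EXTENT_CSUM_KEY,	(any valid item type)
 *		.offset = offset,
 *	};
 *
 *	ret = btrfs_insert_item(trans, root, &key, &my_item,
 *				sizeof(my_item));
 */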

/*
 * This function duplicates an item, giving 'new_key' to the new item.
 * It guarantees both items live in the same tree leaf and the new item is
 * contiguous with the original item.
 *
 * This allows us to split a file extent in place, keeping a lock on the leaf
 * the entire time.
 */
int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path,
			 const struct btrfs_key *new_key)
{
	struct extent_buffer *leaf;
	int ret;
	u32 item_size;

	leaf = path->nodes[0];
	item_size = btrfs_item_size(leaf, path->slots[0]);
	ret = setup_leaf_for_split(trans, root, path,
				   item_size + sizeof(struct btrfs_item));
	if (ret)
		return ret;

	path->slots[0]++;
	btrfs_setup_item_for_insert(root, path, new_key, item_size);
	leaf = path->nodes[0];
	memcpy_extent_buffer(leaf,
			     btrfs_item_ptr_offset(leaf, path->slots[0]),
			     btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
			     item_size);
	return 0;
}
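
/*
 * Example (illustrative sketch, not part of the original source): splitting
 * a file extent item at file offset 'split_pos' by first cloning it under a
 * new key, roughly in the spirit of the extent dropping code.  'split_pos'
 * is hypothetical, and the two copies still need their extent fields
 * trimmed afterwards.
 *
 *	struct btrfs_key new_key = key;
 *
 *	new_key.offset = split_pos;
 *	ret = btrfs_duplicate_item(trans, root, path, &new_key);
 *	if (ret == -EAGAIN)
 *		(the leaf changed under us; redo the search and retry)
 *	if (ret)
 *		return ret;
 */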

/*
 * delete the pointer from a given node.
 *
 * the tree should have been previously balanced so the deletion does not
 * empty a node.
 *
 * This is exported for use inside btrfs-progs, don't un-export it.
 */
void btrfs_del_ptr(struct btrfs_root *root, struct btrfs_path *path, int level,
		   int slot)
{
	struct extent_buffer *parent = path->nodes[level];
	u32 nritems;
	int ret;

	nritems = btrfs_header_nritems(parent);
	if (slot != nritems - 1) {
		if (level) {
			ret = btrfs_tree_mod_log_insert_move(parent, slot,
					slot + 1, nritems - slot - 1);
			BUG_ON(ret < 0);
		}
		memmove_extent_buffer(parent,
			      btrfs_node_key_ptr_offset(parent, slot),
			      btrfs_node_key_ptr_offset(parent, slot + 1),
			      sizeof(struct btrfs_key_ptr) *
			      (nritems - slot - 1));
	} else if (level) {
		ret = btrfs_tree_mod_log_insert_key(parent, slot,
						    BTRFS_MOD_LOG_KEY_REMOVE);
		BUG_ON(ret < 0);
	}

	nritems--;
	btrfs_set_header_nritems(parent, nritems);
	if (nritems == 0 && parent == root->node) {
		BUG_ON(btrfs_header_level(root->node) != 1);
		/* just turn the root into a leaf and break */
		btrfs_set_header_level(root->node, 0);
	} else if (slot == 0) {
		struct btrfs_disk_key disk_key;

		btrfs_node_key(parent, &disk_key, 0);
		fixup_low_keys(path, &disk_key, level + 1);
	}
	btrfs_mark_buffer_dirty(parent);
}

/*
 * a helper function to delete the leaf pointed to by path->slots[1] and
 * path->nodes[1].
 *
 * This deletes the pointer in path->nodes[1] and frees the leaf
 * block extent.
 *
 * The path must have already been setup for deleting the leaf, including
 * all the proper balancing.  path->nodes[1] must be locked.
 */
static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *leaf)
{
	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
	btrfs_del_ptr(root, path, 1, path->slots[1]);

	/*
	 * btrfs_free_extent is expensive, we want to make sure we
	 * aren't holding any locks when we call it
	 */
	btrfs_unlock_up_safe(path, 0);

	root_sub_used(root, leaf->len);

	atomic_inc(&leaf->refs);
	btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1);
	free_extent_buffer_stale(leaf);
}

/*
 * Delete the items at the leaf level in path.  If that empties
 * the leaf, remove it from the tree.
 */
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int slot, int nr)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	int ret = 0;
	int wret;
	u32 nritems;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	if (slot + nr != nritems) {
		const u32 last_off = btrfs_item_offset(leaf, slot + nr - 1);
		const int data_end = leaf_data_end(leaf);
		struct btrfs_map_token token;
		u32 dsize = 0;
		int i;

		for (i = 0; i < nr; i++)
			dsize += btrfs_item_size(leaf, slot + i);

		memmove_leaf_data(leaf, data_end + dsize, data_end,
				  last_off - data_end);

		btrfs_init_map_token(&token, leaf);
		for (i = slot + nr; i < nritems; i++) {
			u32 ioff;

			ioff = btrfs_token_item_offset(&token, i);
			btrfs_set_token_item_offset(&token, i, ioff + dsize);
		}

		memmove_leaf_items(leaf, slot, slot + nr, nritems - slot - nr);
	}
	btrfs_set_header_nritems(leaf, nritems - nr);
	nritems -= nr;

	/* delete the leaf if we've emptied it */
	if (nritems == 0) {
		if (leaf == root->node) {
			btrfs_set_header_level(leaf, 0);
		} else {
			btrfs_clear_buffer_dirty(trans, leaf);
			btrfs_del_leaf(trans, root, path, leaf);
		}
	} else {
		int used = leaf_space_used(leaf, 0, nritems);

		if (slot == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_item_key(leaf, &disk_key, 0);
			fixup_low_keys(path, &disk_key, 1);
		}

		/*
		 * Try to delete the leaf if it is mostly empty. We do this by
		 * trying to move all its items into its left and right neighbours.
		 * If we can't move all the items, then we don't delete it - it's
		 * not ideal, but future insertions might fill the leaf with more
		 * items, or items from other leaves might be moved later into our
		 * leaf due to deletions on those leaves.
		 */
		if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
			u32 min_push_space;

			/*
			 * push_leaf_left fixes the path.  Make sure the path
			 * still points to our leaf for a possible call to
			 * btrfs_del_ptr below.
			 */
			slot = path->slots[1];
			atomic_inc(&leaf->refs);
			/*
			 * We want to be able to at least push one item to the
			 * left neighbour leaf, and that's the first item.
			 */
			min_push_space = sizeof(struct btrfs_item) +
				btrfs_item_size(leaf, 0);
			wret = push_leaf_left(trans, root, path, 0,
					      min_push_space, 1, (u32)-1);
			if (wret < 0 && wret != -ENOSPC)
				ret = wret;

			if (path->nodes[0] == leaf &&
			    btrfs_header_nritems(leaf)) {
				/*
				 * If we were not able to push all items from our
				 * leaf to its left neighbour, then attempt to
				 * either push all the remaining items to the
				 * right neighbour or none. There's no advantage
				 * in pushing only some items, instead of all, as
				 * it's pointless to end up with a leaf having
				 * too few items while the neighbours can be full
				 * or nearly full.
				 */
				nritems = btrfs_header_nritems(leaf);
				min_push_space = leaf_space_used(leaf, 0, nritems);
				wret = push_leaf_right(trans, root, path, 0,
						       min_push_space, 1, 0);
				if (wret < 0 && wret != -ENOSPC)
					ret = wret;
			}

			if (btrfs_header_nritems(leaf) == 0) {
				path->slots[1] = slot;
				btrfs_del_leaf(trans, root, path, leaf);
				free_extent_buffer(leaf);
				ret = 0;
			} else {
				/*
				 * If we're still in the path, make sure we're
				 * dirty.  Otherwise, one of the push_leaf
				 * functions must have already dirtied this
				 * buffer.
				 */
				if (path->nodes[0] == leaf)
					btrfs_mark_buffer_dirty(leaf);
				free_extent_buffer(leaf);
			}
		} else {
			btrfs_mark_buffer_dirty(leaf);
		}
	}
	return ret;
}
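
/*
 * Example (illustrative sketch, not part of the original source): deleting
 * just the item the path currently points at.  For nr > 1 the items must
 * occupy adjacent slots in the same leaf.
 *
 *	ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
 *	if (ret)
 *		return ret;
 */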

/*
 * A helper function to walk down the tree starting at min_key, and looking
 * for nodes or leaves that have a minimum transaction id.
 * This is used by the btree defrag code, and tree logging.
 *
 * This does not cow, but it does stuff the starting key it finds back
 * into min_key, so you can call btrfs_search_slot with cow=1 on the
 * key and get a writable path.
 *
 * This honors path->lowest_level to prevent descent past a given level
 * of the tree.
 *
 * min_trans indicates the oldest transaction that you are interested
 * in walking through.  Any nodes or leaves older than min_trans are
 * skipped over (without reading them).
 *
 * returns zero if something useful was found, < 0 on error and 1 if there
 * was nothing in the tree that matched the search criteria.
 */
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_path *path,
			 u64 min_trans)
{
	struct extent_buffer *cur;
	struct btrfs_key found_key;
	int slot;
	int sret;
	u32 nritems;
	int level;
	int ret = 1;
	int keep_locks = path->keep_locks;

	ASSERT(!path->nowait);
	path->keep_locks = 1;
again:
	cur = btrfs_read_lock_root_node(root);
	level = btrfs_header_level(cur);
	WARN_ON(path->nodes[level]);
	path->nodes[level] = cur;
	path->locks[level] = BTRFS_READ_LOCK;

	if (btrfs_header_generation(cur) < min_trans) {
		ret = 1;
		goto out;
	}
	while (1) {
		nritems = btrfs_header_nritems(cur);
		level = btrfs_header_level(cur);
		sret = btrfs_bin_search(cur, 0, min_key, &slot);
		if (sret < 0) {
			ret = sret;
			goto out;
		}

		/* at the lowest level, we're done, setup the path and exit */
		if (level == path->lowest_level) {
			if (slot >= nritems)
				goto find_next_key;
			ret = 0;
			path->slots[level] = slot;
			btrfs_item_key_to_cpu(cur, &found_key, slot);
			goto out;
		}
		if (sret && slot > 0)
			slot--;
		/*
		 * check this node pointer against the min_trans parameter.
		 * If it is too old, skip to the next one.
		 */
		while (slot < nritems) {
			u64 gen;

			gen = btrfs_node_ptr_generation(cur, slot);
			if (gen < min_trans) {
				slot++;
				continue;
			}
			break;
		}
find_next_key:
		/*
		 * we didn't find a candidate key in this node, walk forward
		 * and find another one
		 */
		if (slot >= nritems) {
			path->slots[level] = slot;
			sret = btrfs_find_next_key(root, path, min_key, level,
						   min_trans);
			if (sret == 0) {
				btrfs_release_path(path);
				goto again;
			} else {
				goto out;
			}
		}
		/* save our key for returning back */
		btrfs_node_key_to_cpu(cur, &found_key, slot);
		path->slots[level] = slot;
		if (level == path->lowest_level) {
			ret = 0;
			goto out;
		}
		cur = btrfs_read_node_slot(cur, slot);
		if (IS_ERR(cur)) {
			ret = PTR_ERR(cur);
			goto out;
		}

		btrfs_tree_read_lock(cur);

		path->locks[level - 1] = BTRFS_READ_LOCK;
		path->nodes[level - 1] = cur;
		unlock_up(path, level, 1, 0, NULL);
	}
out:
	path->keep_locks = keep_locks;
	if (ret == 0) {
		btrfs_unlock_up_safe(path, path->lowest_level + 1);
		memcpy(min_key, &found_key, sizeof(found_key));
	}
	return ret;
}
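
/*
 * Example (illustrative sketch, not part of the original source): scanning
 * every item changed in or after transaction 'min_trans', in the spirit of
 * the tree logging and defrag callers.  Advancing min_key past the returned
 * key is shown naively here; real callers do a careful three-field
 * increment to avoid overflowing key.offset.
 *
 *	struct btrfs_key min_key = { 0 };	(start at the smallest key)
 *
 *	while (1) {
 *		ret = btrfs_search_forward(root, &min_key, path, min_trans);
 *		if (ret)	(1: nothing newer left, < 0: error)
 *			break;
 *		(process the item at path->nodes[0] / path->slots[0])
 *		btrfs_release_path(path);
 *		min_key.offset++;
 *	}
 */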

/*
 * this is similar to btrfs_next_leaf, but does not try to preserve
 * and fixup the path.  It looks for and returns the next key in the
 * tree based on the current path and the min_trans parameters.
 *
 * 0 is returned if another key is found, < 0 if there are any errors
 * and 1 is returned if there are no higher keys in the tree
 *
 * path->keep_locks should be set to 1 on the search made before
 * calling this function.
 */
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int level, u64 min_trans)
{
	int slot;
	struct extent_buffer *c;

	WARN_ON(!path->keep_locks && !path->skip_locking);
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			return 1;

		slot = path->slots[level] + 1;
		c = path->nodes[level];
next:
		if (slot >= btrfs_header_nritems(c)) {
			int ret;
			int orig_lowest;
			struct btrfs_key cur_key;

			if (level + 1 >= BTRFS_MAX_LEVEL ||
			    !path->nodes[level + 1])
				return 1;

			if (path->locks[level + 1] || path->skip_locking) {
				level++;
				continue;
			}

			slot = btrfs_header_nritems(c) - 1;
			if (level == 0)
				btrfs_item_key_to_cpu(c, &cur_key, slot);
			else
				btrfs_node_key_to_cpu(c, &cur_key, slot);

			orig_lowest = path->lowest_level;
			btrfs_release_path(path);
			path->lowest_level = level;
			ret = btrfs_search_slot(NULL, root, &cur_key, path,
						0, 0);
			path->lowest_level = orig_lowest;
			if (ret < 0)
				return ret;

			c = path->nodes[level];
			slot = path->slots[level];
			if (ret == 0)
				slot++;
			goto next;
		}

		if (level == 0)
			btrfs_item_key_to_cpu(c, key, slot);
		else {
			u64 gen = btrfs_node_ptr_generation(c, slot);

			if (gen < min_trans) {
				slot++;
				goto next;
			}
			btrfs_node_key_to_cpu(c, key, slot);
		}
		return 0;
	}
	return 1;
}

int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
			u64 time_seq)
{
	int slot;
	int level;
	struct extent_buffer *c;
	struct extent_buffer *next;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	bool need_commit_sem = false;
	u32 nritems;
	int ret;
	int i;

	/*
	 * The nowait semantics are used only for write paths, where we don't
	 * use the tree mod log and sequence numbers.
	 */
	if (time_seq)
		ASSERT(!path->nowait);

	nritems = btrfs_header_nritems(path->nodes[0]);
	if (nritems == 0)
		return 1;

	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
again:
	level = 1;
	next = NULL;
	btrfs_release_path(path);

	path->keep_locks = 1;

	if (time_seq) {
		ret = btrfs_search_old_slot(root, &key, path, time_seq);
	} else {
		if (path->need_commit_sem) {
			path->need_commit_sem = 0;
			need_commit_sem = true;
			if (path->nowait) {
				if (!down_read_trylock(&fs_info->commit_root_sem)) {
					ret = -EAGAIN;
					goto done;
				}
			} else {
				down_read(&fs_info->commit_root_sem);
			}
		}
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	}
	path->keep_locks = 0;

	if (ret < 0)
		goto done;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * by releasing the path above we dropped all our locks.  A balance
	 * could have added more items next to the key that used to be
	 * at the very end of the block.  So, check again here and
	 * advance the path if there are now more items available.
	 */
	if (nritems > 0 && path->slots[0] < nritems - 1) {
		if (ret == 0)
			path->slots[0]++;
		ret = 0;
		goto done;
	}
	/*
	 * So the above check misses one case:
	 * - after releasing the path above, someone has removed the item that
	 *   used to be at the very end of the block, and balance between leaves
	 *   gets another one with bigger key.offset to replace it.
	 *
	 * This one should be returned as well, or we can get leaf corruption
	 * later (esp. in __btrfs_drop_extents()).
	 *
	 * And a bit more explanation about this check:
	 * with ret > 0, the key isn't found, the path points to the slot
	 * where it should be inserted, so the path->slots[0] item must be the
	 * bigger one.
	 */
	if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
		ret = 0;
		goto done;
	}

	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level]) {
			ret = 1;
			goto done;
		}

		slot = path->slots[level] + 1;
		c = path->nodes[level];
		if (slot >= btrfs_header_nritems(c)) {
			level++;
			if (level == BTRFS_MAX_LEVEL) {
				ret = 1;
				goto done;
			}
			continue;
		}

		/*
		 * Our current level is where we're going to start from, and to
		 * make sure lockdep doesn't complain we need to drop our locks
		 * and nodes from 0 to our current level.
		 */
		for (i = 0; i < level; i++) {
			if (path->locks[i]) {
				btrfs_tree_read_unlock(path->nodes[i]);
				path->locks[i] = 0;
			}
			free_extent_buffer(path->nodes[i]);
			path->nodes[i] = NULL;
		}

		next = c;
		ret = read_block_for_search(root, path, &next, level,
					    slot, &key);
		if (ret == -EAGAIN && !path->nowait)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret && path->nowait) {
				ret = -EAGAIN;
				goto done;
			}
			if (!ret && time_seq) {
				/*
				 * If we don't get the lock, we may be racing
				 * with push_leaf_left, holding that lock while
				 * itself waiting for the leaf we've currently
				 * locked. To solve this situation, we give up
				 * on our lock and cycle.
				 */
				free_extent_buffer(next);
				btrfs_release_path(path);
				cond_resched();
				goto again;
			}
			if (!ret)
				btrfs_tree_read_lock(next);
		}
		break;
	}
	path->slots[level] = slot;
	while (1) {
		level--;
		path->nodes[level] = next;
		path->slots[level] = 0;
		if (!path->skip_locking)
			path->locks[level] = BTRFS_READ_LOCK;
		if (!level)
			break;

		ret = read_block_for_search(root, path, &next, level,
					    0, &key);
		if (ret == -EAGAIN && !path->nowait)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			if (path->nowait) {
				if (!btrfs_try_tree_read_lock(next)) {
					ret = -EAGAIN;
					goto done;
				}
			} else {
				btrfs_tree_read_lock(next);
			}
		}
	}
	ret = 0;
done:
	unlock_up(path, 0, 1, 0, NULL);
	if (need_commit_sem) {
		int ret2;

		path->need_commit_sem = 1;
		ret2 = finish_need_commit_sem_search(path);
		up_read(&fs_info->commit_root_sem);
		if (ret2)
			ret = ret2;
	}

	return ret;
}

int btrfs_next_old_item(struct btrfs_root *root, struct btrfs_path *path, u64 time_seq)
{
	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
		return btrfs_next_old_leaf(root, path, time_seq);
	return 0;
}
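
/*
 * Example (illustrative sketch, not part of the original source): the usual
 * forward iteration pattern built on btrfs_next_leaf()/btrfs_next_item()
 * (the time_seq == 0 wrappers from ctree.h).  'key' positions the search
 * and 'found_key' receives each visited key; stop conditions are up to the
 * caller.
 *
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret < 0)
 *		return ret;
 *	while (1) {
 *		struct extent_buffer *leaf = path->nodes[0];
 *
 *		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
 *			ret = btrfs_next_leaf(root, path);
 *			if (ret)	(1: end of tree, < 0: error)
 *				break;
 *			leaf = path->nodes[0];
 *		}
 *		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
 *		(process the item, break when found_key is out of range)
 *		path->slots[0]++;
 *	}
 *	btrfs_release_path(path);
 */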

/*
 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
 * searching until it gets past min_objectid or finds an item of 'type'
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == type)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < type)
			break;
	}
	return 1;
}

/*
 * search in the extent tree to find a previous Metadata/Data extent item
 * with a minimum objectid.
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_extent_item(struct btrfs_root *root,
			       struct btrfs_path *path, u64 min_objectid)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
		    found_key.type == BTRFS_METADATA_ITEM_KEY)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < BTRFS_EXTENT_ITEM_KEY)
			break;
	}
	return 1;
}

int __init btrfs_ctree_init(void)
{
	btrfs_path_cachep = kmem_cache_create("btrfs_path",
					      sizeof(struct btrfs_path), 0,
					      SLAB_MEM_SPREAD, NULL);
	if (!btrfs_path_cachep)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_ctree_exit(void)
{
	kmem_cache_destroy(btrfs_path_cachep);
}