// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mm.h>
#include <linux/error-injection.h>
#include "messages.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"
#include "tree-mod-log.h"
#include "tree-checker.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "relocation.h"
#include "file-item.h"

static struct kmem_cache *btrfs_path_cachep;

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
                      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                      const struct btrfs_key *ins_key, struct btrfs_path *path,
                      int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
                          struct extent_buffer *dst,
                          struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
                              struct extent_buffer *dst_buf,
                              struct extent_buffer *src_buf);

static const struct btrfs_csums {
        u16             size;
        const char      name[10];
        const char      driver[12];
} btrfs_csums[] = {
        [BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
        [BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" },
        [BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
        [BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
                                     .driver = "blake2b-256" },
};

/*
 * The leaf data grows from end-to-front in the node.  This returns the address
 * of the start of the last item, which is the stop of the leaf data stack.
 */
static unsigned int leaf_data_end(const struct extent_buffer *leaf)
{
        u32 nr = btrfs_header_nritems(leaf);

        if (nr == 0)
                return BTRFS_LEAF_DATA_SIZE(leaf->fs_info);
        return btrfs_item_offset(leaf, nr - 1);
}

/*
 * Move data in a @leaf (using memmove, safe for overlapping ranges).
 *
 * @leaf:       leaf that we're doing a memmove on
 * @dst_offset: item data offset we're moving to
 * @src_offset: item data offset we're moving from
 * @len:        length of the data we're moving
 *
 * Wrapper around memmove_extent_buffer() that takes into account the header on
 * the leaf.  The btrfs_item offsets start directly after the header, so we
 * have to adjust any offsets to account for the header in the leaf.  This
 * handles that math to simplify the callers.
 */
static inline void memmove_leaf_data(const struct extent_buffer *leaf,
                                     unsigned long dst_offset,
                                     unsigned long src_offset,
                                     unsigned long len)
{
        memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, 0) + dst_offset,
                              btrfs_item_nr_offset(leaf, 0) + src_offset, len);
}

/*
 * Copy item data from @src into @dst at the given @offset.
 *
 * @dst:        destination leaf that we're copying into
 * @src:        source leaf that we're copying from
 * @dst_offset: item data offset we're copying to
 * @src_offset: item data offset we're copying from
 * @len:        length of the data we're copying
 *
 * Wrapper around copy_extent_buffer() that takes into account the header on
 * the leaf.  The btrfs_item offsets start directly after the header, so we
 * have to adjust any offsets to account for the header in the leaf.  This
 * handles that math to simplify the callers.
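 *
 * Note that, as in memmove_leaf_data(), both offsets here are relative to the
 * start of the item area (the first byte after the leaf header), not to the
 * start of the extent buffer.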
 */
static inline void copy_leaf_data(const struct extent_buffer *dst,
                                  const struct extent_buffer *src,
                                  unsigned long dst_offset,
                                  unsigned long src_offset, unsigned long len)
{
        copy_extent_buffer(dst, src, btrfs_item_nr_offset(dst, 0) + dst_offset,
                           btrfs_item_nr_offset(src, 0) + src_offset, len);
}

/*
 * Move items in a @leaf (using memmove).
 *
 * @leaf:       leaf that we're moving the items in
 * @dst_item:   the item nr we're copying into
 * @src_item:   the item nr we're copying from
 * @nr_items:   the number of items to copy
 *
 * Wrapper around memmove_extent_buffer() that does the math to get the
 * appropriate offsets into the leaf from the item numbers.
 */
static inline void memmove_leaf_items(const struct extent_buffer *leaf,
                                      int dst_item, int src_item, int nr_items)
{
        memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, dst_item),
                              btrfs_item_nr_offset(leaf, src_item),
                              nr_items * sizeof(struct btrfs_item));
}

/*
 * Copy items from @src into @dst at the given @offset.
 *
 * @dst:        destination leaf for the items
 * @src:        source leaf for the items
 * @dst_item:   the item nr we're copying into
 * @src_item:   the item nr we're copying from
 * @nr_items:   the number of items to copy
 *
 * Wrapper around copy_extent_buffer() that does the math to get the
 * appropriate offsets into the leaf from the item numbers.
 */
static inline void copy_leaf_items(const struct extent_buffer *dst,
                                   const struct extent_buffer *src,
                                   int dst_item, int src_item, int nr_items)
{
        copy_extent_buffer(dst, src, btrfs_item_nr_offset(dst, dst_item),
                           btrfs_item_nr_offset(src, src_item),
                           nr_items * sizeof(struct btrfs_item));
}

/* This exists for btrfs-progs usages. */
u16 btrfs_csum_type_size(u16 type)
{
        return btrfs_csums[type].size;
}

int btrfs_super_csum_size(const struct btrfs_super_block *s)
{
        u16 t = btrfs_super_csum_type(s);
        /*
         * csum type is validated at mount time
         */
        return btrfs_csum_type_size(t);
}

const char *btrfs_super_csum_name(u16 csum_type)
{
        /* csum type is validated at mount time */
        return btrfs_csums[csum_type].name;
}

/*
 * Return driver name if defined, otherwise the name that's also a valid driver
 * name
 */
const char *btrfs_super_csum_driver(u16 csum_type)
{
        /* csum type is validated at mount time */
        return btrfs_csums[csum_type].driver[0] ?
                btrfs_csums[csum_type].driver :
                btrfs_csums[csum_type].name;
}

size_t __attribute_const__ btrfs_get_num_csums(void)
{
        return ARRAY_SIZE(btrfs_csums);
}

struct btrfs_path *btrfs_alloc_path(void)
{
        might_sleep();

        return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}

/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
        if (!p)
                return;
        btrfs_release_path(p);
        kmem_cache_free(btrfs_path_cachep, p);
}

/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path.
 *
 * It is safe to call this on paths that have no locks or extent buffers held.
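 *
 * Slot numbers are reset to 0 for every level as well, so a released path can
 * be reused for a new search without any further cleanup.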
210 */ 211 noinline void btrfs_release_path(struct btrfs_path *p) 212 { 213 int i; 214 215 for (i = 0; i < BTRFS_MAX_LEVEL; i++) { 216 p->slots[i] = 0; 217 if (!p->nodes[i]) 218 continue; 219 if (p->locks[i]) { 220 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]); 221 p->locks[i] = 0; 222 } 223 free_extent_buffer(p->nodes[i]); 224 p->nodes[i] = NULL; 225 } 226 } 227 228 /* 229 * We want the transaction abort to print stack trace only for errors where the 230 * cause could be a bug, eg. due to ENOSPC, and not for common errors that are 231 * caused by external factors. 232 */ 233 bool __cold abort_should_print_stack(int errno) 234 { 235 switch (errno) { 236 case -EIO: 237 case -EROFS: 238 case -ENOMEM: 239 return false; 240 } 241 return true; 242 } 243 244 /* 245 * safely gets a reference on the root node of a tree. A lock 246 * is not taken, so a concurrent writer may put a different node 247 * at the root of the tree. See btrfs_lock_root_node for the 248 * looping required. 249 * 250 * The extent buffer returned by this has a reference taken, so 251 * it won't disappear. It may stop being the root of the tree 252 * at any time because there are no locks held. 253 */ 254 struct extent_buffer *btrfs_root_node(struct btrfs_root *root) 255 { 256 struct extent_buffer *eb; 257 258 while (1) { 259 rcu_read_lock(); 260 eb = rcu_dereference(root->node); 261 262 /* 263 * RCU really hurts here, we could free up the root node because 264 * it was COWed but we may not get the new root node yet so do 265 * the inc_not_zero dance and if it doesn't work then 266 * synchronize_rcu and try again. 267 */ 268 if (atomic_inc_not_zero(&eb->refs)) { 269 rcu_read_unlock(); 270 break; 271 } 272 rcu_read_unlock(); 273 synchronize_rcu(); 274 } 275 return eb; 276 } 277 278 /* 279 * Cowonly root (not-shareable trees, everything not subvolume or reloc roots), 280 * just get put onto a simple dirty list. Transaction walks this list to make 281 * sure they get properly updated on disk. 282 */ 283 static void add_root_to_dirty_list(struct btrfs_root *root) 284 { 285 struct btrfs_fs_info *fs_info = root->fs_info; 286 287 if (test_bit(BTRFS_ROOT_DIRTY, &root->state) || 288 !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state)) 289 return; 290 291 spin_lock(&fs_info->trans_lock); 292 if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) { 293 /* Want the extent tree to be the last on the list */ 294 if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID) 295 list_move_tail(&root->dirty_list, 296 &fs_info->dirty_cowonly_roots); 297 else 298 list_move(&root->dirty_list, 299 &fs_info->dirty_cowonly_roots); 300 } 301 spin_unlock(&fs_info->trans_lock); 302 } 303 304 /* 305 * used by snapshot creation to make a copy of a root for a tree with 306 * a given objectid. The buffer with the new root node is returned in 307 * cow_ret, and this func returns zero on success or a negative error code. 
308 */ 309 int btrfs_copy_root(struct btrfs_trans_handle *trans, 310 struct btrfs_root *root, 311 struct extent_buffer *buf, 312 struct extent_buffer **cow_ret, u64 new_root_objectid) 313 { 314 struct btrfs_fs_info *fs_info = root->fs_info; 315 struct extent_buffer *cow; 316 int ret = 0; 317 int level; 318 struct btrfs_disk_key disk_key; 319 320 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && 321 trans->transid != fs_info->running_transaction->transid); 322 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && 323 trans->transid != root->last_trans); 324 325 level = btrfs_header_level(buf); 326 if (level == 0) 327 btrfs_item_key(buf, &disk_key, 0); 328 else 329 btrfs_node_key(buf, &disk_key, 0); 330 331 cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid, 332 &disk_key, level, buf->start, 0, 333 BTRFS_NESTING_NEW_ROOT); 334 if (IS_ERR(cow)) 335 return PTR_ERR(cow); 336 337 copy_extent_buffer_full(cow, buf); 338 btrfs_set_header_bytenr(cow, cow->start); 339 btrfs_set_header_generation(cow, trans->transid); 340 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV); 341 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN | 342 BTRFS_HEADER_FLAG_RELOC); 343 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID) 344 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC); 345 else 346 btrfs_set_header_owner(cow, new_root_objectid); 347 348 write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid); 349 350 WARN_ON(btrfs_header_generation(buf) > trans->transid); 351 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID) 352 ret = btrfs_inc_ref(trans, root, cow, 1); 353 else 354 ret = btrfs_inc_ref(trans, root, cow, 0); 355 if (ret) { 356 btrfs_tree_unlock(cow); 357 free_extent_buffer(cow); 358 btrfs_abort_transaction(trans, ret); 359 return ret; 360 } 361 362 btrfs_mark_buffer_dirty(cow); 363 *cow_ret = cow; 364 return 0; 365 } 366 367 /* 368 * check if the tree block can be shared by multiple trees 369 */ 370 int btrfs_block_can_be_shared(struct btrfs_trans_handle *trans, 371 struct btrfs_root *root, 372 struct extent_buffer *buf) 373 { 374 /* 375 * Tree blocks not in shareable trees and tree roots are never shared. 376 * If a block was allocated after the last snapshot and the block was 377 * not allocated by tree relocation, we know the block is not shared. 378 */ 379 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && 380 buf != root->node && 381 (btrfs_header_generation(buf) <= 382 btrfs_root_last_snapshot(&root->root_item) || 383 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))) { 384 if (buf != root->commit_root) 385 return 1; 386 /* 387 * An extent buffer that used to be the commit root may still be 388 * shared because the tree height may have increased and it 389 * became a child of a higher level root. This can happen when 390 * snapshotting a subvolume created in the current transaction. 391 */ 392 if (btrfs_header_generation(buf) == trans->transid) 393 return 1; 394 } 395 396 return 0; 397 } 398 399 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans, 400 struct btrfs_root *root, 401 struct extent_buffer *buf, 402 struct extent_buffer *cow, 403 int *last_ref) 404 { 405 struct btrfs_fs_info *fs_info = root->fs_info; 406 u64 refs; 407 u64 owner; 408 u64 flags; 409 u64 new_flags = 0; 410 int ret; 411 412 /* 413 * Backrefs update rules: 414 * 415 * Always use full backrefs for extent pointers in tree block 416 * allocated by tree relocation. 
         *
         * If a shared tree block is no longer referenced by its owner
         * tree (btrfs_header_owner(buf) == root->root_key.objectid),
         * use full backrefs for extent pointers in tree block.
         *
         * If a tree block is being relocated
         * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
         * use full backrefs for extent pointers in tree block.
         * The reason for this is some operations (such as drop tree)
         * are only allowed for blocks that use full backrefs.
         */

        if (btrfs_block_can_be_shared(trans, root, buf)) {
                ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
                                               btrfs_header_level(buf), 1,
                                               &refs, &flags);
                if (ret)
                        return ret;
                if (unlikely(refs == 0)) {
                        btrfs_crit(fs_info,
                "found 0 references for tree block at bytenr %llu level %d root %llu",
                                   buf->start, btrfs_header_level(buf),
                                   btrfs_root_id(root));
                        ret = -EUCLEAN;
                        btrfs_abort_transaction(trans, ret);
                        return ret;
                }
        } else {
                refs = 1;
                if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
                    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
                        flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
                else
                        flags = 0;
        }

        owner = btrfs_header_owner(buf);
        BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
               !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

        if (refs > 1) {
                if ((owner == root->root_key.objectid ||
                     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
                    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
                        ret = btrfs_inc_ref(trans, root, buf, 1);
                        if (ret)
                                return ret;

                        if (root->root_key.objectid ==
                            BTRFS_TREE_RELOC_OBJECTID) {
                                ret = btrfs_dec_ref(trans, root, buf, 0);
                                if (ret)
                                        return ret;
                                ret = btrfs_inc_ref(trans, root, cow, 1);
                                if (ret)
                                        return ret;
                        }
                        new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
                } else {

                        if (root->root_key.objectid ==
                            BTRFS_TREE_RELOC_OBJECTID)
                                ret = btrfs_inc_ref(trans, root, cow, 1);
                        else
                                ret = btrfs_inc_ref(trans, root, cow, 0);
                        if (ret)
                                return ret;
                }
                if (new_flags != 0) {
                        ret = btrfs_set_disk_extent_flags(trans, buf, new_flags);
                        if (ret)
                                return ret;
                }
        } else {
                if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
                        if (root->root_key.objectid ==
                            BTRFS_TREE_RELOC_OBJECTID)
                                ret = btrfs_inc_ref(trans, root, cow, 1);
                        else
                                ret = btrfs_inc_ref(trans, root, cow, 0);
                        if (ret)
                                return ret;
                        ret = btrfs_dec_ref(trans, root, buf, 1);
                        if (ret)
                                return ret;
                }
                btrfs_clear_buffer_dirty(trans, buf);
                *last_ref = 1;
        }
        return 0;
}

/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
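 *
 * Returns 0 on success, with *cow_ret pointing to the new (locked) buffer, or
 * a negative errno on failure; most failure paths in here also abort the
 * transaction.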
520 */ 521 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, 522 struct btrfs_root *root, 523 struct extent_buffer *buf, 524 struct extent_buffer *parent, int parent_slot, 525 struct extent_buffer **cow_ret, 526 u64 search_start, u64 empty_size, 527 enum btrfs_lock_nesting nest) 528 { 529 struct btrfs_fs_info *fs_info = root->fs_info; 530 struct btrfs_disk_key disk_key; 531 struct extent_buffer *cow; 532 int level, ret; 533 int last_ref = 0; 534 int unlock_orig = 0; 535 u64 parent_start = 0; 536 537 if (*cow_ret == buf) 538 unlock_orig = 1; 539 540 btrfs_assert_tree_write_locked(buf); 541 542 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && 543 trans->transid != fs_info->running_transaction->transid); 544 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && 545 trans->transid != root->last_trans); 546 547 level = btrfs_header_level(buf); 548 549 if (level == 0) 550 btrfs_item_key(buf, &disk_key, 0); 551 else 552 btrfs_node_key(buf, &disk_key, 0); 553 554 if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent) 555 parent_start = parent->start; 556 557 cow = btrfs_alloc_tree_block(trans, root, parent_start, 558 root->root_key.objectid, &disk_key, level, 559 search_start, empty_size, nest); 560 if (IS_ERR(cow)) 561 return PTR_ERR(cow); 562 563 /* cow is set to blocking by btrfs_init_new_buffer */ 564 565 copy_extent_buffer_full(cow, buf); 566 btrfs_set_header_bytenr(cow, cow->start); 567 btrfs_set_header_generation(cow, trans->transid); 568 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV); 569 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN | 570 BTRFS_HEADER_FLAG_RELOC); 571 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) 572 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC); 573 else 574 btrfs_set_header_owner(cow, root->root_key.objectid); 575 576 write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid); 577 578 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref); 579 if (ret) { 580 btrfs_tree_unlock(cow); 581 free_extent_buffer(cow); 582 btrfs_abort_transaction(trans, ret); 583 return ret; 584 } 585 586 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) { 587 ret = btrfs_reloc_cow_block(trans, root, buf, cow); 588 if (ret) { 589 btrfs_tree_unlock(cow); 590 free_extent_buffer(cow); 591 btrfs_abort_transaction(trans, ret); 592 return ret; 593 } 594 } 595 596 if (buf == root->node) { 597 WARN_ON(parent && parent != buf); 598 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID || 599 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV) 600 parent_start = buf->start; 601 602 ret = btrfs_tree_mod_log_insert_root(root->node, cow, true); 603 if (ret < 0) { 604 btrfs_tree_unlock(cow); 605 free_extent_buffer(cow); 606 btrfs_abort_transaction(trans, ret); 607 return ret; 608 } 609 atomic_inc(&cow->refs); 610 rcu_assign_pointer(root->node, cow); 611 612 btrfs_free_tree_block(trans, btrfs_root_id(root), buf, 613 parent_start, last_ref); 614 free_extent_buffer(buf); 615 add_root_to_dirty_list(root); 616 } else { 617 WARN_ON(trans->transid != btrfs_header_generation(parent)); 618 ret = btrfs_tree_mod_log_insert_key(parent, parent_slot, 619 BTRFS_MOD_LOG_KEY_REPLACE); 620 if (ret) { 621 btrfs_tree_unlock(cow); 622 free_extent_buffer(cow); 623 btrfs_abort_transaction(trans, ret); 624 return ret; 625 } 626 btrfs_set_node_blockptr(parent, parent_slot, 627 cow->start); 628 btrfs_set_node_ptr_generation(parent, parent_slot, 629 trans->transid); 630 btrfs_mark_buffer_dirty(parent); 631 if (last_ref) { 632 ret = 
btrfs_tree_mod_log_free_eb(buf); 633 if (ret) { 634 btrfs_tree_unlock(cow); 635 free_extent_buffer(cow); 636 btrfs_abort_transaction(trans, ret); 637 return ret; 638 } 639 } 640 btrfs_free_tree_block(trans, btrfs_root_id(root), buf, 641 parent_start, last_ref); 642 } 643 if (unlock_orig) 644 btrfs_tree_unlock(buf); 645 free_extent_buffer_stale(buf); 646 btrfs_mark_buffer_dirty(cow); 647 *cow_ret = cow; 648 return 0; 649 } 650 651 static inline int should_cow_block(struct btrfs_trans_handle *trans, 652 struct btrfs_root *root, 653 struct extent_buffer *buf) 654 { 655 if (btrfs_is_testing(root->fs_info)) 656 return 0; 657 658 /* Ensure we can see the FORCE_COW bit */ 659 smp_mb__before_atomic(); 660 661 /* 662 * We do not need to cow a block if 663 * 1) this block is not created or changed in this transaction; 664 * 2) this block does not belong to TREE_RELOC tree; 665 * 3) the root is not forced COW. 666 * 667 * What is forced COW: 668 * when we create snapshot during committing the transaction, 669 * after we've finished copying src root, we must COW the shared 670 * block to ensure the metadata consistency. 671 */ 672 if (btrfs_header_generation(buf) == trans->transid && 673 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) && 674 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID && 675 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) && 676 !test_bit(BTRFS_ROOT_FORCE_COW, &root->state)) 677 return 0; 678 return 1; 679 } 680 681 /* 682 * cows a single block, see __btrfs_cow_block for the real work. 683 * This version of it has extra checks so that a block isn't COWed more than 684 * once per transaction, as long as it hasn't been written yet 685 */ 686 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans, 687 struct btrfs_root *root, struct extent_buffer *buf, 688 struct extent_buffer *parent, int parent_slot, 689 struct extent_buffer **cow_ret, 690 enum btrfs_lock_nesting nest) 691 { 692 struct btrfs_fs_info *fs_info = root->fs_info; 693 u64 search_start; 694 int ret; 695 696 if (unlikely(test_bit(BTRFS_ROOT_DELETING, &root->state))) { 697 btrfs_abort_transaction(trans, -EUCLEAN); 698 btrfs_crit(fs_info, 699 "attempt to COW block %llu on root %llu that is being deleted", 700 buf->start, btrfs_root_id(root)); 701 return -EUCLEAN; 702 } 703 704 /* 705 * COWing must happen through a running transaction, which always 706 * matches the current fs generation (it's a transaction with a state 707 * less than TRANS_STATE_UNBLOCKED). If it doesn't, then turn the fs 708 * into error state to prevent the commit of any transaction. 709 */ 710 if (unlikely(trans->transaction != fs_info->running_transaction || 711 trans->transid != fs_info->generation)) { 712 btrfs_abort_transaction(trans, -EUCLEAN); 713 btrfs_crit(fs_info, 714 "unexpected transaction when attempting to COW block %llu on root %llu, transaction %llu running transaction %llu fs generation %llu", 715 buf->start, btrfs_root_id(root), trans->transid, 716 fs_info->running_transaction->transid, 717 fs_info->generation); 718 return -EUCLEAN; 719 } 720 721 if (!should_cow_block(trans, root, buf)) { 722 *cow_ret = buf; 723 return 0; 724 } 725 726 search_start = buf->start & ~((u64)SZ_1G - 1); 727 728 /* 729 * Before CoWing this block for later modification, check if it's 730 * the subtree root and do the delayed subtree trace if needed. 731 * 732 * Also We don't care about the error, as it's handled internally. 
733 */ 734 btrfs_qgroup_trace_subtree_after_cow(trans, root, buf); 735 ret = __btrfs_cow_block(trans, root, buf, parent, 736 parent_slot, cow_ret, search_start, 0, nest); 737 738 trace_btrfs_cow_block(root, buf, *cow_ret); 739 740 return ret; 741 } 742 ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO); 743 744 /* 745 * helper function for defrag to decide if two blocks pointed to by a 746 * node are actually close by 747 */ 748 static int close_blocks(u64 blocknr, u64 other, u32 blocksize) 749 { 750 if (blocknr < other && other - (blocknr + blocksize) < 32768) 751 return 1; 752 if (blocknr > other && blocknr - (other + blocksize) < 32768) 753 return 1; 754 return 0; 755 } 756 757 #ifdef __LITTLE_ENDIAN 758 759 /* 760 * Compare two keys, on little-endian the disk order is same as CPU order and 761 * we can avoid the conversion. 762 */ 763 static int comp_keys(const struct btrfs_disk_key *disk_key, 764 const struct btrfs_key *k2) 765 { 766 const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key; 767 768 return btrfs_comp_cpu_keys(k1, k2); 769 } 770 771 #else 772 773 /* 774 * compare two keys in a memcmp fashion 775 */ 776 static int comp_keys(const struct btrfs_disk_key *disk, 777 const struct btrfs_key *k2) 778 { 779 struct btrfs_key k1; 780 781 btrfs_disk_key_to_cpu(&k1, disk); 782 783 return btrfs_comp_cpu_keys(&k1, k2); 784 } 785 #endif 786 787 /* 788 * same as comp_keys only with two btrfs_key's 789 */ 790 int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2) 791 { 792 if (k1->objectid > k2->objectid) 793 return 1; 794 if (k1->objectid < k2->objectid) 795 return -1; 796 if (k1->type > k2->type) 797 return 1; 798 if (k1->type < k2->type) 799 return -1; 800 if (k1->offset > k2->offset) 801 return 1; 802 if (k1->offset < k2->offset) 803 return -1; 804 return 0; 805 } 806 807 /* 808 * this is used by the defrag code to go through all the 809 * leaves pointed to by a node and reallocate them so that 810 * disk order is close to key order 811 */ 812 int btrfs_realloc_node(struct btrfs_trans_handle *trans, 813 struct btrfs_root *root, struct extent_buffer *parent, 814 int start_slot, u64 *last_ret, 815 struct btrfs_key *progress) 816 { 817 struct btrfs_fs_info *fs_info = root->fs_info; 818 struct extent_buffer *cur; 819 u64 blocknr; 820 u64 search_start = *last_ret; 821 u64 last_block = 0; 822 u64 other; 823 u32 parent_nritems; 824 int end_slot; 825 int i; 826 int err = 0; 827 u32 blocksize; 828 int progress_passed = 0; 829 struct btrfs_disk_key disk_key; 830 831 /* 832 * COWing must happen through a running transaction, which always 833 * matches the current fs generation (it's a transaction with a state 834 * less than TRANS_STATE_UNBLOCKED). If it doesn't, then turn the fs 835 * into error state to prevent the commit of any transaction. 
836 */ 837 if (unlikely(trans->transaction != fs_info->running_transaction || 838 trans->transid != fs_info->generation)) { 839 btrfs_abort_transaction(trans, -EUCLEAN); 840 btrfs_crit(fs_info, 841 "unexpected transaction when attempting to reallocate parent %llu for root %llu, transaction %llu running transaction %llu fs generation %llu", 842 parent->start, btrfs_root_id(root), trans->transid, 843 fs_info->running_transaction->transid, 844 fs_info->generation); 845 return -EUCLEAN; 846 } 847 848 parent_nritems = btrfs_header_nritems(parent); 849 blocksize = fs_info->nodesize; 850 end_slot = parent_nritems - 1; 851 852 if (parent_nritems <= 1) 853 return 0; 854 855 for (i = start_slot; i <= end_slot; i++) { 856 int close = 1; 857 858 btrfs_node_key(parent, &disk_key, i); 859 if (!progress_passed && comp_keys(&disk_key, progress) < 0) 860 continue; 861 862 progress_passed = 1; 863 blocknr = btrfs_node_blockptr(parent, i); 864 if (last_block == 0) 865 last_block = blocknr; 866 867 if (i > 0) { 868 other = btrfs_node_blockptr(parent, i - 1); 869 close = close_blocks(blocknr, other, blocksize); 870 } 871 if (!close && i < end_slot) { 872 other = btrfs_node_blockptr(parent, i + 1); 873 close = close_blocks(blocknr, other, blocksize); 874 } 875 if (close) { 876 last_block = blocknr; 877 continue; 878 } 879 880 cur = btrfs_read_node_slot(parent, i); 881 if (IS_ERR(cur)) 882 return PTR_ERR(cur); 883 if (search_start == 0) 884 search_start = last_block; 885 886 btrfs_tree_lock(cur); 887 err = __btrfs_cow_block(trans, root, cur, parent, i, 888 &cur, search_start, 889 min(16 * blocksize, 890 (end_slot - i) * blocksize), 891 BTRFS_NESTING_COW); 892 if (err) { 893 btrfs_tree_unlock(cur); 894 free_extent_buffer(cur); 895 break; 896 } 897 search_start = cur->start; 898 last_block = cur->start; 899 *last_ret = search_start; 900 btrfs_tree_unlock(cur); 901 free_extent_buffer(cur); 902 } 903 return err; 904 } 905 906 /* 907 * Search for a key in the given extent_buffer. 908 * 909 * The lower boundary for the search is specified by the slot number @first_slot. 910 * Use a value of 0 to search over the whole extent buffer. Works for both 911 * leaves and nodes. 912 * 913 * The slot in the extent buffer is returned via @slot. If the key exists in the 914 * extent buffer, then @slot will point to the slot where the key is, otherwise 915 * it points to the slot where you would insert the key. 916 * 917 * Slot may point to the total number of items (i.e. one position beyond the last 918 * key) if the key is bigger than the last key in the extent buffer. 919 */ 920 int btrfs_bin_search(struct extent_buffer *eb, int first_slot, 921 const struct btrfs_key *key, int *slot) 922 { 923 unsigned long p; 924 int item_size; 925 /* 926 * Use unsigned types for the low and high slots, so that we get a more 927 * efficient division in the search loop below. 
928 */ 929 u32 low = first_slot; 930 u32 high = btrfs_header_nritems(eb); 931 int ret; 932 const int key_size = sizeof(struct btrfs_disk_key); 933 934 if (unlikely(low > high)) { 935 btrfs_err(eb->fs_info, 936 "%s: low (%u) > high (%u) eb %llu owner %llu level %d", 937 __func__, low, high, eb->start, 938 btrfs_header_owner(eb), btrfs_header_level(eb)); 939 return -EINVAL; 940 } 941 942 if (btrfs_header_level(eb) == 0) { 943 p = offsetof(struct btrfs_leaf, items); 944 item_size = sizeof(struct btrfs_item); 945 } else { 946 p = offsetof(struct btrfs_node, ptrs); 947 item_size = sizeof(struct btrfs_key_ptr); 948 } 949 950 while (low < high) { 951 unsigned long oip; 952 unsigned long offset; 953 struct btrfs_disk_key *tmp; 954 struct btrfs_disk_key unaligned; 955 int mid; 956 957 mid = (low + high) / 2; 958 offset = p + mid * item_size; 959 oip = offset_in_page(offset); 960 961 if (oip + key_size <= PAGE_SIZE) { 962 const unsigned long idx = get_eb_page_index(offset); 963 char *kaddr = page_address(eb->pages[idx]); 964 965 oip = get_eb_offset_in_page(eb, offset); 966 tmp = (struct btrfs_disk_key *)(kaddr + oip); 967 } else { 968 read_extent_buffer(eb, &unaligned, offset, key_size); 969 tmp = &unaligned; 970 } 971 972 ret = comp_keys(tmp, key); 973 974 if (ret < 0) 975 low = mid + 1; 976 else if (ret > 0) 977 high = mid; 978 else { 979 *slot = mid; 980 return 0; 981 } 982 } 983 *slot = low; 984 return 1; 985 } 986 987 static void root_add_used(struct btrfs_root *root, u32 size) 988 { 989 spin_lock(&root->accounting_lock); 990 btrfs_set_root_used(&root->root_item, 991 btrfs_root_used(&root->root_item) + size); 992 spin_unlock(&root->accounting_lock); 993 } 994 995 static void root_sub_used(struct btrfs_root *root, u32 size) 996 { 997 spin_lock(&root->accounting_lock); 998 btrfs_set_root_used(&root->root_item, 999 btrfs_root_used(&root->root_item) - size); 1000 spin_unlock(&root->accounting_lock); 1001 } 1002 1003 /* given a node and slot number, this reads the blocks it points to. The 1004 * extent buffer is returned with a reference taken (but unlocked). 1005 */ 1006 struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent, 1007 int slot) 1008 { 1009 int level = btrfs_header_level(parent); 1010 struct btrfs_tree_parent_check check = { 0 }; 1011 struct extent_buffer *eb; 1012 1013 if (slot < 0 || slot >= btrfs_header_nritems(parent)) 1014 return ERR_PTR(-ENOENT); 1015 1016 ASSERT(level); 1017 1018 check.level = level - 1; 1019 check.transid = btrfs_node_ptr_generation(parent, slot); 1020 check.owner_root = btrfs_header_owner(parent); 1021 check.has_first_key = true; 1022 btrfs_node_key_to_cpu(parent, &check.first_key, slot); 1023 1024 eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot), 1025 &check); 1026 if (IS_ERR(eb)) 1027 return eb; 1028 if (!extent_buffer_uptodate(eb)) { 1029 free_extent_buffer(eb); 1030 return ERR_PTR(-EIO); 1031 } 1032 1033 return eb; 1034 } 1035 1036 /* 1037 * node level balancing, used to make sure nodes are in proper order for 1038 * item deletion. We balance from the top down, so we have to make sure 1039 * that a deletion won't leave an node completely empty later on. 
1040 */ 1041 static noinline int balance_level(struct btrfs_trans_handle *trans, 1042 struct btrfs_root *root, 1043 struct btrfs_path *path, int level) 1044 { 1045 struct btrfs_fs_info *fs_info = root->fs_info; 1046 struct extent_buffer *right = NULL; 1047 struct extent_buffer *mid; 1048 struct extent_buffer *left = NULL; 1049 struct extent_buffer *parent = NULL; 1050 int ret = 0; 1051 int wret; 1052 int pslot; 1053 int orig_slot = path->slots[level]; 1054 u64 orig_ptr; 1055 1056 ASSERT(level > 0); 1057 1058 mid = path->nodes[level]; 1059 1060 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK); 1061 WARN_ON(btrfs_header_generation(mid) != trans->transid); 1062 1063 orig_ptr = btrfs_node_blockptr(mid, orig_slot); 1064 1065 if (level < BTRFS_MAX_LEVEL - 1) { 1066 parent = path->nodes[level + 1]; 1067 pslot = path->slots[level + 1]; 1068 } 1069 1070 /* 1071 * deal with the case where there is only one pointer in the root 1072 * by promoting the node below to a root 1073 */ 1074 if (!parent) { 1075 struct extent_buffer *child; 1076 1077 if (btrfs_header_nritems(mid) != 1) 1078 return 0; 1079 1080 /* promote the child to a root */ 1081 child = btrfs_read_node_slot(mid, 0); 1082 if (IS_ERR(child)) { 1083 ret = PTR_ERR(child); 1084 goto out; 1085 } 1086 1087 btrfs_tree_lock(child); 1088 ret = btrfs_cow_block(trans, root, child, mid, 0, &child, 1089 BTRFS_NESTING_COW); 1090 if (ret) { 1091 btrfs_tree_unlock(child); 1092 free_extent_buffer(child); 1093 goto out; 1094 } 1095 1096 ret = btrfs_tree_mod_log_insert_root(root->node, child, true); 1097 if (ret < 0) { 1098 btrfs_tree_unlock(child); 1099 free_extent_buffer(child); 1100 btrfs_abort_transaction(trans, ret); 1101 goto out; 1102 } 1103 rcu_assign_pointer(root->node, child); 1104 1105 add_root_to_dirty_list(root); 1106 btrfs_tree_unlock(child); 1107 1108 path->locks[level] = 0; 1109 path->nodes[level] = NULL; 1110 btrfs_clear_buffer_dirty(trans, mid); 1111 btrfs_tree_unlock(mid); 1112 /* once for the path */ 1113 free_extent_buffer(mid); 1114 1115 root_sub_used(root, mid->len); 1116 btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1); 1117 /* once for the root ptr */ 1118 free_extent_buffer_stale(mid); 1119 return 0; 1120 } 1121 if (btrfs_header_nritems(mid) > 1122 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4) 1123 return 0; 1124 1125 if (pslot) { 1126 left = btrfs_read_node_slot(parent, pslot - 1); 1127 if (IS_ERR(left)) { 1128 ret = PTR_ERR(left); 1129 left = NULL; 1130 goto out; 1131 } 1132 1133 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT); 1134 wret = btrfs_cow_block(trans, root, left, 1135 parent, pslot - 1, &left, 1136 BTRFS_NESTING_LEFT_COW); 1137 if (wret) { 1138 ret = wret; 1139 goto out; 1140 } 1141 } 1142 1143 if (pslot + 1 < btrfs_header_nritems(parent)) { 1144 right = btrfs_read_node_slot(parent, pslot + 1); 1145 if (IS_ERR(right)) { 1146 ret = PTR_ERR(right); 1147 right = NULL; 1148 goto out; 1149 } 1150 1151 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT); 1152 wret = btrfs_cow_block(trans, root, right, 1153 parent, pslot + 1, &right, 1154 BTRFS_NESTING_RIGHT_COW); 1155 if (wret) { 1156 ret = wret; 1157 goto out; 1158 } 1159 } 1160 1161 /* first, try to make some room in the middle buffer */ 1162 if (left) { 1163 orig_slot += btrfs_header_nritems(left); 1164 wret = push_node_left(trans, left, mid, 1); 1165 if (wret < 0) 1166 ret = wret; 1167 } 1168 1169 /* 1170 * then try to empty the right most buffer into the middle 1171 */ 1172 if (right) { 1173 wret = push_node_left(trans, mid, right, 1); 1174 if (wret < 0 && wret != -ENOSPC) 1175 
ret = wret; 1176 if (btrfs_header_nritems(right) == 0) { 1177 btrfs_clear_buffer_dirty(trans, right); 1178 btrfs_tree_unlock(right); 1179 ret = btrfs_del_ptr(trans, root, path, level + 1, pslot + 1); 1180 if (ret < 0) { 1181 free_extent_buffer_stale(right); 1182 right = NULL; 1183 goto out; 1184 } 1185 root_sub_used(root, right->len); 1186 btrfs_free_tree_block(trans, btrfs_root_id(root), right, 1187 0, 1); 1188 free_extent_buffer_stale(right); 1189 right = NULL; 1190 } else { 1191 struct btrfs_disk_key right_key; 1192 btrfs_node_key(right, &right_key, 0); 1193 ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1, 1194 BTRFS_MOD_LOG_KEY_REPLACE); 1195 if (ret < 0) { 1196 btrfs_abort_transaction(trans, ret); 1197 goto out; 1198 } 1199 btrfs_set_node_key(parent, &right_key, pslot + 1); 1200 btrfs_mark_buffer_dirty(parent); 1201 } 1202 } 1203 if (btrfs_header_nritems(mid) == 1) { 1204 /* 1205 * we're not allowed to leave a node with one item in the 1206 * tree during a delete. A deletion from lower in the tree 1207 * could try to delete the only pointer in this node. 1208 * So, pull some keys from the left. 1209 * There has to be a left pointer at this point because 1210 * otherwise we would have pulled some pointers from the 1211 * right 1212 */ 1213 if (unlikely(!left)) { 1214 btrfs_crit(fs_info, 1215 "missing left child when middle child only has 1 item, parent bytenr %llu level %d mid bytenr %llu root %llu", 1216 parent->start, btrfs_header_level(parent), 1217 mid->start, btrfs_root_id(root)); 1218 ret = -EUCLEAN; 1219 btrfs_abort_transaction(trans, ret); 1220 goto out; 1221 } 1222 wret = balance_node_right(trans, mid, left); 1223 if (wret < 0) { 1224 ret = wret; 1225 goto out; 1226 } 1227 if (wret == 1) { 1228 wret = push_node_left(trans, left, mid, 1); 1229 if (wret < 0) 1230 ret = wret; 1231 } 1232 BUG_ON(wret == 1); 1233 } 1234 if (btrfs_header_nritems(mid) == 0) { 1235 btrfs_clear_buffer_dirty(trans, mid); 1236 btrfs_tree_unlock(mid); 1237 ret = btrfs_del_ptr(trans, root, path, level + 1, pslot); 1238 if (ret < 0) { 1239 free_extent_buffer_stale(mid); 1240 mid = NULL; 1241 goto out; 1242 } 1243 root_sub_used(root, mid->len); 1244 btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1); 1245 free_extent_buffer_stale(mid); 1246 mid = NULL; 1247 } else { 1248 /* update the parent key to reflect our changes */ 1249 struct btrfs_disk_key mid_key; 1250 btrfs_node_key(mid, &mid_key, 0); 1251 ret = btrfs_tree_mod_log_insert_key(parent, pslot, 1252 BTRFS_MOD_LOG_KEY_REPLACE); 1253 if (ret < 0) { 1254 btrfs_abort_transaction(trans, ret); 1255 goto out; 1256 } 1257 btrfs_set_node_key(parent, &mid_key, pslot); 1258 btrfs_mark_buffer_dirty(parent); 1259 } 1260 1261 /* update the path */ 1262 if (left) { 1263 if (btrfs_header_nritems(left) > orig_slot) { 1264 atomic_inc(&left->refs); 1265 /* left was locked after cow */ 1266 path->nodes[level] = left; 1267 path->slots[level + 1] -= 1; 1268 path->slots[level] = orig_slot; 1269 if (mid) { 1270 btrfs_tree_unlock(mid); 1271 free_extent_buffer(mid); 1272 } 1273 } else { 1274 orig_slot -= btrfs_header_nritems(left); 1275 path->slots[level] = orig_slot; 1276 } 1277 } 1278 /* double check we haven't messed things up */ 1279 if (orig_ptr != 1280 btrfs_node_blockptr(path->nodes[level], path->slots[level])) 1281 BUG(); 1282 out: 1283 if (right) { 1284 btrfs_tree_unlock(right); 1285 free_extent_buffer(right); 1286 } 1287 if (left) { 1288 if (path->nodes[level] != left) 1289 btrfs_tree_unlock(left); 1290 free_extent_buffer(left); 1291 } 1292 return ret; 
1293 } 1294 1295 /* Node balancing for insertion. Here we only split or push nodes around 1296 * when they are completely full. This is also done top down, so we 1297 * have to be pessimistic. 1298 */ 1299 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans, 1300 struct btrfs_root *root, 1301 struct btrfs_path *path, int level) 1302 { 1303 struct btrfs_fs_info *fs_info = root->fs_info; 1304 struct extent_buffer *right = NULL; 1305 struct extent_buffer *mid; 1306 struct extent_buffer *left = NULL; 1307 struct extent_buffer *parent = NULL; 1308 int ret = 0; 1309 int wret; 1310 int pslot; 1311 int orig_slot = path->slots[level]; 1312 1313 if (level == 0) 1314 return 1; 1315 1316 mid = path->nodes[level]; 1317 WARN_ON(btrfs_header_generation(mid) != trans->transid); 1318 1319 if (level < BTRFS_MAX_LEVEL - 1) { 1320 parent = path->nodes[level + 1]; 1321 pslot = path->slots[level + 1]; 1322 } 1323 1324 if (!parent) 1325 return 1; 1326 1327 /* first, try to make some room in the middle buffer */ 1328 if (pslot) { 1329 u32 left_nr; 1330 1331 left = btrfs_read_node_slot(parent, pslot - 1); 1332 if (IS_ERR(left)) 1333 return PTR_ERR(left); 1334 1335 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT); 1336 1337 left_nr = btrfs_header_nritems(left); 1338 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) { 1339 wret = 1; 1340 } else { 1341 ret = btrfs_cow_block(trans, root, left, parent, 1342 pslot - 1, &left, 1343 BTRFS_NESTING_LEFT_COW); 1344 if (ret) 1345 wret = 1; 1346 else { 1347 wret = push_node_left(trans, left, mid, 0); 1348 } 1349 } 1350 if (wret < 0) 1351 ret = wret; 1352 if (wret == 0) { 1353 struct btrfs_disk_key disk_key; 1354 orig_slot += left_nr; 1355 btrfs_node_key(mid, &disk_key, 0); 1356 ret = btrfs_tree_mod_log_insert_key(parent, pslot, 1357 BTRFS_MOD_LOG_KEY_REPLACE); 1358 if (ret < 0) { 1359 btrfs_tree_unlock(left); 1360 free_extent_buffer(left); 1361 btrfs_abort_transaction(trans, ret); 1362 return ret; 1363 } 1364 btrfs_set_node_key(parent, &disk_key, pslot); 1365 btrfs_mark_buffer_dirty(parent); 1366 if (btrfs_header_nritems(left) > orig_slot) { 1367 path->nodes[level] = left; 1368 path->slots[level + 1] -= 1; 1369 path->slots[level] = orig_slot; 1370 btrfs_tree_unlock(mid); 1371 free_extent_buffer(mid); 1372 } else { 1373 orig_slot -= 1374 btrfs_header_nritems(left); 1375 path->slots[level] = orig_slot; 1376 btrfs_tree_unlock(left); 1377 free_extent_buffer(left); 1378 } 1379 return 0; 1380 } 1381 btrfs_tree_unlock(left); 1382 free_extent_buffer(left); 1383 } 1384 1385 /* 1386 * then try to empty the right most buffer into the middle 1387 */ 1388 if (pslot + 1 < btrfs_header_nritems(parent)) { 1389 u32 right_nr; 1390 1391 right = btrfs_read_node_slot(parent, pslot + 1); 1392 if (IS_ERR(right)) 1393 return PTR_ERR(right); 1394 1395 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT); 1396 1397 right_nr = btrfs_header_nritems(right); 1398 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) { 1399 wret = 1; 1400 } else { 1401 ret = btrfs_cow_block(trans, root, right, 1402 parent, pslot + 1, 1403 &right, BTRFS_NESTING_RIGHT_COW); 1404 if (ret) 1405 wret = 1; 1406 else { 1407 wret = balance_node_right(trans, right, mid); 1408 } 1409 } 1410 if (wret < 0) 1411 ret = wret; 1412 if (wret == 0) { 1413 struct btrfs_disk_key disk_key; 1414 1415 btrfs_node_key(right, &disk_key, 0); 1416 ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1, 1417 BTRFS_MOD_LOG_KEY_REPLACE); 1418 if (ret < 0) { 1419 btrfs_tree_unlock(right); 1420 free_extent_buffer(right); 1421 
btrfs_abort_transaction(trans, ret); 1422 return ret; 1423 } 1424 btrfs_set_node_key(parent, &disk_key, pslot + 1); 1425 btrfs_mark_buffer_dirty(parent); 1426 1427 if (btrfs_header_nritems(mid) <= orig_slot) { 1428 path->nodes[level] = right; 1429 path->slots[level + 1] += 1; 1430 path->slots[level] = orig_slot - 1431 btrfs_header_nritems(mid); 1432 btrfs_tree_unlock(mid); 1433 free_extent_buffer(mid); 1434 } else { 1435 btrfs_tree_unlock(right); 1436 free_extent_buffer(right); 1437 } 1438 return 0; 1439 } 1440 btrfs_tree_unlock(right); 1441 free_extent_buffer(right); 1442 } 1443 return 1; 1444 } 1445 1446 /* 1447 * readahead one full node of leaves, finding things that are close 1448 * to the block in 'slot', and triggering ra on them. 1449 */ 1450 static void reada_for_search(struct btrfs_fs_info *fs_info, 1451 struct btrfs_path *path, 1452 int level, int slot, u64 objectid) 1453 { 1454 struct extent_buffer *node; 1455 struct btrfs_disk_key disk_key; 1456 u32 nritems; 1457 u64 search; 1458 u64 target; 1459 u64 nread = 0; 1460 u64 nread_max; 1461 u32 nr; 1462 u32 blocksize; 1463 u32 nscan = 0; 1464 1465 if (level != 1 && path->reada != READA_FORWARD_ALWAYS) 1466 return; 1467 1468 if (!path->nodes[level]) 1469 return; 1470 1471 node = path->nodes[level]; 1472 1473 /* 1474 * Since the time between visiting leaves is much shorter than the time 1475 * between visiting nodes, limit read ahead of nodes to 1, to avoid too 1476 * much IO at once (possibly random). 1477 */ 1478 if (path->reada == READA_FORWARD_ALWAYS) { 1479 if (level > 1) 1480 nread_max = node->fs_info->nodesize; 1481 else 1482 nread_max = SZ_128K; 1483 } else { 1484 nread_max = SZ_64K; 1485 } 1486 1487 search = btrfs_node_blockptr(node, slot); 1488 blocksize = fs_info->nodesize; 1489 if (path->reada != READA_FORWARD_ALWAYS) { 1490 struct extent_buffer *eb; 1491 1492 eb = find_extent_buffer(fs_info, search); 1493 if (eb) { 1494 free_extent_buffer(eb); 1495 return; 1496 } 1497 } 1498 1499 target = search; 1500 1501 nritems = btrfs_header_nritems(node); 1502 nr = slot; 1503 1504 while (1) { 1505 if (path->reada == READA_BACK) { 1506 if (nr == 0) 1507 break; 1508 nr--; 1509 } else if (path->reada == READA_FORWARD || 1510 path->reada == READA_FORWARD_ALWAYS) { 1511 nr++; 1512 if (nr >= nritems) 1513 break; 1514 } 1515 if (path->reada == READA_BACK && objectid) { 1516 btrfs_node_key(node, &disk_key, nr); 1517 if (btrfs_disk_key_objectid(&disk_key) != objectid) 1518 break; 1519 } 1520 search = btrfs_node_blockptr(node, nr); 1521 if (path->reada == READA_FORWARD_ALWAYS || 1522 (search <= target && target - search <= 65536) || 1523 (search > target && search - target <= 65536)) { 1524 btrfs_readahead_node_child(node, nr); 1525 nread += blocksize; 1526 } 1527 nscan++; 1528 if (nread > nread_max || nscan > 32) 1529 break; 1530 } 1531 } 1532 1533 static noinline void reada_for_balance(struct btrfs_path *path, int level) 1534 { 1535 struct extent_buffer *parent; 1536 int slot; 1537 int nritems; 1538 1539 parent = path->nodes[level + 1]; 1540 if (!parent) 1541 return; 1542 1543 nritems = btrfs_header_nritems(parent); 1544 slot = path->slots[level + 1]; 1545 1546 if (slot > 0) 1547 btrfs_readahead_node_child(parent, slot - 1); 1548 if (slot + 1 < nritems) 1549 btrfs_readahead_node_child(parent, slot + 1); 1550 } 1551 1552 1553 /* 1554 * when we walk down the tree, it is usually safe to unlock the higher layers 1555 * in the tree. 
The exceptions are when our path goes through slot 0, because 1556 * operations on the tree might require changing key pointers higher up in the 1557 * tree. 1558 * 1559 * callers might also have set path->keep_locks, which tells this code to keep 1560 * the lock if the path points to the last slot in the block. This is part of 1561 * walking through the tree, and selecting the next slot in the higher block. 1562 * 1563 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so 1564 * if lowest_unlock is 1, level 0 won't be unlocked 1565 */ 1566 static noinline void unlock_up(struct btrfs_path *path, int level, 1567 int lowest_unlock, int min_write_lock_level, 1568 int *write_lock_level) 1569 { 1570 int i; 1571 int skip_level = level; 1572 bool check_skip = true; 1573 1574 for (i = level; i < BTRFS_MAX_LEVEL; i++) { 1575 if (!path->nodes[i]) 1576 break; 1577 if (!path->locks[i]) 1578 break; 1579 1580 if (check_skip) { 1581 if (path->slots[i] == 0) { 1582 skip_level = i + 1; 1583 continue; 1584 } 1585 1586 if (path->keep_locks) { 1587 u32 nritems; 1588 1589 nritems = btrfs_header_nritems(path->nodes[i]); 1590 if (nritems < 1 || path->slots[i] >= nritems - 1) { 1591 skip_level = i + 1; 1592 continue; 1593 } 1594 } 1595 } 1596 1597 if (i >= lowest_unlock && i > skip_level) { 1598 check_skip = false; 1599 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]); 1600 path->locks[i] = 0; 1601 if (write_lock_level && 1602 i > min_write_lock_level && 1603 i <= *write_lock_level) { 1604 *write_lock_level = i - 1; 1605 } 1606 } 1607 } 1608 } 1609 1610 /* 1611 * Helper function for btrfs_search_slot() and other functions that do a search 1612 * on a btree. The goal is to find a tree block in the cache (the radix tree at 1613 * fs_info->buffer_radix), but if we can't find it, or it's not up to date, read 1614 * its pages from disk. 1615 * 1616 * Returns -EAGAIN, with the path unlocked, if the caller needs to repeat the 1617 * whole btree search, starting again from the current root node. 1618 */ 1619 static int 1620 read_block_for_search(struct btrfs_root *root, struct btrfs_path *p, 1621 struct extent_buffer **eb_ret, int level, int slot, 1622 const struct btrfs_key *key) 1623 { 1624 struct btrfs_fs_info *fs_info = root->fs_info; 1625 struct btrfs_tree_parent_check check = { 0 }; 1626 u64 blocknr; 1627 u64 gen; 1628 struct extent_buffer *tmp; 1629 int ret; 1630 int parent_level; 1631 bool unlock_up; 1632 1633 unlock_up = ((level + 1 < BTRFS_MAX_LEVEL) && p->locks[level + 1]); 1634 blocknr = btrfs_node_blockptr(*eb_ret, slot); 1635 gen = btrfs_node_ptr_generation(*eb_ret, slot); 1636 parent_level = btrfs_header_level(*eb_ret); 1637 btrfs_node_key_to_cpu(*eb_ret, &check.first_key, slot); 1638 check.has_first_key = true; 1639 check.level = parent_level - 1; 1640 check.transid = gen; 1641 check.owner_root = root->root_key.objectid; 1642 1643 /* 1644 * If we need to read an extent buffer from disk and we are holding locks 1645 * on upper level nodes, we unlock all the upper nodes before reading the 1646 * extent buffer, and then return -EAGAIN to the caller as it needs to 1647 * restart the search. We don't release the lock on the current level 1648 * because we need to walk this node to figure out which blocks to read. 
1649 */ 1650 tmp = find_extent_buffer(fs_info, blocknr); 1651 if (tmp) { 1652 if (p->reada == READA_FORWARD_ALWAYS) 1653 reada_for_search(fs_info, p, level, slot, key->objectid); 1654 1655 /* first we do an atomic uptodate check */ 1656 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) { 1657 /* 1658 * Do extra check for first_key, eb can be stale due to 1659 * being cached, read from scrub, or have multiple 1660 * parents (shared tree blocks). 1661 */ 1662 if (btrfs_verify_level_key(tmp, 1663 parent_level - 1, &check.first_key, gen)) { 1664 free_extent_buffer(tmp); 1665 return -EUCLEAN; 1666 } 1667 *eb_ret = tmp; 1668 return 0; 1669 } 1670 1671 if (p->nowait) { 1672 free_extent_buffer(tmp); 1673 return -EAGAIN; 1674 } 1675 1676 if (unlock_up) 1677 btrfs_unlock_up_safe(p, level + 1); 1678 1679 /* now we're allowed to do a blocking uptodate check */ 1680 ret = btrfs_read_extent_buffer(tmp, &check); 1681 if (ret) { 1682 free_extent_buffer(tmp); 1683 btrfs_release_path(p); 1684 return -EIO; 1685 } 1686 if (btrfs_check_eb_owner(tmp, root->root_key.objectid)) { 1687 free_extent_buffer(tmp); 1688 btrfs_release_path(p); 1689 return -EUCLEAN; 1690 } 1691 1692 if (unlock_up) 1693 ret = -EAGAIN; 1694 1695 goto out; 1696 } else if (p->nowait) { 1697 return -EAGAIN; 1698 } 1699 1700 if (unlock_up) { 1701 btrfs_unlock_up_safe(p, level + 1); 1702 ret = -EAGAIN; 1703 } else { 1704 ret = 0; 1705 } 1706 1707 if (p->reada != READA_NONE) 1708 reada_for_search(fs_info, p, level, slot, key->objectid); 1709 1710 tmp = read_tree_block(fs_info, blocknr, &check); 1711 if (IS_ERR(tmp)) { 1712 btrfs_release_path(p); 1713 return PTR_ERR(tmp); 1714 } 1715 /* 1716 * If the read above didn't mark this buffer up to date, 1717 * it will never end up being up to date. Set ret to EIO now 1718 * and give up so that our caller doesn't loop forever 1719 * on our EAGAINs. 1720 */ 1721 if (!extent_buffer_uptodate(tmp)) 1722 ret = -EIO; 1723 1724 out: 1725 if (ret == 0) { 1726 *eb_ret = tmp; 1727 } else { 1728 free_extent_buffer(tmp); 1729 btrfs_release_path(p); 1730 } 1731 1732 return ret; 1733 } 1734 1735 /* 1736 * helper function for btrfs_search_slot. This does all of the checks 1737 * for node-level blocks and does any balancing required based on 1738 * the ins_len. 1739 * 1740 * If no extra work was required, zero is returned. 
If we had to 1741 * drop the path, -EAGAIN is returned and btrfs_search_slot must 1742 * start over 1743 */ 1744 static int 1745 setup_nodes_for_search(struct btrfs_trans_handle *trans, 1746 struct btrfs_root *root, struct btrfs_path *p, 1747 struct extent_buffer *b, int level, int ins_len, 1748 int *write_lock_level) 1749 { 1750 struct btrfs_fs_info *fs_info = root->fs_info; 1751 int ret = 0; 1752 1753 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >= 1754 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) { 1755 1756 if (*write_lock_level < level + 1) { 1757 *write_lock_level = level + 1; 1758 btrfs_release_path(p); 1759 return -EAGAIN; 1760 } 1761 1762 reada_for_balance(p, level); 1763 ret = split_node(trans, root, p, level); 1764 1765 b = p->nodes[level]; 1766 } else if (ins_len < 0 && btrfs_header_nritems(b) < 1767 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) { 1768 1769 if (*write_lock_level < level + 1) { 1770 *write_lock_level = level + 1; 1771 btrfs_release_path(p); 1772 return -EAGAIN; 1773 } 1774 1775 reada_for_balance(p, level); 1776 ret = balance_level(trans, root, p, level); 1777 if (ret) 1778 return ret; 1779 1780 b = p->nodes[level]; 1781 if (!b) { 1782 btrfs_release_path(p); 1783 return -EAGAIN; 1784 } 1785 BUG_ON(btrfs_header_nritems(b) == 1); 1786 } 1787 return ret; 1788 } 1789 1790 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path, 1791 u64 iobjectid, u64 ioff, u8 key_type, 1792 struct btrfs_key *found_key) 1793 { 1794 int ret; 1795 struct btrfs_key key; 1796 struct extent_buffer *eb; 1797 1798 ASSERT(path); 1799 ASSERT(found_key); 1800 1801 key.type = key_type; 1802 key.objectid = iobjectid; 1803 key.offset = ioff; 1804 1805 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0); 1806 if (ret < 0) 1807 return ret; 1808 1809 eb = path->nodes[0]; 1810 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) { 1811 ret = btrfs_next_leaf(fs_root, path); 1812 if (ret) 1813 return ret; 1814 eb = path->nodes[0]; 1815 } 1816 1817 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]); 1818 if (found_key->type != key.type || 1819 found_key->objectid != key.objectid) 1820 return 1; 1821 1822 return 0; 1823 } 1824 1825 static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root, 1826 struct btrfs_path *p, 1827 int write_lock_level) 1828 { 1829 struct extent_buffer *b; 1830 int root_lock = 0; 1831 int level = 0; 1832 1833 if (p->search_commit_root) { 1834 b = root->commit_root; 1835 atomic_inc(&b->refs); 1836 level = btrfs_header_level(b); 1837 /* 1838 * Ensure that all callers have set skip_locking when 1839 * p->search_commit_root = 1. 1840 */ 1841 ASSERT(p->skip_locking == 1); 1842 1843 goto out; 1844 } 1845 1846 if (p->skip_locking) { 1847 b = btrfs_root_node(root); 1848 level = btrfs_header_level(b); 1849 goto out; 1850 } 1851 1852 /* We try very hard to do read locks on the root */ 1853 root_lock = BTRFS_READ_LOCK; 1854 1855 /* 1856 * If the level is set to maximum, we can skip trying to get the read 1857 * lock. 
1858 */ 1859 if (write_lock_level < BTRFS_MAX_LEVEL) { 1860 /* 1861 * We don't know the level of the root node until we actually 1862 * have it read locked 1863 */ 1864 if (p->nowait) { 1865 b = btrfs_try_read_lock_root_node(root); 1866 if (IS_ERR(b)) 1867 return b; 1868 } else { 1869 b = btrfs_read_lock_root_node(root); 1870 } 1871 level = btrfs_header_level(b); 1872 if (level > write_lock_level) 1873 goto out; 1874 1875 /* Whoops, must trade for write lock */ 1876 btrfs_tree_read_unlock(b); 1877 free_extent_buffer(b); 1878 } 1879 1880 b = btrfs_lock_root_node(root); 1881 root_lock = BTRFS_WRITE_LOCK; 1882 1883 /* The level might have changed, check again */ 1884 level = btrfs_header_level(b); 1885 1886 out: 1887 /* 1888 * The root may have failed to write out at some point, and thus is no 1889 * longer valid, return an error in this case. 1890 */ 1891 if (!extent_buffer_uptodate(b)) { 1892 if (root_lock) 1893 btrfs_tree_unlock_rw(b, root_lock); 1894 free_extent_buffer(b); 1895 return ERR_PTR(-EIO); 1896 } 1897 1898 p->nodes[level] = b; 1899 if (!p->skip_locking) 1900 p->locks[level] = root_lock; 1901 /* 1902 * Callers are responsible for dropping b's references. 1903 */ 1904 return b; 1905 } 1906 1907 /* 1908 * Replace the extent buffer at the lowest level of the path with a cloned 1909 * version. The purpose is to be able to use it safely, after releasing the 1910 * commit root semaphore, even if relocation is happening in parallel, the 1911 * transaction used for relocation is committed and the extent buffer is 1912 * reallocated in the next transaction. 1913 * 1914 * This is used in a context where the caller does not prevent transaction 1915 * commits from happening, either by holding a transaction handle or holding 1916 * some lock, while it's doing searches through a commit root. 1917 * At the moment it's only used for send operations. 1918 */ 1919 static int finish_need_commit_sem_search(struct btrfs_path *path) 1920 { 1921 const int i = path->lowest_level; 1922 const int slot = path->slots[i]; 1923 struct extent_buffer *lowest = path->nodes[i]; 1924 struct extent_buffer *clone; 1925 1926 ASSERT(path->need_commit_sem); 1927 1928 if (!lowest) 1929 return 0; 1930 1931 lockdep_assert_held_read(&lowest->fs_info->commit_root_sem); 1932 1933 clone = btrfs_clone_extent_buffer(lowest); 1934 if (!clone) 1935 return -ENOMEM; 1936 1937 btrfs_release_path(path); 1938 path->nodes[i] = clone; 1939 path->slots[i] = slot; 1940 1941 return 0; 1942 } 1943 1944 static inline int search_for_key_slot(struct extent_buffer *eb, 1945 int search_low_slot, 1946 const struct btrfs_key *key, 1947 int prev_cmp, 1948 int *slot) 1949 { 1950 /* 1951 * If a previous call to btrfs_bin_search() on a parent node returned an 1952 * exact match (prev_cmp == 0), we can safely assume the target key will 1953 * always be at slot 0 on lower levels, since each key pointer 1954 * (struct btrfs_key_ptr) refers to the lowest key accessible from the 1955 * subtree it points to. Thus we can skip searching lower levels. 
1956 */
1957 if (prev_cmp == 0) {
1958 *slot = 0;
1959 return 0;
1960 }
1961
1962 return btrfs_bin_search(eb, search_low_slot, key, slot);
1963 }
1964
1965 static int search_leaf(struct btrfs_trans_handle *trans,
1966 struct btrfs_root *root,
1967 const struct btrfs_key *key,
1968 struct btrfs_path *path,
1969 int ins_len,
1970 int prev_cmp)
1971 {
1972 struct extent_buffer *leaf = path->nodes[0];
1973 int leaf_free_space = -1;
1974 int search_low_slot = 0;
1975 int ret;
1976 bool do_bin_search = true;
1977
1978 /*
1979 * If we are doing an insertion, the leaf has enough free space and the
1980 * destination slot for the key is not slot 0, then we can unlock our
1981 * write lock on the parent, and any other upper nodes, before doing the
1982 * binary search on the leaf (with search_for_key_slot()), allowing other
1983 * tasks to lock the parent and any other upper nodes.
1984 */
1985 if (ins_len > 0) {
1986 /*
1987 * Cache the leaf free space, since we will need it later and it
1988 * will not change until then.
1989 */
1990 leaf_free_space = btrfs_leaf_free_space(leaf);
1991
1992 /*
1993 * !path->locks[1] means we have a single node tree, the leaf is
1994 * the root of the tree.
1995 */
1996 if (path->locks[1] && leaf_free_space >= ins_len) {
1997 struct btrfs_disk_key first_key;
1998
1999 ASSERT(btrfs_header_nritems(leaf) > 0);
2000 btrfs_item_key(leaf, &first_key, 0);
2001
2002 /*
2003 * Doing the extra comparison with the first key is cheap,
2004 * taking into account that the first key is very likely
2005 * already in a cache line because it immediately follows
2006 * the extent buffer's header and we have recently accessed
2007 * the header's level field.
2008 */
2009 ret = comp_keys(&first_key, key);
2010 if (ret < 0) {
2011 /*
2012 * The first key is smaller than the key we want
2013 * to insert, so we are safe to unlock all upper
2014 * nodes and we have to do the binary search.
2015 *
2016 * We do use btrfs_unlock_up_safe() and not
2017 * unlock_up() because the latter does not unlock
2018 * nodes with a slot of 0 - we can safely unlock
2019 * any node even if its slot is 0 since in this
2020 * case the key does not end up at slot 0 of the
2021 * leaf and there's no need to split the leaf.
2022 */
2023 btrfs_unlock_up_safe(path, 1);
2024 search_low_slot = 1;
2025 } else {
2026 /*
2027 * The first key is >= the key we want to
2028 * insert, so we can skip the binary search as
2029 * the target key will be at slot 0.
2030 *
2031 * We cannot unlock upper nodes when the key is
2032 * less than the first key, because we will need
2033 * to update the key at slot 0 of the parent node
2034 * and possibly of other upper nodes too.
2035 * If the key matches the first key, then we can
2036 * unlock all the upper nodes, using
2037 * btrfs_unlock_up_safe() instead of unlock_up()
2038 * as stated above.
2039 */
2040 if (ret == 0)
2041 btrfs_unlock_up_safe(path, 1);
2042 /*
2043 * ret is already 0 or 1, matching the result of
2044 * a btrfs_bin_search() call, so there is no need
2045 * to adjust it.
2046 */
2047 do_bin_search = false;
2048 path->slots[0] = 0;
2049 }
2050 }
2051 }
2052
2053 if (do_bin_search) {
2054 ret = search_for_key_slot(leaf, search_low_slot, key,
2055 prev_cmp, &path->slots[0]);
2056 if (ret < 0)
2057 return ret;
2058 }
2059
2060 if (ins_len > 0) {
2061 /*
2062 * Item key already exists. In this case, if we are allowed to
2063 * insert the item (for example, in dir_item case, item key
2064 * collision is allowed), it will be merged with the original
2065 * item. Only the item size grows, no new btrfs item will be
2066 * added. If search_for_extension is not set, ins_len already
2067 * accounts for the size of struct btrfs_item, so deduct it here
2068 * to keep the leaf space check correct.
2069 */
2070 if (ret == 0 && !path->search_for_extension) {
2071 ASSERT(ins_len >= sizeof(struct btrfs_item));
2072 ins_len -= sizeof(struct btrfs_item);
2073 }
2074
2075 ASSERT(leaf_free_space >= 0);
2076
2077 if (leaf_free_space < ins_len) {
2078 int err;
2079
2080 err = split_leaf(trans, root, key, path, ins_len,
2081 (ret == 0));
2082 ASSERT(err <= 0);
2083 if (WARN_ON(err > 0))
2084 err = -EUCLEAN;
2085 if (err)
2086 ret = err;
2087 }
2088 }
2089
2090 return ret;
2091 }
2092
2093 /*
2094 * btrfs_search_slot - look for a key in a tree and perform necessary
2095 * modifications to preserve tree invariants.
2096 *
2097 * @trans: Handle of transaction, used when modifying the tree
2098 * @p: Holds all btree nodes along the search path
2099 * @root: The root node of the tree
2100 * @key: The key we are looking for
2101 * @ins_len: Indicates purpose of search:
2102 * >0 for inserts it is the size of the item inserted (*)
2103 * <0 for deletions
2104 * 0 for plain searches, not modifying the tree
2105 *
2106 * (*) If size of item inserted doesn't include
2107 * sizeof(struct btrfs_item), then p->search_for_extension must
2108 * be set.
2109 * @cow: boolean indicating whether CoW operations should be performed.
2110 * Must always be 1 when modifying the tree.
2111 *
2112 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree.
2113 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible)
2114 *
2115 * If @key is found, 0 is returned and you can find the item in the leaf level
2116 * of the path (level 0)
2117 *
2118 * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
2119 * points to the slot where it should be inserted
2120 *
2121 * If an error is encountered while searching the tree a negative error number
2122 * is returned
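 *
 * Example (an illustrative sketch, not lifted from an existing caller; it
 * assumes a valid @root and an inode number "ino"): a plain read-only
 * lookup of an inode item could look roughly like this:
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct btrfs_key key;
 *	int ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	key.objectid = ino;
 *	key.type = BTRFS_INODE_ITEM_KEY;
 *	key.offset = 0;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret > 0)
 *		ret = -ENOENT;
 *	if (ret == 0) {
 *		struct btrfs_inode_item *ii;
 *
 *		ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
 *				    struct btrfs_inode_item);
 *		... read fields through accessors such as btrfs_inode_size() ...
 *	}
 *	btrfs_free_path(path);
 *	return ret;
 *
 * With a NULL transaction handle, ins_len == 0 and cow == 0 the tree is not
 * modified, so no transaction is required for this kind of search.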
2123 */
2124 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2125 const struct btrfs_key *key, struct btrfs_path *p,
2126 int ins_len, int cow)
2127 {
2128 struct btrfs_fs_info *fs_info = root->fs_info;
2129 struct extent_buffer *b;
2130 int slot;
2131 int ret;
2132 int err;
2133 int level;
2134 int lowest_unlock = 1;
2135 /* everything at write_lock_level or lower must be write locked */
2136 int write_lock_level = 0;
2137 u8 lowest_level = 0;
2138 int min_write_lock_level;
2139 int prev_cmp;
2140
2141 might_sleep();
2142
2143 lowest_level = p->lowest_level;
2144 WARN_ON(lowest_level && ins_len > 0);
2145 WARN_ON(p->nodes[0] != NULL);
2146 BUG_ON(!cow && ins_len);
2147
2148 /*
2149 * For now only allow nowait for read only operations. There's no
2150 * strict reason why we can't, we just only need it for reads so it's
2151 * only implemented for reads.
2152 */
2153 ASSERT(!p->nowait || !cow);
2154
2155 if (ins_len < 0) {
2156 lowest_unlock = 2;
2157
2158 /* when we are removing items, we might have to go up to level
2159 * two as we update tree pointers. Make sure we keep write locks
2160 * for those levels as well
2161 */
2162 write_lock_level = 2;
2163 } else if (ins_len > 0) {
2164 /*
2165 * for inserting items, make sure we have a write lock on
2166 * level 1 so we can update keys
2167 */
2168 write_lock_level = 1;
2169 }
2170
2171 if (!cow)
2172 write_lock_level = -1;
2173
2174 if (cow && (p->keep_locks || p->lowest_level))
2175 write_lock_level = BTRFS_MAX_LEVEL;
2176
2177 min_write_lock_level = write_lock_level;
2178
2179 if (p->need_commit_sem) {
2180 ASSERT(p->search_commit_root);
2181 if (p->nowait) {
2182 if (!down_read_trylock(&fs_info->commit_root_sem))
2183 return -EAGAIN;
2184 } else {
2185 down_read(&fs_info->commit_root_sem);
2186 }
2187 }
2188
2189 again:
2190 prev_cmp = -1;
2191 b = btrfs_search_slot_get_root(root, p, write_lock_level);
2192 if (IS_ERR(b)) {
2193 ret = PTR_ERR(b);
2194 goto done;
2195 }
2196
2197 while (b) {
2198 int dec = 0;
2199
2200 level = btrfs_header_level(b);
2201
2202 if (cow) {
2203 bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
2204
2205 /*
2206 * if we don't really need to cow this block
2207 * then we don't want to set the path blocking,
2208 * so we test it here
2209 */
2210 if (!should_cow_block(trans, root, b))
2211 goto cow_done;
2212
2213 /*
2214 * must have write locks on this node and the
2215 * parent
2216 */
2217 if (level > write_lock_level ||
2218 (level + 1 > write_lock_level &&
2219 level + 1 < BTRFS_MAX_LEVEL &&
2220 p->nodes[level + 1])) {
2221 write_lock_level = level + 1;
2222 btrfs_release_path(p);
2223 goto again;
2224 }
2225
2226 if (last_level)
2227 err = btrfs_cow_block(trans, root, b, NULL, 0,
2228 &b,
2229 BTRFS_NESTING_COW);
2230 else
2231 err = btrfs_cow_block(trans, root, b,
2232 p->nodes[level + 1],
2233 p->slots[level + 1], &b,
2234 BTRFS_NESTING_COW);
2235 if (err) {
2236 ret = err;
2237 goto done;
2238 }
2239 }
2240 cow_done:
2241 p->nodes[level] = b;
2242
2243 /*
2244 * we have a lock on b and as long as we aren't changing
2245 * the tree, there is no way for the items in b to change.
2246 * It is safe to drop the lock on our parent before we
2247 * go through the expensive btree search on b.
2248 *
2249 * If we're inserting or deleting (ins_len != 0), then we might
2250 * be changing slot zero, which may require changing the parent.
2251 * So, we can't drop the lock until after we know which slot
2252 * we're operating on.
2253 */ 2254 if (!ins_len && !p->keep_locks) { 2255 int u = level + 1; 2256 2257 if (u < BTRFS_MAX_LEVEL && p->locks[u]) { 2258 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]); 2259 p->locks[u] = 0; 2260 } 2261 } 2262 2263 if (level == 0) { 2264 if (ins_len > 0) 2265 ASSERT(write_lock_level >= 1); 2266 2267 ret = search_leaf(trans, root, key, p, ins_len, prev_cmp); 2268 if (!p->search_for_split) 2269 unlock_up(p, level, lowest_unlock, 2270 min_write_lock_level, NULL); 2271 goto done; 2272 } 2273 2274 ret = search_for_key_slot(b, 0, key, prev_cmp, &slot); 2275 if (ret < 0) 2276 goto done; 2277 prev_cmp = ret; 2278 2279 if (ret && slot > 0) { 2280 dec = 1; 2281 slot--; 2282 } 2283 p->slots[level] = slot; 2284 err = setup_nodes_for_search(trans, root, p, b, level, ins_len, 2285 &write_lock_level); 2286 if (err == -EAGAIN) 2287 goto again; 2288 if (err) { 2289 ret = err; 2290 goto done; 2291 } 2292 b = p->nodes[level]; 2293 slot = p->slots[level]; 2294 2295 /* 2296 * Slot 0 is special, if we change the key we have to update 2297 * the parent pointer which means we must have a write lock on 2298 * the parent 2299 */ 2300 if (slot == 0 && ins_len && write_lock_level < level + 1) { 2301 write_lock_level = level + 1; 2302 btrfs_release_path(p); 2303 goto again; 2304 } 2305 2306 unlock_up(p, level, lowest_unlock, min_write_lock_level, 2307 &write_lock_level); 2308 2309 if (level == lowest_level) { 2310 if (dec) 2311 p->slots[level]++; 2312 goto done; 2313 } 2314 2315 err = read_block_for_search(root, p, &b, level, slot, key); 2316 if (err == -EAGAIN) 2317 goto again; 2318 if (err) { 2319 ret = err; 2320 goto done; 2321 } 2322 2323 if (!p->skip_locking) { 2324 level = btrfs_header_level(b); 2325 2326 btrfs_maybe_reset_lockdep_class(root, b); 2327 2328 if (level <= write_lock_level) { 2329 btrfs_tree_lock(b); 2330 p->locks[level] = BTRFS_WRITE_LOCK; 2331 } else { 2332 if (p->nowait) { 2333 if (!btrfs_try_tree_read_lock(b)) { 2334 free_extent_buffer(b); 2335 ret = -EAGAIN; 2336 goto done; 2337 } 2338 } else { 2339 btrfs_tree_read_lock(b); 2340 } 2341 p->locks[level] = BTRFS_READ_LOCK; 2342 } 2343 p->nodes[level] = b; 2344 } 2345 } 2346 ret = 1; 2347 done: 2348 if (ret < 0 && !p->skip_release_on_error) 2349 btrfs_release_path(p); 2350 2351 if (p->need_commit_sem) { 2352 int ret2; 2353 2354 ret2 = finish_need_commit_sem_search(p); 2355 up_read(&fs_info->commit_root_sem); 2356 if (ret2) 2357 ret = ret2; 2358 } 2359 2360 return ret; 2361 } 2362 ALLOW_ERROR_INJECTION(btrfs_search_slot, ERRNO); 2363 2364 /* 2365 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the 2366 * current state of the tree together with the operations recorded in the tree 2367 * modification log to search for the key in a previous version of this tree, as 2368 * denoted by the time_seq parameter. 2369 * 2370 * Naturally, there is no support for insert, delete or cow operations. 2371 * 2372 * The resulting path and return value will be set up as if we called 2373 * btrfs_search_slot at that point in time with ins_len and cow both set to 0. 
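 *
 * Callers typically obtain @time_seq from the tree modification log, for
 * example via btrfs_get_tree_mod_seq() (an illustrative note; the exact
 * source of the sequence number depends on the caller).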
2374 */ 2375 int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key, 2376 struct btrfs_path *p, u64 time_seq) 2377 { 2378 struct btrfs_fs_info *fs_info = root->fs_info; 2379 struct extent_buffer *b; 2380 int slot; 2381 int ret; 2382 int err; 2383 int level; 2384 int lowest_unlock = 1; 2385 u8 lowest_level = 0; 2386 2387 lowest_level = p->lowest_level; 2388 WARN_ON(p->nodes[0] != NULL); 2389 ASSERT(!p->nowait); 2390 2391 if (p->search_commit_root) { 2392 BUG_ON(time_seq); 2393 return btrfs_search_slot(NULL, root, key, p, 0, 0); 2394 } 2395 2396 again: 2397 b = btrfs_get_old_root(root, time_seq); 2398 if (!b) { 2399 ret = -EIO; 2400 goto done; 2401 } 2402 level = btrfs_header_level(b); 2403 p->locks[level] = BTRFS_READ_LOCK; 2404 2405 while (b) { 2406 int dec = 0; 2407 2408 level = btrfs_header_level(b); 2409 p->nodes[level] = b; 2410 2411 /* 2412 * we have a lock on b and as long as we aren't changing 2413 * the tree, there is no way to for the items in b to change. 2414 * It is safe to drop the lock on our parent before we 2415 * go through the expensive btree search on b. 2416 */ 2417 btrfs_unlock_up_safe(p, level + 1); 2418 2419 ret = btrfs_bin_search(b, 0, key, &slot); 2420 if (ret < 0) 2421 goto done; 2422 2423 if (level == 0) { 2424 p->slots[level] = slot; 2425 unlock_up(p, level, lowest_unlock, 0, NULL); 2426 goto done; 2427 } 2428 2429 if (ret && slot > 0) { 2430 dec = 1; 2431 slot--; 2432 } 2433 p->slots[level] = slot; 2434 unlock_up(p, level, lowest_unlock, 0, NULL); 2435 2436 if (level == lowest_level) { 2437 if (dec) 2438 p->slots[level]++; 2439 goto done; 2440 } 2441 2442 err = read_block_for_search(root, p, &b, level, slot, key); 2443 if (err == -EAGAIN) 2444 goto again; 2445 if (err) { 2446 ret = err; 2447 goto done; 2448 } 2449 2450 level = btrfs_header_level(b); 2451 btrfs_tree_read_lock(b); 2452 b = btrfs_tree_mod_log_rewind(fs_info, p, b, time_seq); 2453 if (!b) { 2454 ret = -ENOMEM; 2455 goto done; 2456 } 2457 p->locks[level] = BTRFS_READ_LOCK; 2458 p->nodes[level] = b; 2459 } 2460 ret = 1; 2461 done: 2462 if (ret < 0) 2463 btrfs_release_path(p); 2464 2465 return ret; 2466 } 2467 2468 /* 2469 * Search the tree again to find a leaf with smaller keys. 2470 * Returns 0 if it found something. 2471 * Returns 1 if there are no smaller keys. 2472 * Returns < 0 on error. 2473 * 2474 * This may release the path, and so you may lose any locks held at the 2475 * time you call it. 2476 */ 2477 static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path) 2478 { 2479 struct btrfs_key key; 2480 struct btrfs_key orig_key; 2481 struct btrfs_disk_key found_key; 2482 int ret; 2483 2484 btrfs_item_key_to_cpu(path->nodes[0], &key, 0); 2485 orig_key = key; 2486 2487 if (key.offset > 0) { 2488 key.offset--; 2489 } else if (key.type > 0) { 2490 key.type--; 2491 key.offset = (u64)-1; 2492 } else if (key.objectid > 0) { 2493 key.objectid--; 2494 key.type = (u8)-1; 2495 key.offset = (u64)-1; 2496 } else { 2497 return 1; 2498 } 2499 2500 btrfs_release_path(path); 2501 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2502 if (ret <= 0) 2503 return ret; 2504 2505 /* 2506 * Previous key not found. 
Even if we were at slot 0 of the leaf we had
2507 * before releasing the path and calling btrfs_search_slot(), we now may
2508 * be in a slot pointing to the same original key - this can happen if
2509 * after we released the path, one or more items were moved from a
2510 * sibling leaf into the front of the leaf we had due to an insertion
2511 * (see push_leaf_right()).
2512 * If we hit this case and our slot is > 0, just decrement the slot
2513 * so that the caller does not process the same key again, which may or
2514 * may not break the caller, depending on its logic.
2515 */
2516 if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
2517 btrfs_item_key(path->nodes[0], &found_key, path->slots[0]);
2518 ret = comp_keys(&found_key, &orig_key);
2519 if (ret == 0) {
2520 if (path->slots[0] > 0) {
2521 path->slots[0]--;
2522 return 0;
2523 }
2524 /*
2525 * At slot 0, same key as before, it means orig_key is
2526 * the lowest, leftmost, key in the tree. We're done.
2527 */
2528 return 1;
2529 }
2530 }
2531
2532 btrfs_item_key(path->nodes[0], &found_key, 0);
2533 ret = comp_keys(&found_key, &key);
2534 /*
2535 * We might have had an item with the previous key in the tree right
2536 * before we released our path. And after we released our path, that
2537 * item might have been pushed to the first slot (0) of the leaf we
2538 * were holding due to a tree balance. Alternatively, an item with the
2539 * previous key can exist as the only element of a leaf (big fat item).
2540 * Therefore account for these 2 cases, so that our callers (like
2541 * btrfs_previous_item) don't miss an existing item with a key matching
2542 * the previous key we computed above.
2543 */
2544 if (ret <= 0)
2545 return 0;
2546 return 1;
2547 }
2548
2549 /*
2550 * helper to use instead of btrfs_search_slot() if no exact match is needed
2551 * but instead the next or previous item should be returned.
2552 * When find_higher is true, the next higher item is returned, the next lower
2553 * otherwise.
2554 * When return_any and find_higher are both true, and no higher item is found,
2555 * return the next lower instead.
2556 * When return_any is true and find_higher is false, and no lower item is found,
2557 * return the next higher instead.
2558 * It returns 0 if any item is found, 1 if none is found (tree empty), and
2559 * < 0 on error
2560 */
2561 int btrfs_search_slot_for_read(struct btrfs_root *root,
2562 const struct btrfs_key *key,
2563 struct btrfs_path *p, int find_higher,
2564 int return_any)
2565 {
2566 int ret;
2567 struct extent_buffer *leaf;
2568
2569 again:
2570 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
2571 if (ret <= 0)
2572 return ret;
2573 /*
2574 * a return value of 1 means the path is at the position where the
2575 * item should be inserted. Normally this is the next bigger item,
2576 * but in case the previous item is the last in a leaf, path points
2577 * to the first free slot in the previous leaf, i.e. at an invalid
2578 * item.
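 * In other words path->slots[0] may be equal to btrfs_header_nritems(leaf),
 * i.e. one past the last valid slot, which is why the code below always
 * compares the slot against the leaf's item count before using it.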
2579 */ 2580 leaf = p->nodes[0]; 2581 2582 if (find_higher) { 2583 if (p->slots[0] >= btrfs_header_nritems(leaf)) { 2584 ret = btrfs_next_leaf(root, p); 2585 if (ret <= 0) 2586 return ret; 2587 if (!return_any) 2588 return 1; 2589 /* 2590 * no higher item found, return the next 2591 * lower instead 2592 */ 2593 return_any = 0; 2594 find_higher = 0; 2595 btrfs_release_path(p); 2596 goto again; 2597 } 2598 } else { 2599 if (p->slots[0] == 0) { 2600 ret = btrfs_prev_leaf(root, p); 2601 if (ret < 0) 2602 return ret; 2603 if (!ret) { 2604 leaf = p->nodes[0]; 2605 if (p->slots[0] == btrfs_header_nritems(leaf)) 2606 p->slots[0]--; 2607 return 0; 2608 } 2609 if (!return_any) 2610 return 1; 2611 /* 2612 * no lower item found, return the next 2613 * higher instead 2614 */ 2615 return_any = 0; 2616 find_higher = 1; 2617 btrfs_release_path(p); 2618 goto again; 2619 } else { 2620 --p->slots[0]; 2621 } 2622 } 2623 return 0; 2624 } 2625 2626 /* 2627 * Execute search and call btrfs_previous_item to traverse backwards if the item 2628 * was not found. 2629 * 2630 * Return 0 if found, 1 if not found and < 0 if error. 2631 */ 2632 int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key, 2633 struct btrfs_path *path) 2634 { 2635 int ret; 2636 2637 ret = btrfs_search_slot(NULL, root, key, path, 0, 0); 2638 if (ret > 0) 2639 ret = btrfs_previous_item(root, path, key->objectid, key->type); 2640 2641 if (ret == 0) 2642 btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]); 2643 2644 return ret; 2645 } 2646 2647 /* 2648 * Search for a valid slot for the given path. 2649 * 2650 * @root: The root node of the tree. 2651 * @key: Will contain a valid item if found. 2652 * @path: The starting point to validate the slot. 2653 * 2654 * Return: 0 if the item is valid 2655 * 1 if not found 2656 * <0 if error. 2657 */ 2658 int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key, 2659 struct btrfs_path *path) 2660 { 2661 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { 2662 int ret; 2663 2664 ret = btrfs_next_leaf(root, path); 2665 if (ret) 2666 return ret; 2667 } 2668 2669 btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]); 2670 return 0; 2671 } 2672 2673 /* 2674 * adjust the pointers going up the tree, starting at level 2675 * making sure the right key of each node is points to 'key'. 2676 * This is used after shifting pointers to the left, so it stops 2677 * fixing up pointers when a given leaf/node is not in slot 0 of the 2678 * higher levels 2679 * 2680 */ 2681 static void fixup_low_keys(struct btrfs_path *path, 2682 struct btrfs_disk_key *key, int level) 2683 { 2684 int i; 2685 struct extent_buffer *t; 2686 int ret; 2687 2688 for (i = level; i < BTRFS_MAX_LEVEL; i++) { 2689 int tslot = path->slots[i]; 2690 2691 if (!path->nodes[i]) 2692 break; 2693 t = path->nodes[i]; 2694 ret = btrfs_tree_mod_log_insert_key(t, tslot, 2695 BTRFS_MOD_LOG_KEY_REPLACE); 2696 BUG_ON(ret < 0); 2697 btrfs_set_node_key(t, key, tslot); 2698 btrfs_mark_buffer_dirty(path->nodes[i]); 2699 if (tslot != 0) 2700 break; 2701 } 2702 } 2703 2704 /* 2705 * update item key. 2706 * 2707 * This function isn't completely safe. 
It's the caller's responsibility 2708 * that the new key won't break the order 2709 */ 2710 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info, 2711 struct btrfs_path *path, 2712 const struct btrfs_key *new_key) 2713 { 2714 struct btrfs_disk_key disk_key; 2715 struct extent_buffer *eb; 2716 int slot; 2717 2718 eb = path->nodes[0]; 2719 slot = path->slots[0]; 2720 if (slot > 0) { 2721 btrfs_item_key(eb, &disk_key, slot - 1); 2722 if (unlikely(comp_keys(&disk_key, new_key) >= 0)) { 2723 btrfs_print_leaf(eb); 2724 btrfs_crit(fs_info, 2725 "slot %u key (%llu %u %llu) new key (%llu %u %llu)", 2726 slot, btrfs_disk_key_objectid(&disk_key), 2727 btrfs_disk_key_type(&disk_key), 2728 btrfs_disk_key_offset(&disk_key), 2729 new_key->objectid, new_key->type, 2730 new_key->offset); 2731 BUG(); 2732 } 2733 } 2734 if (slot < btrfs_header_nritems(eb) - 1) { 2735 btrfs_item_key(eb, &disk_key, slot + 1); 2736 if (unlikely(comp_keys(&disk_key, new_key) <= 0)) { 2737 btrfs_print_leaf(eb); 2738 btrfs_crit(fs_info, 2739 "slot %u key (%llu %u %llu) new key (%llu %u %llu)", 2740 slot, btrfs_disk_key_objectid(&disk_key), 2741 btrfs_disk_key_type(&disk_key), 2742 btrfs_disk_key_offset(&disk_key), 2743 new_key->objectid, new_key->type, 2744 new_key->offset); 2745 BUG(); 2746 } 2747 } 2748 2749 btrfs_cpu_key_to_disk(&disk_key, new_key); 2750 btrfs_set_item_key(eb, &disk_key, slot); 2751 btrfs_mark_buffer_dirty(eb); 2752 if (slot == 0) 2753 fixup_low_keys(path, &disk_key, 1); 2754 } 2755 2756 /* 2757 * Check key order of two sibling extent buffers. 2758 * 2759 * Return true if something is wrong. 2760 * Return false if everything is fine. 2761 * 2762 * Tree-checker only works inside one tree block, thus the following 2763 * corruption can not be detected by tree-checker: 2764 * 2765 * Leaf @left | Leaf @right 2766 * -------------------------------------------------------------- 2767 * | 1 | 2 | 3 | 4 | 5 | f6 | | 7 | 8 | 2768 * 2769 * Key f6 in leaf @left itself is valid, but not valid when the next 2770 * key in leaf @right is 7. 2771 * This can only be checked at tree block merge time. 2772 * And since tree checker has ensured all key order in each tree block 2773 * is correct, we only need to bother the last key of @left and the first 2774 * key of @right. 
2775 */ 2776 static bool check_sibling_keys(struct extent_buffer *left, 2777 struct extent_buffer *right) 2778 { 2779 struct btrfs_key left_last; 2780 struct btrfs_key right_first; 2781 int level = btrfs_header_level(left); 2782 int nr_left = btrfs_header_nritems(left); 2783 int nr_right = btrfs_header_nritems(right); 2784 2785 /* No key to check in one of the tree blocks */ 2786 if (!nr_left || !nr_right) 2787 return false; 2788 2789 if (level) { 2790 btrfs_node_key_to_cpu(left, &left_last, nr_left - 1); 2791 btrfs_node_key_to_cpu(right, &right_first, 0); 2792 } else { 2793 btrfs_item_key_to_cpu(left, &left_last, nr_left - 1); 2794 btrfs_item_key_to_cpu(right, &right_first, 0); 2795 } 2796 2797 if (unlikely(btrfs_comp_cpu_keys(&left_last, &right_first) >= 0)) { 2798 btrfs_crit(left->fs_info, "left extent buffer:"); 2799 btrfs_print_tree(left, false); 2800 btrfs_crit(left->fs_info, "right extent buffer:"); 2801 btrfs_print_tree(right, false); 2802 btrfs_crit(left->fs_info, 2803 "bad key order, sibling blocks, left last (%llu %u %llu) right first (%llu %u %llu)", 2804 left_last.objectid, left_last.type, 2805 left_last.offset, right_first.objectid, 2806 right_first.type, right_first.offset); 2807 return true; 2808 } 2809 return false; 2810 } 2811 2812 /* 2813 * try to push data from one node into the next node left in the 2814 * tree. 2815 * 2816 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible 2817 * error, and > 0 if there was no room in the left hand block. 2818 */ 2819 static int push_node_left(struct btrfs_trans_handle *trans, 2820 struct extent_buffer *dst, 2821 struct extent_buffer *src, int empty) 2822 { 2823 struct btrfs_fs_info *fs_info = trans->fs_info; 2824 int push_items = 0; 2825 int src_nritems; 2826 int dst_nritems; 2827 int ret = 0; 2828 2829 src_nritems = btrfs_header_nritems(src); 2830 dst_nritems = btrfs_header_nritems(dst); 2831 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems; 2832 WARN_ON(btrfs_header_generation(src) != trans->transid); 2833 WARN_ON(btrfs_header_generation(dst) != trans->transid); 2834 2835 if (!empty && src_nritems <= 8) 2836 return 1; 2837 2838 if (push_items <= 0) 2839 return 1; 2840 2841 if (empty) { 2842 push_items = min(src_nritems, push_items); 2843 if (push_items < src_nritems) { 2844 /* leave at least 8 pointers in the node if 2845 * we aren't going to empty it 2846 */ 2847 if (src_nritems - push_items < 8) { 2848 if (push_items <= 8) 2849 return 1; 2850 push_items -= 8; 2851 } 2852 } 2853 } else 2854 push_items = min(src_nritems - 8, push_items); 2855 2856 /* dst is the left eb, src is the middle eb */ 2857 if (check_sibling_keys(dst, src)) { 2858 ret = -EUCLEAN; 2859 btrfs_abort_transaction(trans, ret); 2860 return ret; 2861 } 2862 ret = btrfs_tree_mod_log_eb_copy(dst, src, dst_nritems, 0, push_items); 2863 if (ret) { 2864 btrfs_abort_transaction(trans, ret); 2865 return ret; 2866 } 2867 copy_extent_buffer(dst, src, 2868 btrfs_node_key_ptr_offset(dst, dst_nritems), 2869 btrfs_node_key_ptr_offset(src, 0), 2870 push_items * sizeof(struct btrfs_key_ptr)); 2871 2872 if (push_items < src_nritems) { 2873 /* 2874 * btrfs_tree_mod_log_eb_copy handles logging the move, so we 2875 * don't need to do an explicit tree mod log operation for it. 
2876 */ 2877 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(src, 0), 2878 btrfs_node_key_ptr_offset(src, push_items), 2879 (src_nritems - push_items) * 2880 sizeof(struct btrfs_key_ptr)); 2881 } 2882 btrfs_set_header_nritems(src, src_nritems - push_items); 2883 btrfs_set_header_nritems(dst, dst_nritems + push_items); 2884 btrfs_mark_buffer_dirty(src); 2885 btrfs_mark_buffer_dirty(dst); 2886 2887 return ret; 2888 } 2889 2890 /* 2891 * try to push data from one node into the next node right in the 2892 * tree. 2893 * 2894 * returns 0 if some ptrs were pushed, < 0 if there was some horrible 2895 * error, and > 0 if there was no room in the right hand block. 2896 * 2897 * this will only push up to 1/2 the contents of the left node over 2898 */ 2899 static int balance_node_right(struct btrfs_trans_handle *trans, 2900 struct extent_buffer *dst, 2901 struct extent_buffer *src) 2902 { 2903 struct btrfs_fs_info *fs_info = trans->fs_info; 2904 int push_items = 0; 2905 int max_push; 2906 int src_nritems; 2907 int dst_nritems; 2908 int ret = 0; 2909 2910 WARN_ON(btrfs_header_generation(src) != trans->transid); 2911 WARN_ON(btrfs_header_generation(dst) != trans->transid); 2912 2913 src_nritems = btrfs_header_nritems(src); 2914 dst_nritems = btrfs_header_nritems(dst); 2915 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems; 2916 if (push_items <= 0) 2917 return 1; 2918 2919 if (src_nritems < 4) 2920 return 1; 2921 2922 max_push = src_nritems / 2 + 1; 2923 /* don't try to empty the node */ 2924 if (max_push >= src_nritems) 2925 return 1; 2926 2927 if (max_push < push_items) 2928 push_items = max_push; 2929 2930 /* dst is the right eb, src is the middle eb */ 2931 if (check_sibling_keys(src, dst)) { 2932 ret = -EUCLEAN; 2933 btrfs_abort_transaction(trans, ret); 2934 return ret; 2935 } 2936 2937 /* 2938 * btrfs_tree_mod_log_eb_copy handles logging the move, so we don't 2939 * need to do an explicit tree mod log operation for it. 2940 */ 2941 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(dst, push_items), 2942 btrfs_node_key_ptr_offset(dst, 0), 2943 (dst_nritems) * 2944 sizeof(struct btrfs_key_ptr)); 2945 2946 ret = btrfs_tree_mod_log_eb_copy(dst, src, 0, src_nritems - push_items, 2947 push_items); 2948 if (ret) { 2949 btrfs_abort_transaction(trans, ret); 2950 return ret; 2951 } 2952 copy_extent_buffer(dst, src, 2953 btrfs_node_key_ptr_offset(dst, 0), 2954 btrfs_node_key_ptr_offset(src, src_nritems - push_items), 2955 push_items * sizeof(struct btrfs_key_ptr)); 2956 2957 btrfs_set_header_nritems(src, src_nritems - push_items); 2958 btrfs_set_header_nritems(dst, dst_nritems + push_items); 2959 2960 btrfs_mark_buffer_dirty(src); 2961 btrfs_mark_buffer_dirty(dst); 2962 2963 return ret; 2964 } 2965 2966 /* 2967 * helper function to insert a new root level in the tree. 2968 * A new node is allocated, and a single item is inserted to 2969 * point to the existing root 2970 * 2971 * returns zero on success or < 0 on failure. 
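 * After this the tree is one level taller: the old root becomes the only
 * child (slot 0) of the newly allocated node, and the path is updated so
 * that path->nodes[level] points to the new root with a write lock held.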
2972 */ 2973 static noinline int insert_new_root(struct btrfs_trans_handle *trans, 2974 struct btrfs_root *root, 2975 struct btrfs_path *path, int level) 2976 { 2977 struct btrfs_fs_info *fs_info = root->fs_info; 2978 u64 lower_gen; 2979 struct extent_buffer *lower; 2980 struct extent_buffer *c; 2981 struct extent_buffer *old; 2982 struct btrfs_disk_key lower_key; 2983 int ret; 2984 2985 BUG_ON(path->nodes[level]); 2986 BUG_ON(path->nodes[level-1] != root->node); 2987 2988 lower = path->nodes[level-1]; 2989 if (level == 1) 2990 btrfs_item_key(lower, &lower_key, 0); 2991 else 2992 btrfs_node_key(lower, &lower_key, 0); 2993 2994 c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, 2995 &lower_key, level, root->node->start, 0, 2996 BTRFS_NESTING_NEW_ROOT); 2997 if (IS_ERR(c)) 2998 return PTR_ERR(c); 2999 3000 root_add_used(root, fs_info->nodesize); 3001 3002 btrfs_set_header_nritems(c, 1); 3003 btrfs_set_node_key(c, &lower_key, 0); 3004 btrfs_set_node_blockptr(c, 0, lower->start); 3005 lower_gen = btrfs_header_generation(lower); 3006 WARN_ON(lower_gen != trans->transid); 3007 3008 btrfs_set_node_ptr_generation(c, 0, lower_gen); 3009 3010 btrfs_mark_buffer_dirty(c); 3011 3012 old = root->node; 3013 ret = btrfs_tree_mod_log_insert_root(root->node, c, false); 3014 if (ret < 0) { 3015 btrfs_free_tree_block(trans, btrfs_root_id(root), c, 0, 1); 3016 btrfs_tree_unlock(c); 3017 free_extent_buffer(c); 3018 return ret; 3019 } 3020 rcu_assign_pointer(root->node, c); 3021 3022 /* the super has an extra ref to root->node */ 3023 free_extent_buffer(old); 3024 3025 add_root_to_dirty_list(root); 3026 atomic_inc(&c->refs); 3027 path->nodes[level] = c; 3028 path->locks[level] = BTRFS_WRITE_LOCK; 3029 path->slots[level] = 0; 3030 return 0; 3031 } 3032 3033 /* 3034 * worker function to insert a single pointer in a node. 3035 * the node should have enough room for the pointer already 3036 * 3037 * slot and level indicate where you want the key to go, and 3038 * blocknr is the block the key points to. 3039 */ 3040 static int insert_ptr(struct btrfs_trans_handle *trans, 3041 struct btrfs_path *path, 3042 struct btrfs_disk_key *key, u64 bytenr, 3043 int slot, int level) 3044 { 3045 struct extent_buffer *lower; 3046 int nritems; 3047 int ret; 3048 3049 BUG_ON(!path->nodes[level]); 3050 btrfs_assert_tree_write_locked(path->nodes[level]); 3051 lower = path->nodes[level]; 3052 nritems = btrfs_header_nritems(lower); 3053 BUG_ON(slot > nritems); 3054 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(trans->fs_info)); 3055 if (slot != nritems) { 3056 if (level) { 3057 ret = btrfs_tree_mod_log_insert_move(lower, slot + 1, 3058 slot, nritems - slot); 3059 if (ret < 0) { 3060 btrfs_abort_transaction(trans, ret); 3061 return ret; 3062 } 3063 } 3064 memmove_extent_buffer(lower, 3065 btrfs_node_key_ptr_offset(lower, slot + 1), 3066 btrfs_node_key_ptr_offset(lower, slot), 3067 (nritems - slot) * sizeof(struct btrfs_key_ptr)); 3068 } 3069 if (level) { 3070 ret = btrfs_tree_mod_log_insert_key(lower, slot, 3071 BTRFS_MOD_LOG_KEY_ADD); 3072 if (ret < 0) { 3073 btrfs_abort_transaction(trans, ret); 3074 return ret; 3075 } 3076 } 3077 btrfs_set_node_key(lower, key, slot); 3078 btrfs_set_node_blockptr(lower, slot, bytenr); 3079 WARN_ON(trans->transid == 0); 3080 btrfs_set_node_ptr_generation(lower, slot, trans->transid); 3081 btrfs_set_header_nritems(lower, nritems + 1); 3082 btrfs_mark_buffer_dirty(lower); 3083 3084 return 0; 3085 } 3086 3087 /* 3088 * split the node at the specified level in path in two. 
3089 * The path is corrected to point to the appropriate node after the split 3090 * 3091 * Before splitting this tries to make some room in the node by pushing 3092 * left and right, if either one works, it returns right away. 3093 * 3094 * returns 0 on success and < 0 on failure 3095 */ 3096 static noinline int split_node(struct btrfs_trans_handle *trans, 3097 struct btrfs_root *root, 3098 struct btrfs_path *path, int level) 3099 { 3100 struct btrfs_fs_info *fs_info = root->fs_info; 3101 struct extent_buffer *c; 3102 struct extent_buffer *split; 3103 struct btrfs_disk_key disk_key; 3104 int mid; 3105 int ret; 3106 u32 c_nritems; 3107 3108 c = path->nodes[level]; 3109 WARN_ON(btrfs_header_generation(c) != trans->transid); 3110 if (c == root->node) { 3111 /* 3112 * trying to split the root, lets make a new one 3113 * 3114 * tree mod log: We don't log_removal old root in 3115 * insert_new_root, because that root buffer will be kept as a 3116 * normal node. We are going to log removal of half of the 3117 * elements below with btrfs_tree_mod_log_eb_copy(). We're 3118 * holding a tree lock on the buffer, which is why we cannot 3119 * race with other tree_mod_log users. 3120 */ 3121 ret = insert_new_root(trans, root, path, level + 1); 3122 if (ret) 3123 return ret; 3124 } else { 3125 ret = push_nodes_for_insert(trans, root, path, level); 3126 c = path->nodes[level]; 3127 if (!ret && btrfs_header_nritems(c) < 3128 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) 3129 return 0; 3130 if (ret < 0) 3131 return ret; 3132 } 3133 3134 c_nritems = btrfs_header_nritems(c); 3135 mid = (c_nritems + 1) / 2; 3136 btrfs_node_key(c, &disk_key, mid); 3137 3138 split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, 3139 &disk_key, level, c->start, 0, 3140 BTRFS_NESTING_SPLIT); 3141 if (IS_ERR(split)) 3142 return PTR_ERR(split); 3143 3144 root_add_used(root, fs_info->nodesize); 3145 ASSERT(btrfs_header_level(c) == level); 3146 3147 ret = btrfs_tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid); 3148 if (ret) { 3149 btrfs_tree_unlock(split); 3150 free_extent_buffer(split); 3151 btrfs_abort_transaction(trans, ret); 3152 return ret; 3153 } 3154 copy_extent_buffer(split, c, 3155 btrfs_node_key_ptr_offset(split, 0), 3156 btrfs_node_key_ptr_offset(c, mid), 3157 (c_nritems - mid) * sizeof(struct btrfs_key_ptr)); 3158 btrfs_set_header_nritems(split, c_nritems - mid); 3159 btrfs_set_header_nritems(c, mid); 3160 3161 btrfs_mark_buffer_dirty(c); 3162 btrfs_mark_buffer_dirty(split); 3163 3164 ret = insert_ptr(trans, path, &disk_key, split->start, 3165 path->slots[level + 1] + 1, level + 1); 3166 if (ret < 0) { 3167 btrfs_tree_unlock(split); 3168 free_extent_buffer(split); 3169 return ret; 3170 } 3171 3172 if (path->slots[level] >= mid) { 3173 path->slots[level] -= mid; 3174 btrfs_tree_unlock(c); 3175 free_extent_buffer(c); 3176 path->nodes[level] = split; 3177 path->slots[level + 1] += 1; 3178 } else { 3179 btrfs_tree_unlock(split); 3180 free_extent_buffer(split); 3181 } 3182 return 0; 3183 } 3184 3185 /* 3186 * how many bytes are required to store the items in a leaf. start 3187 * and nr indicate which items in the leaf to check. 
This totals up the 3188 * space used both by the item structs and the item data 3189 */ 3190 static int leaf_space_used(const struct extent_buffer *l, int start, int nr) 3191 { 3192 int data_len; 3193 int nritems = btrfs_header_nritems(l); 3194 int end = min(nritems, start + nr) - 1; 3195 3196 if (!nr) 3197 return 0; 3198 data_len = btrfs_item_offset(l, start) + btrfs_item_size(l, start); 3199 data_len = data_len - btrfs_item_offset(l, end); 3200 data_len += sizeof(struct btrfs_item) * nr; 3201 WARN_ON(data_len < 0); 3202 return data_len; 3203 } 3204 3205 /* 3206 * The space between the end of the leaf items and 3207 * the start of the leaf data. IOW, how much room 3208 * the leaf has left for both items and data 3209 */ 3210 int btrfs_leaf_free_space(const struct extent_buffer *leaf) 3211 { 3212 struct btrfs_fs_info *fs_info = leaf->fs_info; 3213 int nritems = btrfs_header_nritems(leaf); 3214 int ret; 3215 3216 ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems); 3217 if (ret < 0) { 3218 btrfs_crit(fs_info, 3219 "leaf free space ret %d, leaf data size %lu, used %d nritems %d", 3220 ret, 3221 (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info), 3222 leaf_space_used(leaf, 0, nritems), nritems); 3223 } 3224 return ret; 3225 } 3226 3227 /* 3228 * min slot controls the lowest index we're willing to push to the 3229 * right. We'll push up to and including min_slot, but no lower 3230 */ 3231 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans, 3232 struct btrfs_path *path, 3233 int data_size, int empty, 3234 struct extent_buffer *right, 3235 int free_space, u32 left_nritems, 3236 u32 min_slot) 3237 { 3238 struct btrfs_fs_info *fs_info = right->fs_info; 3239 struct extent_buffer *left = path->nodes[0]; 3240 struct extent_buffer *upper = path->nodes[1]; 3241 struct btrfs_map_token token; 3242 struct btrfs_disk_key disk_key; 3243 int slot; 3244 u32 i; 3245 int push_space = 0; 3246 int push_items = 0; 3247 u32 nr; 3248 u32 right_nritems; 3249 u32 data_end; 3250 u32 this_item_size; 3251 3252 if (empty) 3253 nr = 0; 3254 else 3255 nr = max_t(u32, 1, min_slot); 3256 3257 if (path->slots[0] >= left_nritems) 3258 push_space += data_size; 3259 3260 slot = path->slots[1]; 3261 i = left_nritems - 1; 3262 while (i >= nr) { 3263 if (!empty && push_items > 0) { 3264 if (path->slots[0] > i) 3265 break; 3266 if (path->slots[0] == i) { 3267 int space = btrfs_leaf_free_space(left); 3268 3269 if (space + push_space * 2 > free_space) 3270 break; 3271 } 3272 } 3273 3274 if (path->slots[0] == i) 3275 push_space += data_size; 3276 3277 this_item_size = btrfs_item_size(left, i); 3278 if (this_item_size + sizeof(struct btrfs_item) + 3279 push_space > free_space) 3280 break; 3281 3282 push_items++; 3283 push_space += this_item_size + sizeof(struct btrfs_item); 3284 if (i == 0) 3285 break; 3286 i--; 3287 } 3288 3289 if (push_items == 0) 3290 goto out_unlock; 3291 3292 WARN_ON(!empty && push_items == left_nritems); 3293 3294 /* push left to right */ 3295 right_nritems = btrfs_header_nritems(right); 3296 3297 push_space = btrfs_item_data_end(left, left_nritems - push_items); 3298 push_space -= leaf_data_end(left); 3299 3300 /* make room in the right data area */ 3301 data_end = leaf_data_end(right); 3302 memmove_leaf_data(right, data_end - push_space, data_end, 3303 BTRFS_LEAF_DATA_SIZE(fs_info) - data_end); 3304 3305 /* copy from the left data area */ 3306 copy_leaf_data(right, left, BTRFS_LEAF_DATA_SIZE(fs_info) - push_space, 3307 leaf_data_end(left), push_space); 3308 3309 
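/* make room at the front of right's item array for the items pushed from left */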
memmove_leaf_items(right, push_items, 0, right_nritems); 3310 3311 /* copy the items from left to right */ 3312 copy_leaf_items(right, left, 0, left_nritems - push_items, push_items); 3313 3314 /* update the item pointers */ 3315 btrfs_init_map_token(&token, right); 3316 right_nritems += push_items; 3317 btrfs_set_header_nritems(right, right_nritems); 3318 push_space = BTRFS_LEAF_DATA_SIZE(fs_info); 3319 for (i = 0; i < right_nritems; i++) { 3320 push_space -= btrfs_token_item_size(&token, i); 3321 btrfs_set_token_item_offset(&token, i, push_space); 3322 } 3323 3324 left_nritems -= push_items; 3325 btrfs_set_header_nritems(left, left_nritems); 3326 3327 if (left_nritems) 3328 btrfs_mark_buffer_dirty(left); 3329 else 3330 btrfs_clear_buffer_dirty(trans, left); 3331 3332 btrfs_mark_buffer_dirty(right); 3333 3334 btrfs_item_key(right, &disk_key, 0); 3335 btrfs_set_node_key(upper, &disk_key, slot + 1); 3336 btrfs_mark_buffer_dirty(upper); 3337 3338 /* then fixup the leaf pointer in the path */ 3339 if (path->slots[0] >= left_nritems) { 3340 path->slots[0] -= left_nritems; 3341 if (btrfs_header_nritems(path->nodes[0]) == 0) 3342 btrfs_clear_buffer_dirty(trans, path->nodes[0]); 3343 btrfs_tree_unlock(path->nodes[0]); 3344 free_extent_buffer(path->nodes[0]); 3345 path->nodes[0] = right; 3346 path->slots[1] += 1; 3347 } else { 3348 btrfs_tree_unlock(right); 3349 free_extent_buffer(right); 3350 } 3351 return 0; 3352 3353 out_unlock: 3354 btrfs_tree_unlock(right); 3355 free_extent_buffer(right); 3356 return 1; 3357 } 3358 3359 /* 3360 * push some data in the path leaf to the right, trying to free up at 3361 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3362 * 3363 * returns 1 if the push failed because the other node didn't have enough 3364 * room, 0 if everything worked out and < 0 if there were major errors. 3365 * 3366 * this will push starting from min_slot to the end of the leaf. 
It won't 3367 * push any slot lower than min_slot 3368 */ 3369 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root 3370 *root, struct btrfs_path *path, 3371 int min_data_size, int data_size, 3372 int empty, u32 min_slot) 3373 { 3374 struct extent_buffer *left = path->nodes[0]; 3375 struct extent_buffer *right; 3376 struct extent_buffer *upper; 3377 int slot; 3378 int free_space; 3379 u32 left_nritems; 3380 int ret; 3381 3382 if (!path->nodes[1]) 3383 return 1; 3384 3385 slot = path->slots[1]; 3386 upper = path->nodes[1]; 3387 if (slot >= btrfs_header_nritems(upper) - 1) 3388 return 1; 3389 3390 btrfs_assert_tree_write_locked(path->nodes[1]); 3391 3392 right = btrfs_read_node_slot(upper, slot + 1); 3393 if (IS_ERR(right)) 3394 return PTR_ERR(right); 3395 3396 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT); 3397 3398 free_space = btrfs_leaf_free_space(right); 3399 if (free_space < data_size) 3400 goto out_unlock; 3401 3402 ret = btrfs_cow_block(trans, root, right, upper, 3403 slot + 1, &right, BTRFS_NESTING_RIGHT_COW); 3404 if (ret) 3405 goto out_unlock; 3406 3407 left_nritems = btrfs_header_nritems(left); 3408 if (left_nritems == 0) 3409 goto out_unlock; 3410 3411 if (check_sibling_keys(left, right)) { 3412 ret = -EUCLEAN; 3413 btrfs_abort_transaction(trans, ret); 3414 btrfs_tree_unlock(right); 3415 free_extent_buffer(right); 3416 return ret; 3417 } 3418 if (path->slots[0] == left_nritems && !empty) { 3419 /* Key greater than all keys in the leaf, right neighbor has 3420 * enough room for it and we're not emptying our leaf to delete 3421 * it, therefore use right neighbor to insert the new item and 3422 * no need to touch/dirty our left leaf. */ 3423 btrfs_tree_unlock(left); 3424 free_extent_buffer(left); 3425 path->nodes[0] = right; 3426 path->slots[0] = 0; 3427 path->slots[1]++; 3428 return 0; 3429 } 3430 3431 return __push_leaf_right(trans, path, min_data_size, empty, right, 3432 free_space, left_nritems, min_slot); 3433 out_unlock: 3434 btrfs_tree_unlock(right); 3435 free_extent_buffer(right); 3436 return 1; 3437 } 3438 3439 /* 3440 * push some data in the path leaf to the left, trying to free up at 3441 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3442 * 3443 * max_slot can put a limit on how far into the leaf we'll push items. The 3444 * item at 'max_slot' won't be touched. 
Use (u32)-1 to make us do all the 3445 * items 3446 */ 3447 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans, 3448 struct btrfs_path *path, int data_size, 3449 int empty, struct extent_buffer *left, 3450 int free_space, u32 right_nritems, 3451 u32 max_slot) 3452 { 3453 struct btrfs_fs_info *fs_info = left->fs_info; 3454 struct btrfs_disk_key disk_key; 3455 struct extent_buffer *right = path->nodes[0]; 3456 int i; 3457 int push_space = 0; 3458 int push_items = 0; 3459 u32 old_left_nritems; 3460 u32 nr; 3461 int ret = 0; 3462 u32 this_item_size; 3463 u32 old_left_item_size; 3464 struct btrfs_map_token token; 3465 3466 if (empty) 3467 nr = min(right_nritems, max_slot); 3468 else 3469 nr = min(right_nritems - 1, max_slot); 3470 3471 for (i = 0; i < nr; i++) { 3472 if (!empty && push_items > 0) { 3473 if (path->slots[0] < i) 3474 break; 3475 if (path->slots[0] == i) { 3476 int space = btrfs_leaf_free_space(right); 3477 3478 if (space + push_space * 2 > free_space) 3479 break; 3480 } 3481 } 3482 3483 if (path->slots[0] == i) 3484 push_space += data_size; 3485 3486 this_item_size = btrfs_item_size(right, i); 3487 if (this_item_size + sizeof(struct btrfs_item) + push_space > 3488 free_space) 3489 break; 3490 3491 push_items++; 3492 push_space += this_item_size + sizeof(struct btrfs_item); 3493 } 3494 3495 if (push_items == 0) { 3496 ret = 1; 3497 goto out; 3498 } 3499 WARN_ON(!empty && push_items == btrfs_header_nritems(right)); 3500 3501 /* push data from right to left */ 3502 copy_leaf_items(left, right, btrfs_header_nritems(left), 0, push_items); 3503 3504 push_space = BTRFS_LEAF_DATA_SIZE(fs_info) - 3505 btrfs_item_offset(right, push_items - 1); 3506 3507 copy_leaf_data(left, right, leaf_data_end(left) - push_space, 3508 btrfs_item_offset(right, push_items - 1), push_space); 3509 old_left_nritems = btrfs_header_nritems(left); 3510 BUG_ON(old_left_nritems <= 0); 3511 3512 btrfs_init_map_token(&token, left); 3513 old_left_item_size = btrfs_item_offset(left, old_left_nritems - 1); 3514 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) { 3515 u32 ioff; 3516 3517 ioff = btrfs_token_item_offset(&token, i); 3518 btrfs_set_token_item_offset(&token, i, 3519 ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size)); 3520 } 3521 btrfs_set_header_nritems(left, old_left_nritems + push_items); 3522 3523 /* fixup right node */ 3524 if (push_items > right_nritems) 3525 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items, 3526 right_nritems); 3527 3528 if (push_items < right_nritems) { 3529 push_space = btrfs_item_offset(right, push_items - 1) - 3530 leaf_data_end(right); 3531 memmove_leaf_data(right, 3532 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space, 3533 leaf_data_end(right), push_space); 3534 3535 memmove_leaf_items(right, 0, push_items, 3536 btrfs_header_nritems(right) - push_items); 3537 } 3538 3539 btrfs_init_map_token(&token, right); 3540 right_nritems -= push_items; 3541 btrfs_set_header_nritems(right, right_nritems); 3542 push_space = BTRFS_LEAF_DATA_SIZE(fs_info); 3543 for (i = 0; i < right_nritems; i++) { 3544 push_space = push_space - btrfs_token_item_size(&token, i); 3545 btrfs_set_token_item_offset(&token, i, push_space); 3546 } 3547 3548 btrfs_mark_buffer_dirty(left); 3549 if (right_nritems) 3550 btrfs_mark_buffer_dirty(right); 3551 else 3552 btrfs_clear_buffer_dirty(trans, right); 3553 3554 btrfs_item_key(right, &disk_key, 0); 3555 fixup_low_keys(path, &disk_key, 1); 3556 3557 /* then fixup the leaf pointer in the path */ 3558 if (path->slots[0] < 
push_items) { 3559 path->slots[0] += old_left_nritems; 3560 btrfs_tree_unlock(path->nodes[0]); 3561 free_extent_buffer(path->nodes[0]); 3562 path->nodes[0] = left; 3563 path->slots[1] -= 1; 3564 } else { 3565 btrfs_tree_unlock(left); 3566 free_extent_buffer(left); 3567 path->slots[0] -= push_items; 3568 } 3569 BUG_ON(path->slots[0] < 0); 3570 return ret; 3571 out: 3572 btrfs_tree_unlock(left); 3573 free_extent_buffer(left); 3574 return ret; 3575 } 3576 3577 /* 3578 * push some data in the path leaf to the left, trying to free up at 3579 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3580 * 3581 * max_slot can put a limit on how far into the leaf we'll push items. The 3582 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the 3583 * items 3584 */ 3585 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root 3586 *root, struct btrfs_path *path, int min_data_size, 3587 int data_size, int empty, u32 max_slot) 3588 { 3589 struct extent_buffer *right = path->nodes[0]; 3590 struct extent_buffer *left; 3591 int slot; 3592 int free_space; 3593 u32 right_nritems; 3594 int ret = 0; 3595 3596 slot = path->slots[1]; 3597 if (slot == 0) 3598 return 1; 3599 if (!path->nodes[1]) 3600 return 1; 3601 3602 right_nritems = btrfs_header_nritems(right); 3603 if (right_nritems == 0) 3604 return 1; 3605 3606 btrfs_assert_tree_write_locked(path->nodes[1]); 3607 3608 left = btrfs_read_node_slot(path->nodes[1], slot - 1); 3609 if (IS_ERR(left)) 3610 return PTR_ERR(left); 3611 3612 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT); 3613 3614 free_space = btrfs_leaf_free_space(left); 3615 if (free_space < data_size) { 3616 ret = 1; 3617 goto out; 3618 } 3619 3620 ret = btrfs_cow_block(trans, root, left, 3621 path->nodes[1], slot - 1, &left, 3622 BTRFS_NESTING_LEFT_COW); 3623 if (ret) { 3624 /* we hit -ENOSPC, but it isn't fatal here */ 3625 if (ret == -ENOSPC) 3626 ret = 1; 3627 goto out; 3628 } 3629 3630 if (check_sibling_keys(left, right)) { 3631 ret = -EUCLEAN; 3632 btrfs_abort_transaction(trans, ret); 3633 goto out; 3634 } 3635 return __push_leaf_left(trans, path, min_data_size, empty, left, 3636 free_space, right_nritems, max_slot); 3637 out: 3638 btrfs_tree_unlock(left); 3639 free_extent_buffer(left); 3640 return ret; 3641 } 3642 3643 /* 3644 * split the path's leaf in two, making sure there is at least data_size 3645 * available for the resulting leaf level of the path. 
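 * (This helper does the copy half of the job: it moves the items from @mid
 * onwards of leaf @l into the new leaf @right, inserts the pointer to @right
 * into the parent and fixes up the path; the decision about where to split
 * is made by the caller, split_leaf().)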
3646 */ 3647 static noinline int copy_for_split(struct btrfs_trans_handle *trans, 3648 struct btrfs_path *path, 3649 struct extent_buffer *l, 3650 struct extent_buffer *right, 3651 int slot, int mid, int nritems) 3652 { 3653 struct btrfs_fs_info *fs_info = trans->fs_info; 3654 int data_copy_size; 3655 int rt_data_off; 3656 int i; 3657 int ret; 3658 struct btrfs_disk_key disk_key; 3659 struct btrfs_map_token token; 3660 3661 nritems = nritems - mid; 3662 btrfs_set_header_nritems(right, nritems); 3663 data_copy_size = btrfs_item_data_end(l, mid) - leaf_data_end(l); 3664 3665 copy_leaf_items(right, l, 0, mid, nritems); 3666 3667 copy_leaf_data(right, l, BTRFS_LEAF_DATA_SIZE(fs_info) - data_copy_size, 3668 leaf_data_end(l), data_copy_size); 3669 3670 rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_data_end(l, mid); 3671 3672 btrfs_init_map_token(&token, right); 3673 for (i = 0; i < nritems; i++) { 3674 u32 ioff; 3675 3676 ioff = btrfs_token_item_offset(&token, i); 3677 btrfs_set_token_item_offset(&token, i, ioff + rt_data_off); 3678 } 3679 3680 btrfs_set_header_nritems(l, mid); 3681 btrfs_item_key(right, &disk_key, 0); 3682 ret = insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1); 3683 if (ret < 0) 3684 return ret; 3685 3686 btrfs_mark_buffer_dirty(right); 3687 btrfs_mark_buffer_dirty(l); 3688 BUG_ON(path->slots[0] != slot); 3689 3690 if (mid <= slot) { 3691 btrfs_tree_unlock(path->nodes[0]); 3692 free_extent_buffer(path->nodes[0]); 3693 path->nodes[0] = right; 3694 path->slots[0] -= mid; 3695 path->slots[1] += 1; 3696 } else { 3697 btrfs_tree_unlock(right); 3698 free_extent_buffer(right); 3699 } 3700 3701 BUG_ON(path->slots[0] < 0); 3702 3703 return 0; 3704 } 3705 3706 /* 3707 * double splits happen when we need to insert a big item in the middle 3708 * of a leaf. A double split can leave us with 3 mostly empty leaves: 3709 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ] 3710 * A B C 3711 * 3712 * We avoid this by trying to push the items on either side of our target 3713 * into the adjacent leaves. If all goes well we can avoid the double split 3714 * completely. 3715 */ 3716 static noinline int push_for_double_split(struct btrfs_trans_handle *trans, 3717 struct btrfs_root *root, 3718 struct btrfs_path *path, 3719 int data_size) 3720 { 3721 int ret; 3722 int progress = 0; 3723 int slot; 3724 u32 nritems; 3725 int space_needed = data_size; 3726 3727 slot = path->slots[0]; 3728 if (slot < btrfs_header_nritems(path->nodes[0])) 3729 space_needed -= btrfs_leaf_free_space(path->nodes[0]); 3730 3731 /* 3732 * try to push all the items after our slot into the 3733 * right leaf 3734 */ 3735 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot); 3736 if (ret < 0) 3737 return ret; 3738 3739 if (ret == 0) 3740 progress++; 3741 3742 nritems = btrfs_header_nritems(path->nodes[0]); 3743 /* 3744 * our goal is to get our slot at the start or end of a leaf. 
If 3745 * we've done so we're done 3746 */ 3747 if (path->slots[0] == 0 || path->slots[0] == nritems) 3748 return 0; 3749 3750 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size) 3751 return 0; 3752 3753 /* try to push all the items before our slot into the next leaf */ 3754 slot = path->slots[0]; 3755 space_needed = data_size; 3756 if (slot > 0) 3757 space_needed -= btrfs_leaf_free_space(path->nodes[0]); 3758 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot); 3759 if (ret < 0) 3760 return ret; 3761 3762 if (ret == 0) 3763 progress++; 3764 3765 if (progress) 3766 return 0; 3767 return 1; 3768 } 3769 3770 /* 3771 * split the path's leaf in two, making sure there is at least data_size 3772 * available for the resulting leaf level of the path. 3773 * 3774 * returns 0 if all went well and < 0 on failure. 3775 */ 3776 static noinline int split_leaf(struct btrfs_trans_handle *trans, 3777 struct btrfs_root *root, 3778 const struct btrfs_key *ins_key, 3779 struct btrfs_path *path, int data_size, 3780 int extend) 3781 { 3782 struct btrfs_disk_key disk_key; 3783 struct extent_buffer *l; 3784 u32 nritems; 3785 int mid; 3786 int slot; 3787 struct extent_buffer *right; 3788 struct btrfs_fs_info *fs_info = root->fs_info; 3789 int ret = 0; 3790 int wret; 3791 int split; 3792 int num_doubles = 0; 3793 int tried_avoid_double = 0; 3794 3795 l = path->nodes[0]; 3796 slot = path->slots[0]; 3797 if (extend && data_size + btrfs_item_size(l, slot) + 3798 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info)) 3799 return -EOVERFLOW; 3800 3801 /* first try to make some room by pushing left and right */ 3802 if (data_size && path->nodes[1]) { 3803 int space_needed = data_size; 3804 3805 if (slot < btrfs_header_nritems(l)) 3806 space_needed -= btrfs_leaf_free_space(l); 3807 3808 wret = push_leaf_right(trans, root, path, space_needed, 3809 space_needed, 0, 0); 3810 if (wret < 0) 3811 return wret; 3812 if (wret) { 3813 space_needed = data_size; 3814 if (slot > 0) 3815 space_needed -= btrfs_leaf_free_space(l); 3816 wret = push_leaf_left(trans, root, path, space_needed, 3817 space_needed, 0, (u32)-1); 3818 if (wret < 0) 3819 return wret; 3820 } 3821 l = path->nodes[0]; 3822 3823 /* did the pushes work? 
*/
3824 if (btrfs_leaf_free_space(l) >= data_size)
3825 return 0;
3826 }
3827
3828 if (!path->nodes[1]) {
3829 ret = insert_new_root(trans, root, path, 1);
3830 if (ret)
3831 return ret;
3832 }
3833 again:
3834 split = 1;
3835 l = path->nodes[0];
3836 slot = path->slots[0];
3837 nritems = btrfs_header_nritems(l);
3838 mid = (nritems + 1) / 2;
3839
3840 if (mid <= slot) {
3841 if (nritems == 1 ||
3842 leaf_space_used(l, mid, nritems - mid) + data_size >
3843 BTRFS_LEAF_DATA_SIZE(fs_info)) {
3844 if (slot >= nritems) {
3845 split = 0;
3846 } else {
3847 mid = slot;
3848 if (mid != nritems &&
3849 leaf_space_used(l, mid, nritems - mid) +
3850 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
3851 if (data_size && !tried_avoid_double)
3852 goto push_for_double;
3853 split = 2;
3854 }
3855 }
3856 }
3857 } else {
3858 if (leaf_space_used(l, 0, mid) + data_size >
3859 BTRFS_LEAF_DATA_SIZE(fs_info)) {
3860 if (!extend && data_size && slot == 0) {
3861 split = 0;
3862 } else if ((extend || !data_size) && slot == 0) {
3863 mid = 1;
3864 } else {
3865 mid = slot;
3866 if (mid != nritems &&
3867 leaf_space_used(l, mid, nritems - mid) +
3868 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
3869 if (data_size && !tried_avoid_double)
3870 goto push_for_double;
3871 split = 2;
3872 }
3873 }
3874 }
3875 }
3876
3877 if (split == 0)
3878 btrfs_cpu_key_to_disk(&disk_key, ins_key);
3879 else
3880 btrfs_item_key(l, &disk_key, mid);
3881
3882 /*
3883 * We have to use BTRFS_NESTING_NEW_ROOT here if we've done a double
3884 * split, because we're only allowed to have MAX_LOCKDEP_SUBCLASSES
3885 * subclasses, which is 8 at the time of this patch, and we've maxed it
3886 * out. In the future we could add a
3887 * BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just
3888 * use BTRFS_NESTING_NEW_ROOT.
3889 */
3890 right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3891 &disk_key, 0, l->start, 0,
3892 num_doubles ? BTRFS_NESTING_NEW_ROOT :
3893 BTRFS_NESTING_SPLIT);
3894 if (IS_ERR(right))
3895 return PTR_ERR(right);
3896
3897 root_add_used(root, fs_info->nodesize);
3898
3899 if (split == 0) {
3900 if (mid <= slot) {
3901 btrfs_set_header_nritems(right, 0);
3902 ret = insert_ptr(trans, path, &disk_key,
3903 right->start, path->slots[1] + 1, 1);
3904 if (ret < 0) {
3905 btrfs_tree_unlock(right);
3906 free_extent_buffer(right);
3907 return ret;
3908 }
3909 btrfs_tree_unlock(path->nodes[0]);
3910 free_extent_buffer(path->nodes[0]);
3911 path->nodes[0] = right;
3912 path->slots[0] = 0;
3913 path->slots[1] += 1;
3914 } else {
3915 btrfs_set_header_nritems(right, 0);
3916 ret = insert_ptr(trans, path, &disk_key,
3917 right->start, path->slots[1], 1);
3918 if (ret < 0) {
3919 btrfs_tree_unlock(right);
3920 free_extent_buffer(right);
3921 return ret;
3922 }
3923 btrfs_tree_unlock(path->nodes[0]);
3924 free_extent_buffer(path->nodes[0]);
3925 path->nodes[0] = right;
3926 path->slots[0] = 0;
3927 if (path->slots[1] == 0)
3928 fixup_low_keys(path, &disk_key, 1);
3929 }
3930 /*
3931 * We create a new leaf 'right' for the required ins_len and
3932 * we'll do btrfs_mark_buffer_dirty() on this leaf after copying
3933 * the content of ins_len to 'right'.
3934 */ 3935 return ret; 3936 } 3937 3938 ret = copy_for_split(trans, path, l, right, slot, mid, nritems); 3939 if (ret < 0) { 3940 btrfs_tree_unlock(right); 3941 free_extent_buffer(right); 3942 return ret; 3943 } 3944 3945 if (split == 2) { 3946 BUG_ON(num_doubles != 0); 3947 num_doubles++; 3948 goto again; 3949 } 3950 3951 return 0; 3952 3953 push_for_double: 3954 push_for_double_split(trans, root, path, data_size); 3955 tried_avoid_double = 1; 3956 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size) 3957 return 0; 3958 goto again; 3959 } 3960 3961 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans, 3962 struct btrfs_root *root, 3963 struct btrfs_path *path, int ins_len) 3964 { 3965 struct btrfs_key key; 3966 struct extent_buffer *leaf; 3967 struct btrfs_file_extent_item *fi; 3968 u64 extent_len = 0; 3969 u32 item_size; 3970 int ret; 3971 3972 leaf = path->nodes[0]; 3973 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3974 3975 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY && 3976 key.type != BTRFS_EXTENT_CSUM_KEY); 3977 3978 if (btrfs_leaf_free_space(leaf) >= ins_len) 3979 return 0; 3980 3981 item_size = btrfs_item_size(leaf, path->slots[0]); 3982 if (key.type == BTRFS_EXTENT_DATA_KEY) { 3983 fi = btrfs_item_ptr(leaf, path->slots[0], 3984 struct btrfs_file_extent_item); 3985 extent_len = btrfs_file_extent_num_bytes(leaf, fi); 3986 } 3987 btrfs_release_path(path); 3988 3989 path->keep_locks = 1; 3990 path->search_for_split = 1; 3991 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 3992 path->search_for_split = 0; 3993 if (ret > 0) 3994 ret = -EAGAIN; 3995 if (ret < 0) 3996 goto err; 3997 3998 ret = -EAGAIN; 3999 leaf = path->nodes[0]; 4000 /* if our item isn't there, return now */ 4001 if (item_size != btrfs_item_size(leaf, path->slots[0])) 4002 goto err; 4003 4004 /* the leaf has changed, it now has room. return now */ 4005 if (btrfs_leaf_free_space(path->nodes[0]) >= ins_len) 4006 goto err; 4007 4008 if (key.type == BTRFS_EXTENT_DATA_KEY) { 4009 fi = btrfs_item_ptr(leaf, path->slots[0], 4010 struct btrfs_file_extent_item); 4011 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi)) 4012 goto err; 4013 } 4014 4015 ret = split_leaf(trans, root, &key, path, ins_len, 1); 4016 if (ret) 4017 goto err; 4018 4019 path->keep_locks = 0; 4020 btrfs_unlock_up_safe(path, 1); 4021 return 0; 4022 err: 4023 path->keep_locks = 0; 4024 return ret; 4025 } 4026 4027 static noinline int split_item(struct btrfs_path *path, 4028 const struct btrfs_key *new_key, 4029 unsigned long split_offset) 4030 { 4031 struct extent_buffer *leaf; 4032 int orig_slot, slot; 4033 char *buf; 4034 u32 nritems; 4035 u32 item_size; 4036 u32 orig_offset; 4037 struct btrfs_disk_key disk_key; 4038 4039 leaf = path->nodes[0]; 4040 /* 4041 * Shouldn't happen because the caller must have previously called 4042 * setup_leaf_for_split() to make room for the new item in the leaf. 
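 *
 * Only sizeof(struct btrfs_item) of free space is required here because the
 * item's data is split in place and only one extra item header is added.
 *
 * A worked example (illustrative): splitting a 100 byte item at
 * split_offset 40 leaves the original slot with size 40 and creates a new
 * 60 byte item, carrying new_key, in the following slot; both keep pointing
 * into the original data area, which is simply divided in two.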
4043 */ 4044 if (WARN_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item))) 4045 return -ENOSPC; 4046 4047 orig_slot = path->slots[0]; 4048 orig_offset = btrfs_item_offset(leaf, path->slots[0]); 4049 item_size = btrfs_item_size(leaf, path->slots[0]); 4050 4051 buf = kmalloc(item_size, GFP_NOFS); 4052 if (!buf) 4053 return -ENOMEM; 4054 4055 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, 4056 path->slots[0]), item_size); 4057 4058 slot = path->slots[0] + 1; 4059 nritems = btrfs_header_nritems(leaf); 4060 if (slot != nritems) { 4061 /* shift the items */ 4062 memmove_leaf_items(leaf, slot + 1, slot, nritems - slot); 4063 } 4064 4065 btrfs_cpu_key_to_disk(&disk_key, new_key); 4066 btrfs_set_item_key(leaf, &disk_key, slot); 4067 4068 btrfs_set_item_offset(leaf, slot, orig_offset); 4069 btrfs_set_item_size(leaf, slot, item_size - split_offset); 4070 4071 btrfs_set_item_offset(leaf, orig_slot, 4072 orig_offset + item_size - split_offset); 4073 btrfs_set_item_size(leaf, orig_slot, split_offset); 4074 4075 btrfs_set_header_nritems(leaf, nritems + 1); 4076 4077 /* write the data for the start of the original item */ 4078 write_extent_buffer(leaf, buf, 4079 btrfs_item_ptr_offset(leaf, path->slots[0]), 4080 split_offset); 4081 4082 /* write the data for the new item */ 4083 write_extent_buffer(leaf, buf + split_offset, 4084 btrfs_item_ptr_offset(leaf, slot), 4085 item_size - split_offset); 4086 btrfs_mark_buffer_dirty(leaf); 4087 4088 BUG_ON(btrfs_leaf_free_space(leaf) < 0); 4089 kfree(buf); 4090 return 0; 4091 } 4092 4093 /* 4094 * This function splits a single item into two items, 4095 * giving 'new_key' to the new item and splitting the 4096 * old one at split_offset (from the start of the item). 4097 * 4098 * The path may be released by this operation. After 4099 * the split, the path is pointing to the old item. The 4100 * new item is going to be in the same node as the old one. 4101 * 4102 * Note, the item being split must be small enough to live alone in a 4103 * tree block, with room for one extra struct btrfs_item. 4104 * 4105 * This allows us to split the item in place, keeping a lock on the 4106 * leaf the entire time. 4107 */ 4108 int btrfs_split_item(struct btrfs_trans_handle *trans, 4109 struct btrfs_root *root, 4110 struct btrfs_path *path, 4111 const struct btrfs_key *new_key, 4112 unsigned long split_offset) 4113 { 4114 int ret; 4115 ret = setup_leaf_for_split(trans, root, path, 4116 sizeof(struct btrfs_item)); 4117 if (ret) 4118 return ret; 4119 4120 ret = split_item(path, new_key, split_offset); 4121 return ret; 4122 } 4123 4124 /* 4125 * make the item pointed to by the path smaller. new_size indicates 4126 * how small to make it, and from_end tells us if we just chop bytes 4127 * off the end of the item or if we shift the item to chop bytes off 4128 * the front.
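 *
 * A worked example (illustrative): truncating a 100 byte item to
 * new_size == 60 with from_end == 1 keeps the first 60 bytes and discards
 * the last 40; with from_end == 0 the last 60 bytes are kept instead, the
 * remaining data is shifted so it still ends where the item used to end,
 * and the key offset is bumped by the 40 bytes dropped from the front.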
4129 */ 4130 void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end) 4131 { 4132 int slot; 4133 struct extent_buffer *leaf; 4134 u32 nritems; 4135 unsigned int data_end; 4136 unsigned int old_data_start; 4137 unsigned int old_size; 4138 unsigned int size_diff; 4139 int i; 4140 struct btrfs_map_token token; 4141 4142 leaf = path->nodes[0]; 4143 slot = path->slots[0]; 4144 4145 old_size = btrfs_item_size(leaf, slot); 4146 if (old_size == new_size) 4147 return; 4148 4149 nritems = btrfs_header_nritems(leaf); 4150 data_end = leaf_data_end(leaf); 4151 4152 old_data_start = btrfs_item_offset(leaf, slot); 4153 4154 size_diff = old_size - new_size; 4155 4156 BUG_ON(slot < 0); 4157 BUG_ON(slot >= nritems); 4158 4159 /* 4160 * item0..itemN ... dataN.offset..dataN.size .. data0.size 4161 */ 4162 /* first correct the data pointers */ 4163 btrfs_init_map_token(&token, leaf); 4164 for (i = slot; i < nritems; i++) { 4165 u32 ioff; 4166 4167 ioff = btrfs_token_item_offset(&token, i); 4168 btrfs_set_token_item_offset(&token, i, ioff + size_diff); 4169 } 4170 4171 /* shift the data */ 4172 if (from_end) { 4173 memmove_leaf_data(leaf, data_end + size_diff, data_end, 4174 old_data_start + new_size - data_end); 4175 } else { 4176 struct btrfs_disk_key disk_key; 4177 u64 offset; 4178 4179 btrfs_item_key(leaf, &disk_key, slot); 4180 4181 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) { 4182 unsigned long ptr; 4183 struct btrfs_file_extent_item *fi; 4184 4185 fi = btrfs_item_ptr(leaf, slot, 4186 struct btrfs_file_extent_item); 4187 fi = (struct btrfs_file_extent_item *)( 4188 (unsigned long)fi - size_diff); 4189 4190 if (btrfs_file_extent_type(leaf, fi) == 4191 BTRFS_FILE_EXTENT_INLINE) { 4192 ptr = btrfs_item_ptr_offset(leaf, slot); 4193 memmove_extent_buffer(leaf, ptr, 4194 (unsigned long)fi, 4195 BTRFS_FILE_EXTENT_INLINE_DATA_START); 4196 } 4197 } 4198 4199 memmove_leaf_data(leaf, data_end + size_diff, data_end, 4200 old_data_start - data_end); 4201 4202 offset = btrfs_disk_key_offset(&disk_key); 4203 btrfs_set_disk_key_offset(&disk_key, offset + size_diff); 4204 btrfs_set_item_key(leaf, &disk_key, slot); 4205 if (slot == 0) 4206 fixup_low_keys(path, &disk_key, 1); 4207 } 4208 4209 btrfs_set_item_size(leaf, slot, new_size); 4210 btrfs_mark_buffer_dirty(leaf); 4211 4212 if (btrfs_leaf_free_space(leaf) < 0) { 4213 btrfs_print_leaf(leaf); 4214 BUG(); 4215 } 4216 } 4217 4218 /* 4219 * make the item pointed to by the path bigger, data_size is the added size. 4220 */ 4221 void btrfs_extend_item(struct btrfs_path *path, u32 data_size) 4222 { 4223 int slot; 4224 struct extent_buffer *leaf; 4225 u32 nritems; 4226 unsigned int data_end; 4227 unsigned int old_data; 4228 unsigned int old_size; 4229 int i; 4230 struct btrfs_map_token token; 4231 4232 leaf = path->nodes[0]; 4233 4234 nritems = btrfs_header_nritems(leaf); 4235 data_end = leaf_data_end(leaf); 4236 4237 if (btrfs_leaf_free_space(leaf) < data_size) { 4238 btrfs_print_leaf(leaf); 4239 BUG(); 4240 } 4241 slot = path->slots[0]; 4242 old_data = btrfs_item_data_end(leaf, slot); 4243 4244 BUG_ON(slot < 0); 4245 if (slot >= nritems) { 4246 btrfs_print_leaf(leaf); 4247 btrfs_crit(leaf->fs_info, "slot %d too large, nritems %d", 4248 slot, nritems); 4249 BUG(); 4250 } 4251 4252 /* 4253 * item0..itemN ... dataN.offset..dataN.size .. 
data0.size 4254 */ 4255 /* first correct the data pointers */ 4256 btrfs_init_map_token(&token, leaf); 4257 for (i = slot; i < nritems; i++) { 4258 u32 ioff; 4259 4260 ioff = btrfs_token_item_offset(&token, i); 4261 btrfs_set_token_item_offset(&token, i, ioff - data_size); 4262 } 4263 4264 /* shift the data */ 4265 memmove_leaf_data(leaf, data_end - data_size, data_end, 4266 old_data - data_end); 4267 4268 data_end = old_data; 4269 old_size = btrfs_item_size(leaf, slot); 4270 btrfs_set_item_size(leaf, slot, old_size + data_size); 4271 btrfs_mark_buffer_dirty(leaf); 4272 4273 if (btrfs_leaf_free_space(leaf) < 0) { 4274 btrfs_print_leaf(leaf); 4275 BUG(); 4276 } 4277 } 4278 4279 /* 4280 * Make space in the node before inserting one or more items. 4281 * 4282 * @root: root we are inserting items to 4283 * @path: points to the leaf/slot where we are going to insert new items 4284 * @batch: information about the batch of items to insert 4285 * 4286 * Main purpose is to save stack depth by doing the bulk of the work in a 4287 * function that doesn't call btrfs_search_slot 4288 */ 4289 static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path, 4290 const struct btrfs_item_batch *batch) 4291 { 4292 struct btrfs_fs_info *fs_info = root->fs_info; 4293 int i; 4294 u32 nritems; 4295 unsigned int data_end; 4296 struct btrfs_disk_key disk_key; 4297 struct extent_buffer *leaf; 4298 int slot; 4299 struct btrfs_map_token token; 4300 u32 total_size; 4301 4302 /* 4303 * Before anything else, update keys in the parent and other ancestors 4304 * if needed, then release the write locks on them, so that other tasks 4305 * can use them while we modify the leaf. 4306 */ 4307 if (path->slots[0] == 0) { 4308 btrfs_cpu_key_to_disk(&disk_key, &batch->keys[0]); 4309 fixup_low_keys(path, &disk_key, 1); 4310 } 4311 btrfs_unlock_up_safe(path, 1); 4312 4313 leaf = path->nodes[0]; 4314 slot = path->slots[0]; 4315 4316 nritems = btrfs_header_nritems(leaf); 4317 data_end = leaf_data_end(leaf); 4318 total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item)); 4319 4320 if (btrfs_leaf_free_space(leaf) < total_size) { 4321 btrfs_print_leaf(leaf); 4322 btrfs_crit(fs_info, "not enough freespace need %u have %d", 4323 total_size, btrfs_leaf_free_space(leaf)); 4324 BUG(); 4325 } 4326 4327 btrfs_init_map_token(&token, leaf); 4328 if (slot != nritems) { 4329 unsigned int old_data = btrfs_item_data_end(leaf, slot); 4330 4331 if (old_data < data_end) { 4332 btrfs_print_leaf(leaf); 4333 btrfs_crit(fs_info, 4334 "item at slot %d with data offset %u beyond data end of leaf %u", 4335 slot, old_data, data_end); 4336 BUG(); 4337 } 4338 /* 4339 * item0..itemN ... dataN.offset..dataN.size .. 
data0.size 4340 */ 4341 /* first correct the data pointers */ 4342 for (i = slot; i < nritems; i++) { 4343 u32 ioff; 4344 4345 ioff = btrfs_token_item_offset(&token, i); 4346 btrfs_set_token_item_offset(&token, i, 4347 ioff - batch->total_data_size); 4348 } 4349 /* shift the items */ 4350 memmove_leaf_items(leaf, slot + batch->nr, slot, nritems - slot); 4351 4352 /* shift the data */ 4353 memmove_leaf_data(leaf, data_end - batch->total_data_size, 4354 data_end, old_data - data_end); 4355 data_end = old_data; 4356 } 4357 4358 /* setup the item for the new data */ 4359 for (i = 0; i < batch->nr; i++) { 4360 btrfs_cpu_key_to_disk(&disk_key, &batch->keys[i]); 4361 btrfs_set_item_key(leaf, &disk_key, slot + i); 4362 data_end -= batch->data_sizes[i]; 4363 btrfs_set_token_item_offset(&token, slot + i, data_end); 4364 btrfs_set_token_item_size(&token, slot + i, batch->data_sizes[i]); 4365 } 4366 4367 btrfs_set_header_nritems(leaf, nritems + batch->nr); 4368 btrfs_mark_buffer_dirty(leaf); 4369 4370 if (btrfs_leaf_free_space(leaf) < 0) { 4371 btrfs_print_leaf(leaf); 4372 BUG(); 4373 } 4374 } 4375 4376 /* 4377 * Insert a new item into a leaf. 4378 * 4379 * @root: The root of the btree. 4380 * @path: A path pointing to the target leaf and slot. 4381 * @key: The key of the new item. 4382 * @data_size: The size of the data associated with the new key. 4383 */ 4384 void btrfs_setup_item_for_insert(struct btrfs_root *root, 4385 struct btrfs_path *path, 4386 const struct btrfs_key *key, 4387 u32 data_size) 4388 { 4389 struct btrfs_item_batch batch; 4390 4391 batch.keys = key; 4392 batch.data_sizes = &data_size; 4393 batch.total_data_size = data_size; 4394 batch.nr = 1; 4395 4396 setup_items_for_insert(root, path, &batch); 4397 } 4398 4399 /* 4400 * Given a key and some data, insert items into the tree. 4401 * This does all the path init required, making room in the tree if needed. 4402 */ 4403 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, 4404 struct btrfs_root *root, 4405 struct btrfs_path *path, 4406 const struct btrfs_item_batch *batch) 4407 { 4408 int ret = 0; 4409 int slot; 4410 u32 total_size; 4411 4412 total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item)); 4413 ret = btrfs_search_slot(trans, root, &batch->keys[0], path, total_size, 1); 4414 if (ret == 0) 4415 return -EEXIST; 4416 if (ret < 0) 4417 return ret; 4418 4419 slot = path->slots[0]; 4420 BUG_ON(slot < 0); 4421 4422 setup_items_for_insert(root, path, batch); 4423 return 0; 4424 } 4425 4426 /* 4427 * Given a key and some data, insert an item into the tree. 4428 * This does all the path init required, making room in the tree if needed. 4429 */ 4430 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, 4431 const struct btrfs_key *cpu_key, void *data, 4432 u32 data_size) 4433 { 4434 int ret = 0; 4435 struct btrfs_path *path; 4436 struct extent_buffer *leaf; 4437 unsigned long ptr; 4438 4439 path = btrfs_alloc_path(); 4440 if (!path) 4441 return -ENOMEM; 4442 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size); 4443 if (!ret) { 4444 leaf = path->nodes[0]; 4445 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 4446 write_extent_buffer(leaf, data, ptr, data_size); 4447 btrfs_mark_buffer_dirty(leaf); 4448 } 4449 btrfs_free_path(path); 4450 return ret; 4451 } 4452 4453 /* 4454 * This function duplicates an item, giving 'new_key' to the new item. 4455 * It guarantees both items live in the same tree leaf and the new item is 4456 * contiguous with the original item. 
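 *
 * On success the path is left pointing at the new copy (path->slots[0] is
 * advanced by one), which carries new_key and the same data as the original
 * item in the preceding slot.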
4457 * 4458 * This allows us to split a file extent in place, keeping a lock on the leaf 4459 * the entire time. 4460 */ 4461 int btrfs_duplicate_item(struct btrfs_trans_handle *trans, 4462 struct btrfs_root *root, 4463 struct btrfs_path *path, 4464 const struct btrfs_key *new_key) 4465 { 4466 struct extent_buffer *leaf; 4467 int ret; 4468 u32 item_size; 4469 4470 leaf = path->nodes[0]; 4471 item_size = btrfs_item_size(leaf, path->slots[0]); 4472 ret = setup_leaf_for_split(trans, root, path, 4473 item_size + sizeof(struct btrfs_item)); 4474 if (ret) 4475 return ret; 4476 4477 path->slots[0]++; 4478 btrfs_setup_item_for_insert(root, path, new_key, item_size); 4479 leaf = path->nodes[0]; 4480 memcpy_extent_buffer(leaf, 4481 btrfs_item_ptr_offset(leaf, path->slots[0]), 4482 btrfs_item_ptr_offset(leaf, path->slots[0] - 1), 4483 item_size); 4484 return 0; 4485 } 4486 4487 /* 4488 * delete the pointer from a given node. 4489 * 4490 * the tree should have been previously balanced so the deletion does not 4491 * empty a node. 4492 * 4493 * This is exported for use inside btrfs-progs, don't un-export it. 4494 */ 4495 int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root, 4496 struct btrfs_path *path, int level, int slot) 4497 { 4498 struct extent_buffer *parent = path->nodes[level]; 4499 u32 nritems; 4500 int ret; 4501 4502 nritems = btrfs_header_nritems(parent); 4503 if (slot != nritems - 1) { 4504 if (level) { 4505 ret = btrfs_tree_mod_log_insert_move(parent, slot, 4506 slot + 1, nritems - slot - 1); 4507 if (ret < 0) { 4508 btrfs_abort_transaction(trans, ret); 4509 return ret; 4510 } 4511 } 4512 memmove_extent_buffer(parent, 4513 btrfs_node_key_ptr_offset(parent, slot), 4514 btrfs_node_key_ptr_offset(parent, slot + 1), 4515 sizeof(struct btrfs_key_ptr) * 4516 (nritems - slot - 1)); 4517 } else if (level) { 4518 ret = btrfs_tree_mod_log_insert_key(parent, slot, 4519 BTRFS_MOD_LOG_KEY_REMOVE); 4520 if (ret < 0) { 4521 btrfs_abort_transaction(trans, ret); 4522 return ret; 4523 } 4524 } 4525 4526 nritems--; 4527 btrfs_set_header_nritems(parent, nritems); 4528 if (nritems == 0 && parent == root->node) { 4529 BUG_ON(btrfs_header_level(root->node) != 1); 4530 /* just turn the root into a leaf and break */ 4531 btrfs_set_header_level(root->node, 0); 4532 } else if (slot == 0) { 4533 struct btrfs_disk_key disk_key; 4534 4535 btrfs_node_key(parent, &disk_key, 0); 4536 fixup_low_keys(path, &disk_key, level + 1); 4537 } 4538 btrfs_mark_buffer_dirty(parent); 4539 return 0; 4540 } 4541 4542 /* 4543 * a helper function to delete the leaf pointed to by path->slots[1] and 4544 * path->nodes[1]. 4545 * 4546 * This deletes the pointer in path->nodes[1] and frees the leaf 4547 * block extent. zero is returned if it all worked out, < 0 otherwise. 4548 * 4549 * The path must have already been setup for deleting the leaf, including 4550 * all the proper balancing. path->nodes[1] must be locked. 
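 *
 * In practice btrfs_del_items() below is the caller that sets this up: it
 * either empties the leaf outright or first pushes the remaining items into
 * the neighbouring leaves before calling here.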
4551 */ 4552 static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans, 4553 struct btrfs_root *root, 4554 struct btrfs_path *path, 4555 struct extent_buffer *leaf) 4556 { 4557 int ret; 4558 4559 WARN_ON(btrfs_header_generation(leaf) != trans->transid); 4560 ret = btrfs_del_ptr(trans, root, path, 1, path->slots[1]); 4561 if (ret < 0) 4562 return ret; 4563 4564 /* 4565 * btrfs_free_extent is expensive, we want to make sure we 4566 * aren't holding any locks when we call it 4567 */ 4568 btrfs_unlock_up_safe(path, 0); 4569 4570 root_sub_used(root, leaf->len); 4571 4572 atomic_inc(&leaf->refs); 4573 btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1); 4574 free_extent_buffer_stale(leaf); 4575 return 0; 4576 } 4577 /* 4578 * delete the item at the leaf level in path. If that empties 4579 * the leaf, remove it from the tree 4580 */ 4581 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, 4582 struct btrfs_path *path, int slot, int nr) 4583 { 4584 struct btrfs_fs_info *fs_info = root->fs_info; 4585 struct extent_buffer *leaf; 4586 int ret = 0; 4587 int wret; 4588 u32 nritems; 4589 4590 leaf = path->nodes[0]; 4591 nritems = btrfs_header_nritems(leaf); 4592 4593 if (slot + nr != nritems) { 4594 const u32 last_off = btrfs_item_offset(leaf, slot + nr - 1); 4595 const int data_end = leaf_data_end(leaf); 4596 struct btrfs_map_token token; 4597 u32 dsize = 0; 4598 int i; 4599 4600 for (i = 0; i < nr; i++) 4601 dsize += btrfs_item_size(leaf, slot + i); 4602 4603 memmove_leaf_data(leaf, data_end + dsize, data_end, 4604 last_off - data_end); 4605 4606 btrfs_init_map_token(&token, leaf); 4607 for (i = slot + nr; i < nritems; i++) { 4608 u32 ioff; 4609 4610 ioff = btrfs_token_item_offset(&token, i); 4611 btrfs_set_token_item_offset(&token, i, ioff + dsize); 4612 } 4613 4614 memmove_leaf_items(leaf, slot, slot + nr, nritems - slot - nr); 4615 } 4616 btrfs_set_header_nritems(leaf, nritems - nr); 4617 nritems -= nr; 4618 4619 /* delete the leaf if we've emptied it */ 4620 if (nritems == 0) { 4621 if (leaf == root->node) { 4622 btrfs_set_header_level(leaf, 0); 4623 } else { 4624 btrfs_clear_buffer_dirty(trans, leaf); 4625 ret = btrfs_del_leaf(trans, root, path, leaf); 4626 if (ret < 0) 4627 return ret; 4628 } 4629 } else { 4630 int used = leaf_space_used(leaf, 0, nritems); 4631 if (slot == 0) { 4632 struct btrfs_disk_key disk_key; 4633 4634 btrfs_item_key(leaf, &disk_key, 0); 4635 fixup_low_keys(path, &disk_key, 1); 4636 } 4637 4638 /* 4639 * Try to delete the leaf if it is mostly empty. We do this by 4640 * trying to move all its items into its left and right neighbours. 4641 * If we can't move all the items, then we don't delete it - it's 4642 * not ideal, but future insertions might fill the leaf with more 4643 * items, or items from other leaves might be moved later into our 4644 * leaf due to deletions on those leaves. 4645 */ 4646 if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) { 4647 u32 min_push_space; 4648 4649 /* push_leaf_left fixes the path. 4650 * make sure the path still points to our leaf 4651 * for possible call to btrfs_del_ptr below 4652 */ 4653 slot = path->slots[1]; 4654 atomic_inc(&leaf->refs); 4655 /* 4656 * We want to be able to at least push one item to the 4657 * left neighbour leaf, and that's the first item. 
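 *
 * For example (illustrative): if the first item holds 50 bytes of data,
 * min_push_space below becomes sizeof(struct btrfs_item) + 50, i.e. room
 * for that one item's header and data, so we only treat the left neighbour
 * as useful if it can absorb at least that first item.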
4658 */ 4659 min_push_space = sizeof(struct btrfs_item) + 4660 btrfs_item_size(leaf, 0); 4661 wret = push_leaf_left(trans, root, path, 0, 4662 min_push_space, 1, (u32)-1); 4663 if (wret < 0 && wret != -ENOSPC) 4664 ret = wret; 4665 4666 if (path->nodes[0] == leaf && 4667 btrfs_header_nritems(leaf)) { 4668 /* 4669 * If we were not able to push all items from our 4670 * leaf to its left neighbour, then attempt to 4671 * either push all the remaining items to the 4672 * right neighbour or none. There's no advantage 4673 * in pushing only some items, instead of all, as 4674 * it's pointless to end up with a leaf having 4675 * too few items while the neighbours can be full 4676 * or nearly full. 4677 */ 4678 nritems = btrfs_header_nritems(leaf); 4679 min_push_space = leaf_space_used(leaf, 0, nritems); 4680 wret = push_leaf_right(trans, root, path, 0, 4681 min_push_space, 1, 0); 4682 if (wret < 0 && wret != -ENOSPC) 4683 ret = wret; 4684 } 4685 4686 if (btrfs_header_nritems(leaf) == 0) { 4687 path->slots[1] = slot; 4688 ret = btrfs_del_leaf(trans, root, path, leaf); 4689 if (ret < 0) 4690 return ret; 4691 free_extent_buffer(leaf); 4692 ret = 0; 4693 } else { 4694 /* if we're still in the path, make sure 4695 * we're dirty. Otherwise, one of the 4696 * push_leaf functions must have already 4697 * dirtied this buffer 4698 */ 4699 if (path->nodes[0] == leaf) 4700 btrfs_mark_buffer_dirty(leaf); 4701 free_extent_buffer(leaf); 4702 } 4703 } else { 4704 btrfs_mark_buffer_dirty(leaf); 4705 } 4706 } 4707 return ret; 4708 } 4709 4710 /* 4711 * A helper function to walk down the tree starting at min_key, and looking 4712 * for nodes or leaves that have a minimum transaction id. 4713 * This is used by the btree defrag code and tree logging. 4714 * 4715 * This does not cow, but it does stuff the starting key it finds back 4716 * into min_key, so you can call btrfs_search_slot with cow=1 on the 4717 * key and get a writable path. 4718 * 4719 * This honors path->lowest_level to prevent descent past a given level 4720 * of the tree. 4721 * 4722 * min_trans indicates the oldest transaction that you are interested 4723 * in walking through. Any nodes or leaves older than min_trans are 4724 * skipped over (without reading them). 4725 * 4726 * returns zero if something useful was found, < 0 on error and 1 if there 4727 * was nothing in the tree that matched the search criteria.
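 *
 * A minimal usage sketch (illustrative only, not taken from any caller;
 * process_item() is a made-up placeholder and error handling is omitted).
 * The found key is stored back into 'key', which the loop then advances:
 *
 *	struct btrfs_key key = { 0 };
 *
 *	while (btrfs_search_forward(root, &key, path, min_trans) == 0) {
 *		process_item(path->nodes[0], path->slots[0]);
 *		btrfs_release_path(path);
 *		key.offset++;
 *	}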
4728 */ 4729 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key, 4730 struct btrfs_path *path, 4731 u64 min_trans) 4732 { 4733 struct extent_buffer *cur; 4734 struct btrfs_key found_key; 4735 int slot; 4736 int sret; 4737 u32 nritems; 4738 int level; 4739 int ret = 1; 4740 int keep_locks = path->keep_locks; 4741 4742 ASSERT(!path->nowait); 4743 path->keep_locks = 1; 4744 again: 4745 cur = btrfs_read_lock_root_node(root); 4746 level = btrfs_header_level(cur); 4747 WARN_ON(path->nodes[level]); 4748 path->nodes[level] = cur; 4749 path->locks[level] = BTRFS_READ_LOCK; 4750 4751 if (btrfs_header_generation(cur) < min_trans) { 4752 ret = 1; 4753 goto out; 4754 } 4755 while (1) { 4756 nritems = btrfs_header_nritems(cur); 4757 level = btrfs_header_level(cur); 4758 sret = btrfs_bin_search(cur, 0, min_key, &slot); 4759 if (sret < 0) { 4760 ret = sret; 4761 goto out; 4762 } 4763 4764 /* at the lowest level, we're done, setup the path and exit */ 4765 if (level == path->lowest_level) { 4766 if (slot >= nritems) 4767 goto find_next_key; 4768 ret = 0; 4769 path->slots[level] = slot; 4770 btrfs_item_key_to_cpu(cur, &found_key, slot); 4771 goto out; 4772 } 4773 if (sret && slot > 0) 4774 slot--; 4775 /* 4776 * check this node pointer against the min_trans parameters. 4777 * If it is too old, skip to the next one. 4778 */ 4779 while (slot < nritems) { 4780 u64 gen; 4781 4782 gen = btrfs_node_ptr_generation(cur, slot); 4783 if (gen < min_trans) { 4784 slot++; 4785 continue; 4786 } 4787 break; 4788 } 4789 find_next_key: 4790 /* 4791 * we didn't find a candidate key in this node, walk forward 4792 * and find another one 4793 */ 4794 if (slot >= nritems) { 4795 path->slots[level] = slot; 4796 sret = btrfs_find_next_key(root, path, min_key, level, 4797 min_trans); 4798 if (sret == 0) { 4799 btrfs_release_path(path); 4800 goto again; 4801 } else { 4802 goto out; 4803 } 4804 } 4805 /* save our key for returning back */ 4806 btrfs_node_key_to_cpu(cur, &found_key, slot); 4807 path->slots[level] = slot; 4808 if (level == path->lowest_level) { 4809 ret = 0; 4810 goto out; 4811 } 4812 cur = btrfs_read_node_slot(cur, slot); 4813 if (IS_ERR(cur)) { 4814 ret = PTR_ERR(cur); 4815 goto out; 4816 } 4817 4818 btrfs_tree_read_lock(cur); 4819 4820 path->locks[level - 1] = BTRFS_READ_LOCK; 4821 path->nodes[level - 1] = cur; 4822 unlock_up(path, level, 1, 0, NULL); 4823 } 4824 out: 4825 path->keep_locks = keep_locks; 4826 if (ret == 0) { 4827 btrfs_unlock_up_safe(path, path->lowest_level + 1); 4828 memcpy(min_key, &found_key, sizeof(found_key)); 4829 } 4830 return ret; 4831 } 4832 4833 /* 4834 * this is similar to btrfs_next_leaf, but does not try to preserve 4835 * and fixup the path. It looks for and returns the next key in the 4836 * tree based on the current path and the min_trans parameters. 4837 * 4838 * 0 is returned if another key is found, < 0 if there are any errors 4839 * and 1 is returned if there are no higher keys in the tree 4840 * 4841 * path->keep_locks should be set to 1 on the search made before 4842 * calling this function. 
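 *
 * btrfs_search_forward() above uses this when it runs off the end of a
 * node, to locate the next candidate key and restart its walk from there.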
4843 */ 4844 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, 4845 struct btrfs_key *key, int level, u64 min_trans) 4846 { 4847 int slot; 4848 struct extent_buffer *c; 4849 4850 WARN_ON(!path->keep_locks && !path->skip_locking); 4851 while (level < BTRFS_MAX_LEVEL) { 4852 if (!path->nodes[level]) 4853 return 1; 4854 4855 slot = path->slots[level] + 1; 4856 c = path->nodes[level]; 4857 next: 4858 if (slot >= btrfs_header_nritems(c)) { 4859 int ret; 4860 int orig_lowest; 4861 struct btrfs_key cur_key; 4862 if (level + 1 >= BTRFS_MAX_LEVEL || 4863 !path->nodes[level + 1]) 4864 return 1; 4865 4866 if (path->locks[level + 1] || path->skip_locking) { 4867 level++; 4868 continue; 4869 } 4870 4871 slot = btrfs_header_nritems(c) - 1; 4872 if (level == 0) 4873 btrfs_item_key_to_cpu(c, &cur_key, slot); 4874 else 4875 btrfs_node_key_to_cpu(c, &cur_key, slot); 4876 4877 orig_lowest = path->lowest_level; 4878 btrfs_release_path(path); 4879 path->lowest_level = level; 4880 ret = btrfs_search_slot(NULL, root, &cur_key, path, 4881 0, 0); 4882 path->lowest_level = orig_lowest; 4883 if (ret < 0) 4884 return ret; 4885 4886 c = path->nodes[level]; 4887 slot = path->slots[level]; 4888 if (ret == 0) 4889 slot++; 4890 goto next; 4891 } 4892 4893 if (level == 0) 4894 btrfs_item_key_to_cpu(c, key, slot); 4895 else { 4896 u64 gen = btrfs_node_ptr_generation(c, slot); 4897 4898 if (gen < min_trans) { 4899 slot++; 4900 goto next; 4901 } 4902 btrfs_node_key_to_cpu(c, key, slot); 4903 } 4904 return 0; 4905 } 4906 return 1; 4907 } 4908 4909 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, 4910 u64 time_seq) 4911 { 4912 int slot; 4913 int level; 4914 struct extent_buffer *c; 4915 struct extent_buffer *next; 4916 struct btrfs_fs_info *fs_info = root->fs_info; 4917 struct btrfs_key key; 4918 bool need_commit_sem = false; 4919 u32 nritems; 4920 int ret; 4921 int i; 4922 4923 /* 4924 * The nowait semantics are used only for write paths, where we don't 4925 * use the tree mod log and sequence numbers. 4926 */ 4927 if (time_seq) 4928 ASSERT(!path->nowait); 4929 4930 nritems = btrfs_header_nritems(path->nodes[0]); 4931 if (nritems == 0) 4932 return 1; 4933 4934 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1); 4935 again: 4936 level = 1; 4937 next = NULL; 4938 btrfs_release_path(path); 4939 4940 path->keep_locks = 1; 4941 4942 if (time_seq) { 4943 ret = btrfs_search_old_slot(root, &key, path, time_seq); 4944 } else { 4945 if (path->need_commit_sem) { 4946 path->need_commit_sem = 0; 4947 need_commit_sem = true; 4948 if (path->nowait) { 4949 if (!down_read_trylock(&fs_info->commit_root_sem)) { 4950 ret = -EAGAIN; 4951 goto done; 4952 } 4953 } else { 4954 down_read(&fs_info->commit_root_sem); 4955 } 4956 } 4957 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4958 } 4959 path->keep_locks = 0; 4960 4961 if (ret < 0) 4962 goto done; 4963 4964 nritems = btrfs_header_nritems(path->nodes[0]); 4965 /* 4966 * by releasing the path above we dropped all our locks. A balance 4967 * could have added more items next to the key that used to be 4968 * at the very end of the block. So, check again here and 4969 * advance the path if there are now more items available. 
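 *
 * For example (illustrative): if the leaf had 5 items and we were sitting
 * on the last slot (4) before releasing the path, and the re-search finds
 * the same key in a leaf that now holds 8 items, we just advance to slot 5
 * here instead of walking up to find the next leaf.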
4970 */ 4971 if (nritems > 0 && path->slots[0] < nritems - 1) { 4972 if (ret == 0) 4973 path->slots[0]++; 4974 ret = 0; 4975 goto done; 4976 } 4977 /* 4978 * So the above check misses one case: 4979 * - after releasing the path above, someone has removed the item that 4980 * used to be at the very end of the block, and balance between leafs 4981 * gets another one with bigger key.offset to replace it. 4982 * 4983 * This one should be returned as well, or we can get leaf corruption 4984 * later(esp. in __btrfs_drop_extents()). 4985 * 4986 * And a bit more explanation about this check, 4987 * with ret > 0, the key isn't found, the path points to the slot 4988 * where it should be inserted, so the path->slots[0] item must be the 4989 * bigger one. 4990 */ 4991 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) { 4992 ret = 0; 4993 goto done; 4994 } 4995 4996 while (level < BTRFS_MAX_LEVEL) { 4997 if (!path->nodes[level]) { 4998 ret = 1; 4999 goto done; 5000 } 5001 5002 slot = path->slots[level] + 1; 5003 c = path->nodes[level]; 5004 if (slot >= btrfs_header_nritems(c)) { 5005 level++; 5006 if (level == BTRFS_MAX_LEVEL) { 5007 ret = 1; 5008 goto done; 5009 } 5010 continue; 5011 } 5012 5013 5014 /* 5015 * Our current level is where we're going to start from, and to 5016 * make sure lockdep doesn't complain we need to drop our locks 5017 * and nodes from 0 to our current level. 5018 */ 5019 for (i = 0; i < level; i++) { 5020 if (path->locks[level]) { 5021 btrfs_tree_read_unlock(path->nodes[i]); 5022 path->locks[i] = 0; 5023 } 5024 free_extent_buffer(path->nodes[i]); 5025 path->nodes[i] = NULL; 5026 } 5027 5028 next = c; 5029 ret = read_block_for_search(root, path, &next, level, 5030 slot, &key); 5031 if (ret == -EAGAIN && !path->nowait) 5032 goto again; 5033 5034 if (ret < 0) { 5035 btrfs_release_path(path); 5036 goto done; 5037 } 5038 5039 if (!path->skip_locking) { 5040 ret = btrfs_try_tree_read_lock(next); 5041 if (!ret && path->nowait) { 5042 ret = -EAGAIN; 5043 goto done; 5044 } 5045 if (!ret && time_seq) { 5046 /* 5047 * If we don't get the lock, we may be racing 5048 * with push_leaf_left, holding that lock while 5049 * itself waiting for the leaf we've currently 5050 * locked. To solve this situation, we give up 5051 * on our lock and cycle. 
5052 */ 5053 free_extent_buffer(next); 5054 btrfs_release_path(path); 5055 cond_resched(); 5056 goto again; 5057 } 5058 if (!ret) 5059 btrfs_tree_read_lock(next); 5060 } 5061 break; 5062 } 5063 path->slots[level] = slot; 5064 while (1) { 5065 level--; 5066 path->nodes[level] = next; 5067 path->slots[level] = 0; 5068 if (!path->skip_locking) 5069 path->locks[level] = BTRFS_READ_LOCK; 5070 if (!level) 5071 break; 5072 5073 ret = read_block_for_search(root, path, &next, level, 5074 0, &key); 5075 if (ret == -EAGAIN && !path->nowait) 5076 goto again; 5077 5078 if (ret < 0) { 5079 btrfs_release_path(path); 5080 goto done; 5081 } 5082 5083 if (!path->skip_locking) { 5084 if (path->nowait) { 5085 if (!btrfs_try_tree_read_lock(next)) { 5086 ret = -EAGAIN; 5087 goto done; 5088 } 5089 } else { 5090 btrfs_tree_read_lock(next); 5091 } 5092 } 5093 } 5094 ret = 0; 5095 done: 5096 unlock_up(path, 0, 1, 0, NULL); 5097 if (need_commit_sem) { 5098 int ret2; 5099 5100 path->need_commit_sem = 1; 5101 ret2 = finish_need_commit_sem_search(path); 5102 up_read(&fs_info->commit_root_sem); 5103 if (ret2) 5104 ret = ret2; 5105 } 5106 5107 return ret; 5108 } 5109 5110 int btrfs_next_old_item(struct btrfs_root *root, struct btrfs_path *path, u64 time_seq) 5111 { 5112 path->slots[0]++; 5113 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) 5114 return btrfs_next_old_leaf(root, path, time_seq); 5115 return 0; 5116 } 5117 5118 /* 5119 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps 5120 * searching until it gets past min_objectid or finds an item of 'type' 5121 * 5122 * returns 0 if something is found, 1 if nothing was found and < 0 on error 5123 */ 5124 int btrfs_previous_item(struct btrfs_root *root, 5125 struct btrfs_path *path, u64 min_objectid, 5126 int type) 5127 { 5128 struct btrfs_key found_key; 5129 struct extent_buffer *leaf; 5130 u32 nritems; 5131 int ret; 5132 5133 while (1) { 5134 if (path->slots[0] == 0) { 5135 ret = btrfs_prev_leaf(root, path); 5136 if (ret != 0) 5137 return ret; 5138 } else { 5139 path->slots[0]--; 5140 } 5141 leaf = path->nodes[0]; 5142 nritems = btrfs_header_nritems(leaf); 5143 if (nritems == 0) 5144 return 1; 5145 if (path->slots[0] == nritems) 5146 path->slots[0]--; 5147 5148 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 5149 if (found_key.objectid < min_objectid) 5150 break; 5151 if (found_key.type == type) 5152 return 0; 5153 if (found_key.objectid == min_objectid && 5154 found_key.type < type) 5155 break; 5156 } 5157 return 1; 5158 } 5159 5160 /* 5161 * search in extent tree to find a previous Metadata/Data extent item with 5162 * min objecitd. 
5163 * 5164 * returns 0 if something is found, 1 if nothing was found and < 0 on error 5165 */ 5166 int btrfs_previous_extent_item(struct btrfs_root *root, 5167 struct btrfs_path *path, u64 min_objectid) 5168 { 5169 struct btrfs_key found_key; 5170 struct extent_buffer *leaf; 5171 u32 nritems; 5172 int ret; 5173 5174 while (1) { 5175 if (path->slots[0] == 0) { 5176 ret = btrfs_prev_leaf(root, path); 5177 if (ret != 0) 5178 return ret; 5179 } else { 5180 path->slots[0]--; 5181 } 5182 leaf = path->nodes[0]; 5183 nritems = btrfs_header_nritems(leaf); 5184 if (nritems == 0) 5185 return 1; 5186 if (path->slots[0] == nritems) 5187 path->slots[0]--; 5188 5189 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 5190 if (found_key.objectid < min_objectid) 5191 break; 5192 if (found_key.type == BTRFS_EXTENT_ITEM_KEY || 5193 found_key.type == BTRFS_METADATA_ITEM_KEY) 5194 return 0; 5195 if (found_key.objectid == min_objectid && 5196 found_key.type < BTRFS_EXTENT_ITEM_KEY) 5197 break; 5198 } 5199 return 1; 5200 } 5201 5202 int __init btrfs_ctree_init(void) 5203 { 5204 btrfs_path_cachep = kmem_cache_create("btrfs_path", 5205 sizeof(struct btrfs_path), 0, 5206 SLAB_MEM_SPREAD, NULL); 5207 if (!btrfs_path_cachep) 5208 return -ENOMEM; 5209 return 0; 5210 } 5211 5212 void __cold btrfs_ctree_exit(void) 5213 { 5214 kmem_cache_destroy(btrfs_path_cachep); 5215 } 5216
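
/*
 * Example: inserting two items in one batch with btrfs_insert_empty_items().
 * This is an illustrative sketch only (the transaction, root, keys, sizes
 * and payload buffers are assumed to exist and error handling is trimmed);
 * it shows the btrfs_item_batch interface used by the insertion helpers
 * above.
 *
 *	struct btrfs_key keys[2];
 *	u32 sizes[2] = { 16, 32 };
 *	struct btrfs_item_batch batch = {
 *		.keys = keys,
 *		.data_sizes = sizes,
 *		.total_data_size = 48,
 *		.nr = 2,
 *	};
 *	struct btrfs_path *path = btrfs_alloc_path();
 *
 *	keys[0].objectid = objectid; keys[0].type = type; keys[0].offset = 0;
 *	keys[1].objectid = objectid; keys[1].type = type; keys[1].offset = 1;
 *
 *	if (btrfs_insert_empty_items(trans, root, path, &batch) == 0) {
 *		write_extent_buffer(path->nodes[0], data0,
 *			btrfs_item_ptr_offset(path->nodes[0], path->slots[0]), 16);
 *		write_extent_buffer(path->nodes[0], data1,
 *			btrfs_item_ptr_offset(path->nodes[0], path->slots[0] + 1), 32);
 *		btrfs_mark_buffer_dirty(path->nodes[0]);
 *	}
 *	btrfs_free_path(path);
 */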