// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007,2008 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mm.h>
#include <linux/error-injection.h>
#include "messages.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"
#include "tree-mod-log.h"
#include "tree-checker.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "relocation.h"
#include "file-item.h"

static struct kmem_cache *btrfs_path_cachep;

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *ins_key, struct btrfs_path *path,
		      int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);

static const struct btrfs_csums {
	u16		size;
	const char	name[10];
	const char	driver[12];
} btrfs_csums[] = {
	[BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
	[BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" },
	[BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
	[BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
				     .driver = "blake2b-256" },
};

/*
 * The leaf data grows from end-to-front in the node. This returns the address
 * of the start of the last item, which is the stop of the leaf data stack.
 */
static unsigned int leaf_data_end(const struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);

	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(leaf->fs_info);
	return btrfs_item_offset(leaf, nr - 1);
}

/*
 * Move data in a @leaf (using memmove, safe for overlapping ranges).
 *
 * @leaf:	leaf that we're doing a memmove on
 * @dst_offset:	item data offset we're moving to
 * @src_offset:	item data offset we're moving from
 * @len:	length of the data we're moving
 *
 * Wrapper around memmove_extent_buffer() that takes into account the header on
 * the leaf. The btrfs_item offsets start directly after the header, so we
 * have to adjust any offsets to account for the header in the leaf. This
 * handles that math to simplify the callers.
 */
static inline void memmove_leaf_data(const struct extent_buffer *leaf,
				     unsigned long dst_offset,
				     unsigned long src_offset,
				     unsigned long len)
{
	memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, 0) + dst_offset,
			      btrfs_item_nr_offset(leaf, 0) + src_offset, len);
}
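
/*
 * Illustration added for clarity (not part of the original file): a leaf
 * keeps the item headers and the item data at opposite ends of the block,
 * growing towards each other, which is why leaf_data_end() above is the
 * offset of the last item's data:
 *
 *	[ header | item 0 | item 1 | ... | free space ... | data 1 | data 0 ]
 *	          headers grow ->                    <- item data grows
 */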

/*
 * Copy item data from @src into @dst at the given @offset.
 *
 * @dst:	destination leaf that we're copying into
 * @src:	source leaf that we're copying from
 * @dst_offset:	item data offset we're copying to
 * @src_offset:	item data offset we're copying from
 * @len:	length of the data we're copying
 *
 * Wrapper around copy_extent_buffer() that takes into account the header on
 * the leaf. The btrfs_item offsets start directly after the header, so we
 * have to adjust any offsets to account for the header in the leaf. This
 * handles that math to simplify the callers.
 */
static inline void copy_leaf_data(const struct extent_buffer *dst,
				  const struct extent_buffer *src,
				  unsigned long dst_offset,
				  unsigned long src_offset, unsigned long len)
{
	copy_extent_buffer(dst, src, btrfs_item_nr_offset(dst, 0) + dst_offset,
			   btrfs_item_nr_offset(src, 0) + src_offset, len);
}

/*
 * Move items in a @leaf (using memmove).
 *
 * @dst:	destination leaf for the items
 * @dst_item:	the item nr we're copying into
 * @src_item:	the item nr we're copying from
 * @nr_items:	the number of items to copy
 *
 * Wrapper around memmove_extent_buffer() that does the math to get the
 * appropriate offsets into the leaf from the item numbers.
 */
static inline void memmove_leaf_items(const struct extent_buffer *leaf,
				      int dst_item, int src_item, int nr_items)
{
	memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, dst_item),
			      btrfs_item_nr_offset(leaf, src_item),
			      nr_items * sizeof(struct btrfs_item));
}

/*
 * Copy items from @src into @dst at the given @offset.
 *
 * @dst:	destination leaf for the items
 * @src:	source leaf for the items
 * @dst_item:	the item nr we're copying into
 * @src_item:	the item nr we're copying from
 * @nr_items:	the number of items to copy
 *
 * Wrapper around copy_extent_buffer() that does the math to get the
 * appropriate offsets into the leaf from the item numbers.
 */
static inline void copy_leaf_items(const struct extent_buffer *dst,
				   const struct extent_buffer *src,
				   int dst_item, int src_item, int nr_items)
{
	copy_extent_buffer(dst, src, btrfs_item_nr_offset(dst, dst_item),
			   btrfs_item_nr_offset(src, src_item),
			   nr_items * sizeof(struct btrfs_item));
}

/* This exists for btrfs-progs usages. */
u16 btrfs_csum_type_size(u16 type)
{
	return btrfs_csums[type].size;
}

int btrfs_super_csum_size(const struct btrfs_super_block *s)
{
	u16 t = btrfs_super_csum_type(s);
	/*
	 * csum type is validated at mount time
	 */
	return btrfs_csum_type_size(t);
}

const char *btrfs_super_csum_name(u16 csum_type)
{
	/* csum type is validated at mount time */
	return btrfs_csums[csum_type].name;
}

/*
 * Return driver name if defined, otherwise the name that's also a valid driver
 * name
 */
const char *btrfs_super_csum_driver(u16 csum_type)
{
	/* csum type is validated at mount time */
	return btrfs_csums[csum_type].driver[0] ?
		btrfs_csums[csum_type].driver :
		btrfs_csums[csum_type].name;
}

size_t __attribute_const__ btrfs_get_num_csums(void)
{
	return ARRAY_SIZE(btrfs_csums);
}

struct btrfs_path *btrfs_alloc_path(void)
{
	might_sleep();

	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}

/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}
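
/*
 * Illustrative usage sketch (added, not part of the original file): a path
 * is allocated, used for searches, and then freed, which also drops any
 * locks and extent buffer references it still holds:
 *
 *	struct btrfs_path *path;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	...
 *	btrfs_free_path(path);
 */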

/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that have no locks or extent buffers held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}

/*
 * We want the transaction abort to print stack trace only for errors where the
 * cause could be a bug, eg. due to ENOSPC, and not for common errors that are
 * caused by external factors.
 */
bool __cold abort_should_print_stack(int errno)
{
	switch (errno) {
	case -EIO:
	case -EROFS:
	case -ENOMEM:
		return false;
	}
	return true;
}

/*
 * safely gets a reference on the root node of a tree. A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree. See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear. It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was COWed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}

/*
 * Cowonly roots (not-shareable trees, everything not subvolume or reloc
 * roots) just get put onto a simple dirty list. Transaction walks this list
 * to make sure they get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
		return;

	spin_lock(&fs_info->trans_lock);
	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
		/* Want the extent tree to be the last on the list */
		if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID)
			list_move_tail(&root->dirty_list,
				       &fs_info->dirty_cowonly_roots);
		else
			list_move(&root->dirty_list,
				  &fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&fs_info->trans_lock);
}

/*
 * Used by snapshot creation to make a copy of a root for a tree with
 * a given objectid. The buffer with the new root node is returned in
 * cow_ret, and this function returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root,
		    struct extent_buffer *buf,
		    struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
				     &disk_key, level, buf->start, 0,
				     BTRFS_NESTING_NEW_ROOT);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);
	if (ret) {
		btrfs_tree_unlock(cow);
		free_extent_buffer(cow);
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in shareable trees and tree roots are never shared.
	 * If a block was allocated after the last snapshot and the block was
	 * not allocated by tree relocation, we know the block is not shared.
	 */
	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;

	return 0;
}

static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree blocks
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			ret = -EROFS;
			btrfs_handle_fs_error(fs_info, ret, NULL);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1);
			if (ret)
				return ret;

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0);
				if (ret)
					return ret;
				ret = btrfs_inc_ref(trans, root, cow, 1);
				if (ret)
					return ret;
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
		}
		if (new_flags != 0) {
			ret = btrfs_set_disk_extent_flags(trans, buf, new_flags);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
			ret = btrfs_dec_ref(trans, root, buf, 1);
			if (ret)
				return ret;
		}
		btrfs_clear_buffer_dirty(trans, buf);
		*last_ref = 1;
	}
	return 0;
}
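
/*
 * Roadmap comment added for clarity (not part of the original file); the
 * COW path implemented by __btrfs_cow_block() below boils down to:
 *
 *	1. allocate a new tree block (btrfs_alloc_tree_block)
 *	2. copy the old block's contents into it and fix up the header
 *	3. update backrefs (update_ref_for_cow) and reloc bookkeeping
 *	4. point the parent (or the root pointer) at the new block
 *	5. free the old block if this was its last reference
 */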

/*
 * Does the dirty work in COW of a single block. The parent block (if
 * supplied) is updated to point to the new cow copy. The new buffer is marked
 * dirty and returned locked. If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow. This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct extent_buffer *buf,
				      struct extent_buffer *parent, int parent_slot,
				      struct extent_buffer **cow_ret,
				      u64 search_start, u64 empty_size,
				      enum btrfs_lock_nesting nest)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start = 0;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_write_locked(buf);

	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
		parent_start = parent->start;

	cow = btrfs_alloc_tree_block(trans, root, parent_start,
				     root->root_key.objectid, &disk_key, level,
				     search_start, empty_size, nest);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_tree_unlock(cow);
		free_extent_buffer(cow);
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
		if (ret) {
			btrfs_tree_unlock(cow);
			free_extent_buffer(cow);
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
	}

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;

		ret = btrfs_tree_mod_log_insert_root(root->node, cow, true);
		if (ret < 0) {
			btrfs_tree_unlock(cow);
			free_extent_buffer(cow);
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
		atomic_inc(&cow->refs);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
				      parent_start, last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		WARN_ON(trans->transid != btrfs_header_generation(parent));
		ret = btrfs_tree_mod_log_insert_key(parent, parent_slot,
						    BTRFS_MOD_LOG_KEY_REPLACE);
		if (ret) {
			btrfs_tree_unlock(cow);
			free_extent_buffer(cow);
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		if (last_ref) {
			ret = btrfs_tree_mod_log_free_eb(buf);
			if (ret) {
				btrfs_tree_unlock(cow);
				free_extent_buffer(cow);
				btrfs_abort_transaction(trans, ret);
				return ret;
			}
		}
		btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
				      parent_start, last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	if (btrfs_is_testing(root->fs_info))
		return 0;

	/* Ensure we can see the FORCE_COW bit */
	smp_mb__before_atomic();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create a snapshot during committing the transaction,
	 *    after we've finished copying the source root, we must COW the
	 *    shared block to ensure metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
		return 0;
	return 1;
}

/*
 * COWs a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't COWed more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     enum btrfs_lock_nesting nest)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 search_start;
	int ret;

	if (test_bit(BTRFS_ROOT_DELETING, &root->state))
		btrfs_err(fs_info,
			  "COW'ing blocks on a fs root that's being dropped");

	if (trans->transaction != fs_info->running_transaction)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		     trans->transid,
		     fs_info->running_transaction->transid);

	if (trans->transid != fs_info->generation)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		     trans->transid, fs_info->generation);

	if (!should_cow_block(trans, root, buf)) {
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)SZ_1G - 1);

	/*
	 * Before CoWing this block for later modification, check if it's
	 * the subtree root and do the delayed subtree trace if needed.
	 *
	 * Also, we don't care about the error, as it's handled internally.
	 */
	btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
	ret = __btrfs_cow_block(trans, root, buf, parent,
				parent_slot, cow_ret, search_start, 0, nest);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}
ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO);

/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}

#ifdef __LITTLE_ENDIAN

/*
 * Compare two keys, on little-endian the disk order is same as CPU order and
 * we can avoid the conversion.
 */
static int comp_keys(const struct btrfs_disk_key *disk_key,
		     const struct btrfs_key *k2)
{
	const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key;

	return btrfs_comp_cpu_keys(k1, k2);
}

#else

/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(const struct btrfs_disk_key *disk,
		     const struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}
#endif

/*
 * Same as comp_keys, only with two struct btrfs_key's.
 */
int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}

/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cur;
	u64 blocknr;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	WARN_ON(trans->transaction != fs_info->running_transaction);
	WARN_ON(trans->transid != fs_info->generation);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = fs_info->nodesize;
	end_slot = parent_nritems - 1;

	if (parent_nritems <= 1)
		return 0;

	for (i = start_slot; i <= end_slot; i++) {
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = btrfs_read_node_slot(parent, i);
		if (IS_ERR(cur))
			return PTR_ERR(cur);
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize),
					BTRFS_NESTING_COW);
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}

/*
 * Search for a key in the given extent_buffer.
 *
 * The lower boundary for the search is specified by the slot number @first_slot.
 * Use a value of 0 to search over the whole extent buffer. Works for both
 * leaves and nodes.
 *
 * The slot in the extent buffer is returned via @slot. If the key exists in the
 * extent buffer, then @slot will point to the slot where the key is, otherwise
 * it points to the slot where you would insert the key.
 *
 * Slot may point to the total number of items (i.e. one position beyond the last
 * key) if the key is bigger than the last key in the extent buffer.
 */
int btrfs_bin_search(struct extent_buffer *eb, int first_slot,
		     const struct btrfs_key *key, int *slot)
{
	unsigned long p;
	int item_size;
	/*
	 * Use unsigned types for the low and high slots, so that we get a more
	 * efficient division in the search loop below.
	 */
	u32 low = first_slot;
	u32 high = btrfs_header_nritems(eb);
	int ret;
	const int key_size = sizeof(struct btrfs_disk_key);

	if (unlikely(low > high)) {
		btrfs_err(eb->fs_info,
			  "%s: low (%u) > high (%u) eb %llu owner %llu level %d",
			  __func__, low, high, eb->start,
			  btrfs_header_owner(eb), btrfs_header_level(eb));
		return -EINVAL;
	}

	if (btrfs_header_level(eb) == 0) {
		p = offsetof(struct btrfs_leaf, items);
		item_size = sizeof(struct btrfs_item);
	} else {
		p = offsetof(struct btrfs_node, ptrs);
		item_size = sizeof(struct btrfs_key_ptr);
	}

	while (low < high) {
		unsigned long oip;
		unsigned long offset;
		struct btrfs_disk_key *tmp;
		struct btrfs_disk_key unaligned;
		int mid;

		mid = (low + high) / 2;
		offset = p + mid * item_size;
		oip = offset_in_page(offset);

		if (oip + key_size <= PAGE_SIZE) {
			const unsigned long idx = get_eb_page_index(offset);
			char *kaddr = page_address(eb->pages[idx]);

			oip = get_eb_offset_in_page(eb, offset);
			tmp = (struct btrfs_disk_key *)(kaddr + oip);
		} else {
			read_extent_buffer(eb, &unaligned, offset, key_size);
			tmp = &unaligned;
		}

		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}
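
/*
 * Illustrative sketch (added, not part of the original file) of the
 * btrfs_bin_search() return convention documented above:
 *
 *	ret = btrfs_bin_search(eb, 0, &key, &slot);
 *	if (ret == 0)
 *		use 'slot';		(exact match, key lives at 'slot')
 *	else if (ret == 1)
 *		insert at 'slot';	(key absent, 'slot' is the insert position)
 *	else
 *		handle error;		(ret < 0)
 */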

static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}

/* given a node and slot number, this reads the block it points to. The
 * extent buffer is returned with a reference taken (but unlocked).
 */
struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
					   int slot)
{
	int level = btrfs_header_level(parent);
	struct btrfs_tree_parent_check check = { 0 };
	struct extent_buffer *eb;

	if (slot < 0 || slot >= btrfs_header_nritems(parent))
		return ERR_PTR(-ENOENT);

	ASSERT(level);

	check.level = level - 1;
	check.transid = btrfs_node_ptr_generation(parent, slot);
	check.owner_root = btrfs_header_owner(parent);
	check.has_first_key = true;
	btrfs_node_key_to_cpu(parent, &check.first_key, slot);

	eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot),
			     &check);
	if (IS_ERR(eb))
		return eb;
	if (!extent_buffer_uptodate(eb)) {
		free_extent_buffer(eb);
		return ERR_PTR(-EIO);
	}

	return eb;
}

/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion. We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	ASSERT(level > 0);

	mid = path->nodes[level];

	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = btrfs_read_node_slot(mid, 0);
		if (IS_ERR(child)) {
			ret = PTR_ERR(child);
			btrfs_handle_fs_error(fs_info, ret, NULL);
			goto out;
		}

		btrfs_tree_lock(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child,
				      BTRFS_NESTING_COW);
		if (ret) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			goto out;
		}

		ret = btrfs_tree_mod_log_insert_root(root->node, child, true);
		if (ret < 0) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
		rcu_assign_pointer(root->node, child);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		btrfs_clear_buffer_dirty(trans, mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);

		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
		/* once for the root ptr */
		free_extent_buffer_stale(mid);
		return 0;
	}
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
		return 0;

	if (pslot) {
		left = btrfs_read_node_slot(parent, pslot - 1);
		if (IS_ERR(left)) {
			ret = PTR_ERR(left);
			left = NULL;
			goto out;
		}

		__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left,
				       BTRFS_NESTING_LEFT_COW);
		if (wret) {
			ret = wret;
			goto out;
		}
	}

	if (pslot + 1 < btrfs_header_nritems(parent)) {
		right = btrfs_read_node_slot(parent, pslot + 1);
		if (IS_ERR(right)) {
			ret = PTR_ERR(right);
			right = NULL;
			goto out;
		}

		__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right,
				       BTRFS_NESTING_RIGHT_COW);
		if (wret) {
			ret = wret;
			goto out;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, left, mid, 1);
		if (wret < 0)
			ret = wret;
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			btrfs_clear_buffer_dirty(trans, right);
			btrfs_tree_unlock(right);
			btrfs_del_ptr(root, path, level + 1, pslot + 1);
			root_sub_used(root, right->len);
			btrfs_free_tree_block(trans, btrfs_root_id(root), right,
					      0, 1);
			free_extent_buffer_stale(right);
			right = NULL;
		} else {
			struct btrfs_disk_key right_key;

			btrfs_node_key(right, &right_key, 0);
			ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
					BTRFS_MOD_LOG_KEY_REPLACE);
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete. A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right
		 */
		if (!left) {
			ret = -EROFS;
			btrfs_handle_fs_error(fs_info, ret, NULL);
			goto out;
		}
		wret = balance_node_right(trans, mid, left);
		if (wret < 0) {
			ret = wret;
			goto out;
		}
		if (wret == 1) {
			wret = push_node_left(trans, left, mid, 1);
			if (wret < 0)
				ret = wret;
		}
		BUG_ON(wret == 1);
	}
	if (btrfs_header_nritems(mid) == 0) {
		btrfs_clear_buffer_dirty(trans, mid);
		btrfs_tree_unlock(mid);
		btrfs_del_ptr(root, path, level + 1, pslot);
		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
		free_extent_buffer_stale(mid);
		mid = NULL;
	} else {
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;

		btrfs_node_key(mid, &mid_key, 0);
		ret = btrfs_tree_mod_log_insert_key(parent, pslot,
						    BTRFS_MOD_LOG_KEY_REPLACE);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(parent);
	}

	/* update the path */
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			atomic_inc(&left->refs);
			/* left was locked after cow */
			path->nodes[level] = left;
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
			if (mid) {
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			}
		} else {
			orig_slot -= btrfs_header_nritems(left);
			path->slots[level] = orig_slot;
		}
	}
	/* double check we haven't messed things up */
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
out:
	if (right) {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	if (left) {
		if (path->nodes[level] != left)
			btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	return ret;
}

/* Node balancing for insertion. Here we only split or push nodes around
 * when they are completely full. This is also done top down, so we
 * have to be pessimistic.
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];

	if (level == 0)
		return 1;

	mid = path->nodes[level];
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	if (!parent)
		return 1;

	/* first, try to make some room in the middle buffer */
	if (pslot) {
		u32 left_nr;

		left = btrfs_read_node_slot(parent, pslot - 1);
		if (IS_ERR(left))
			return PTR_ERR(left);

		__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);

		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, left, parent,
					      pslot - 1, &left,
					      BTRFS_NESTING_LEFT_COW);
			if (ret)
				wret = 1;
			else {
				wret = push_node_left(trans, left, mid, 0);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			ret = btrfs_tree_mod_log_insert_key(parent, pslot,
					BTRFS_MOD_LOG_KEY_REPLACE);
			BUG_ON(ret < 0);
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(parent);
			if (btrfs_header_nritems(left) > orig_slot) {
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				orig_slot -=
					btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
			}
			return 0;
		}
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (pslot + 1 < btrfs_header_nritems(parent)) {
		u32 right_nr;

		right = btrfs_read_node_slot(parent, pslot + 1);
		if (IS_ERR(right))
			return PTR_ERR(right);

		__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);

		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, right,
					      parent, pslot + 1,
					      &right, BTRFS_NESTING_RIGHT_COW);
			if (ret)
				wret = 1;
			else {
				wret = balance_node_right(trans, right, mid);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_node_key(right, &disk_key, 0);
			ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
					BTRFS_MOD_LOG_KEY_REPLACE);
			BUG_ON(ret < 0);
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);

			if (btrfs_header_nritems(mid) <= orig_slot) {
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
			}
			return 0;
		}
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 1;
}

/*
 * readahead one full node of leaves, finding things that are close
 * to the block in 'slot', and triggering ra on them.
 */
static void reada_for_search(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path,
			     int level, int slot, u64 objectid)
{
	struct extent_buffer *node;
	struct btrfs_disk_key disk_key;
	u32 nritems;
	u64 search;
	u64 target;
	u64 nread = 0;
	u64 nread_max;
	u32 nr;
	u32 blocksize;
	u32 nscan = 0;

	if (level != 1 && path->reada != READA_FORWARD_ALWAYS)
		return;

	if (!path->nodes[level])
		return;

	node = path->nodes[level];

	/*
	 * Since the time between visiting leaves is much shorter than the time
	 * between visiting nodes, limit read ahead of nodes to 1, to avoid too
	 * much IO at once (possibly random).
	 */
	if (path->reada == READA_FORWARD_ALWAYS) {
		if (level > 1)
			nread_max = node->fs_info->nodesize;
		else
			nread_max = SZ_128K;
	} else {
		nread_max = SZ_64K;
	}

	search = btrfs_node_blockptr(node, slot);
	blocksize = fs_info->nodesize;
	if (path->reada != READA_FORWARD_ALWAYS) {
		struct extent_buffer *eb;

		eb = find_extent_buffer(fs_info, search);
		if (eb) {
			free_extent_buffer(eb);
			return;
		}
	}

	target = search;

	nritems = btrfs_header_nritems(node);
	nr = slot;

	while (1) {
		if (path->reada == READA_BACK) {
			if (nr == 0)
				break;
			nr--;
		} else if (path->reada == READA_FORWARD ||
			   path->reada == READA_FORWARD_ALWAYS) {
			nr++;
			if (nr >= nritems)
				break;
		}
		if (path->reada == READA_BACK && objectid) {
			btrfs_node_key(node, &disk_key, nr);
			if (btrfs_disk_key_objectid(&disk_key) != objectid)
				break;
		}
		search = btrfs_node_blockptr(node, nr);
		if (path->reada == READA_FORWARD_ALWAYS ||
		    (search <= target && target - search <= 65536) ||
		    (search > target && search - target <= 65536)) {
			btrfs_readahead_node_child(node, nr);
			nread += blocksize;
		}
		nscan++;
		if (nread > nread_max || nscan > 32)
			break;
	}
}

static noinline void reada_for_balance(struct btrfs_path *path, int level)
{
	struct extent_buffer *parent;
	int slot;
	int nritems;

	parent = path->nodes[level + 1];
	if (!parent)
		return;

	nritems = btrfs_header_nritems(parent);
	slot = path->slots[level + 1];

	if (slot > 0)
		btrfs_readahead_node_child(parent, slot - 1);
	if (slot + 1 < nritems)
		btrfs_readahead_node_child(parent, slot + 1);
}


/*
 * when we walk down the tree, it is usually safe to unlock the higher layers
 * in the tree. The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * tree.
 *
 * callers might also have set path->keep_locks, which tells this code to keep
 * the lock if the path points to the last slot in the block. This is part of
 * walking through the tree, and selecting the next slot in the higher block.
 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock, so
 * if lowest_unlock is 1, level 0 won't be unlocked
 */
static noinline void unlock_up(struct btrfs_path *path, int level,
			       int lowest_unlock, int min_write_lock_level,
			       int *write_lock_level)
{
	int i;
	int skip_level = level;
	bool check_skip = true;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			break;
		if (!path->locks[i])
			break;

		if (check_skip) {
			if (path->slots[i] == 0) {
				skip_level = i + 1;
				continue;
			}

			if (path->keep_locks) {
				u32 nritems;

				nritems = btrfs_header_nritems(path->nodes[i]);
				if (nritems < 1 || path->slots[i] >= nritems - 1) {
					skip_level = i + 1;
					continue;
				}
			}
		}

		if (i >= lowest_unlock && i > skip_level) {
			check_skip = false;
			btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
			path->locks[i] = 0;
			if (write_lock_level &&
			    i > min_write_lock_level &&
			    i <= *write_lock_level) {
				*write_lock_level = i - 1;
			}
		}
	}
}

/*
 * Helper function for btrfs_search_slot() and other functions that do a search
 * on a btree. The goal is to find a tree block in the cache (the radix tree at
 * fs_info->buffer_radix), but if we can't find it, or it's not up to date, read
 * its pages from disk.
 *
 * Returns -EAGAIN, with the path unlocked, if the caller needs to repeat the
 * whole btree search, starting again from the current root node.
 */
static int
read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
		      struct extent_buffer **eb_ret, int level, int slot,
		      const struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_tree_parent_check check = { 0 };
	u64 blocknr;
	u64 gen;
	struct extent_buffer *tmp;
	int ret;
	int parent_level;
	bool unlock_up;

	unlock_up = ((level + 1 < BTRFS_MAX_LEVEL) && p->locks[level + 1]);
	blocknr = btrfs_node_blockptr(*eb_ret, slot);
	gen = btrfs_node_ptr_generation(*eb_ret, slot);
	parent_level = btrfs_header_level(*eb_ret);
	btrfs_node_key_to_cpu(*eb_ret, &check.first_key, slot);
	check.has_first_key = true;
	check.level = parent_level - 1;
	check.transid = gen;
	check.owner_root = root->root_key.objectid;

	/*
	 * If we need to read an extent buffer from disk and we are holding locks
	 * on upper level nodes, we unlock all the upper nodes before reading the
	 * extent buffer, and then return -EAGAIN to the caller as it needs to
	 * restart the search. We don't release the lock on the current level
	 * because we need to walk this node to figure out which blocks to read.
	 */
	tmp = find_extent_buffer(fs_info, blocknr);
	if (tmp) {
		if (p->reada == READA_FORWARD_ALWAYS)
			reada_for_search(fs_info, p, level, slot, key->objectid);

		/* first we do an atomic uptodate check */
		if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
			/*
			 * Do extra check for first_key, eb can be stale due to
			 * being cached, read from scrub, or have multiple
			 * parents (shared tree blocks).
			 */
			if (btrfs_verify_level_key(tmp,
					parent_level - 1, &check.first_key, gen)) {
				free_extent_buffer(tmp);
				return -EUCLEAN;
			}
			*eb_ret = tmp;
			return 0;
		}

		if (p->nowait) {
			free_extent_buffer(tmp);
			return -EAGAIN;
		}

		if (unlock_up)
			btrfs_unlock_up_safe(p, level + 1);

		/* now we're allowed to do a blocking uptodate check */
		ret = btrfs_read_extent_buffer(tmp, &check);
		if (ret) {
			free_extent_buffer(tmp);
			btrfs_release_path(p);
			return -EIO;
		}
		if (btrfs_check_eb_owner(tmp, root->root_key.objectid)) {
			free_extent_buffer(tmp);
			btrfs_release_path(p);
			return -EUCLEAN;
		}

		if (unlock_up)
			ret = -EAGAIN;

		goto out;
	} else if (p->nowait) {
		return -EAGAIN;
	}

	if (unlock_up) {
		btrfs_unlock_up_safe(p, level + 1);
		ret = -EAGAIN;
	} else {
		ret = 0;
	}

	if (p->reada != READA_NONE)
		reada_for_search(fs_info, p, level, slot, key->objectid);

	tmp = read_tree_block(fs_info, blocknr, &check);
	if (IS_ERR(tmp)) {
		btrfs_release_path(p);
		return PTR_ERR(tmp);
	}
	/*
	 * If the read above didn't mark this buffer up to date,
	 * it will never end up being up to date. Set ret to EIO now
	 * and give up so that our caller doesn't loop forever
	 * on our EAGAINs.
	 */
	if (!extent_buffer_uptodate(tmp))
		ret = -EIO;

out:
	if (ret == 0) {
		*eb_ret = tmp;
	} else {
		free_extent_buffer(tmp);
		btrfs_release_path(p);
	}

	return ret;
}
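
/*
 * Illustrative caller pattern (added, not part of the original file): the
 * -EAGAIN returned by read_block_for_search() above (and by
 * setup_nodes_for_search() below) means the path was released and the
 * search must restart from the root, the way btrfs_search_slot() does with
 * its 'again:' label:
 *
 * again:
 *	b = btrfs_search_slot_get_root(root, p, write_lock_level);
 *	...
 *	err = read_block_for_search(root, p, &b, level, slot, key);
 *	if (err == -EAGAIN)
 *		goto again;
 */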

/*
 * helper function for btrfs_search_slot. This does all of the checks
 * for node-level blocks and does any balancing required based on
 * the ins_len.
 *
 * If no extra work was required, zero is returned. If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must
 * start over
 */
static int
setup_nodes_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer *b, int level, int ins_len,
		       int *write_lock_level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			return -EAGAIN;
		}

		reada_for_balance(p, level);
		ret = split_node(trans, root, p, level);

		b = p->nodes[level];
	} else if (ins_len < 0 && btrfs_header_nritems(b) <
		   BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			return -EAGAIN;
		}

		reada_for_balance(p, level);
		ret = balance_level(trans, root, p, level);
		if (ret)
			return ret;

		b = p->nodes[level];
		if (!b) {
			btrfs_release_path(p);
			return -EAGAIN;
		}
		BUG_ON(btrfs_header_nritems(b) == 1);
	}
	return ret;
}

int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
		    u64 iobjectid, u64 ioff, u8 key_type,
		    struct btrfs_key *found_key)
{
	int ret;
	struct btrfs_key key;
	struct extent_buffer *eb;

	ASSERT(path);
	ASSERT(found_key);

	key.type = key_type;
	key.objectid = iobjectid;
	key.offset = ioff;

	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	eb = path->nodes[0];
	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
		ret = btrfs_next_leaf(fs_root, path);
		if (ret)
			return ret;
		eb = path->nodes[0];
	}

	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
	if (found_key->type != key.type ||
	    found_key->objectid != key.objectid)
		return 1;

	return 0;
}

static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
							struct btrfs_path *p,
							int write_lock_level)
{
	struct extent_buffer *b;
	int root_lock = 0;
	int level = 0;

	if (p->search_commit_root) {
		b = root->commit_root;
		atomic_inc(&b->refs);
		level = btrfs_header_level(b);
		/*
		 * Ensure that all callers have set skip_locking when
		 * p->search_commit_root = 1.
		 */
		ASSERT(p->skip_locking == 1);

		goto out;
	}

	if (p->skip_locking) {
		b = btrfs_root_node(root);
		level = btrfs_header_level(b);
		goto out;
	}

	/* We try very hard to do read locks on the root */
	root_lock = BTRFS_READ_LOCK;

	/*
	 * If the level is set to maximum, we can skip trying to get the read
	 * lock.
	 */
	if (write_lock_level < BTRFS_MAX_LEVEL) {
		/*
		 * We don't know the level of the root node until we actually
		 * have it read locked
		 */
		if (p->nowait) {
			b = btrfs_try_read_lock_root_node(root);
			if (IS_ERR(b))
				return b;
		} else {
			b = btrfs_read_lock_root_node(root);
		}
		level = btrfs_header_level(b);
		if (level > write_lock_level)
			goto out;

		/* Whoops, must trade for write lock */
		btrfs_tree_read_unlock(b);
		free_extent_buffer(b);
	}

	b = btrfs_lock_root_node(root);
	root_lock = BTRFS_WRITE_LOCK;

	/* The level might have changed, check again */
	level = btrfs_header_level(b);

out:
	/*
	 * The root may have failed to write out at some point, and thus is no
	 * longer valid, return an error in this case.
	 */
	if (!extent_buffer_uptodate(b)) {
		if (root_lock)
			btrfs_tree_unlock_rw(b, root_lock);
		free_extent_buffer(b);
		return ERR_PTR(-EIO);
	}

	p->nodes[level] = b;
	if (!p->skip_locking)
		p->locks[level] = root_lock;
	/*
	 * Callers are responsible for dropping b's references.
	 */
	return b;
}

/*
 * Replace the extent buffer at the lowest level of the path with a cloned
 * version. The purpose is to be able to use it safely, after releasing the
 * commit root semaphore, even if relocation is happening in parallel, the
 * transaction used for relocation is committed and the extent buffer is
 * reallocated in the next transaction.
 *
 * This is used in a context where the caller does not prevent transaction
 * commits from happening, either by holding a transaction handle or holding
 * some lock, while it's doing searches through a commit root.
 * At the moment it's only used for send operations.
 */
static int finish_need_commit_sem_search(struct btrfs_path *path)
{
	const int i = path->lowest_level;
	const int slot = path->slots[i];
	struct extent_buffer *lowest = path->nodes[i];
	struct extent_buffer *clone;

	ASSERT(path->need_commit_sem);

	if (!lowest)
		return 0;

	lockdep_assert_held_read(&lowest->fs_info->commit_root_sem);

	clone = btrfs_clone_extent_buffer(lowest);
	if (!clone)
		return -ENOMEM;

	btrfs_release_path(path);
	path->nodes[i] = clone;
	path->slots[i] = slot;

	return 0;
}
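
/*
 * Illustration added for clarity (not part of the original file), for the
 * prev_cmp == 0 shortcut in search_for_key_slot() below: each key pointer
 * in a node holds the lowest key of the child it points to, so an exact
 * match at the parent means the key sits at slot 0 of every node on the
 * way down:
 *
 *	parent node:  [ (A, ptr0) (M, ptr1) ... ]   <- searched key == M
 *	child (ptr1): [ M ... ]                     <- M is at slot 0
 */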

static inline int search_for_key_slot(struct extent_buffer *eb,
				      int search_low_slot,
				      const struct btrfs_key *key,
				      int prev_cmp,
				      int *slot)
{
	/*
	 * If a previous call to btrfs_bin_search() on a parent node returned an
	 * exact match (prev_cmp == 0), we can safely assume the target key will
	 * always be at slot 0 on lower levels, since each key pointer
	 * (struct btrfs_key_ptr) refers to the lowest key accessible from the
	 * subtree it points to. Thus we can skip searching lower levels.
	 */
	if (prev_cmp == 0) {
		*slot = 0;
		return 0;
	}

	return btrfs_bin_search(eb, search_low_slot, key, slot);
}

static int search_leaf(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       const struct btrfs_key *key,
		       struct btrfs_path *path,
		       int ins_len,
		       int prev_cmp)
{
	struct extent_buffer *leaf = path->nodes[0];
	int leaf_free_space = -1;
	int search_low_slot = 0;
	int ret;
	bool do_bin_search = true;

	/*
	 * If we are doing an insertion, the leaf has enough free space and the
	 * destination slot for the key is not slot 0, then we can unlock our
	 * write lock on the parent, and any other upper nodes, before doing the
	 * binary search on the leaf (with search_for_key_slot()), allowing other
	 * tasks to lock the parent and any other upper nodes.
	 */
	if (ins_len > 0) {
		/*
		 * Cache the leaf free space, since we will need it later and it
		 * will not change until then.
		 */
		leaf_free_space = btrfs_leaf_free_space(leaf);

		/*
		 * !path->locks[1] means we have a single node tree, the leaf is
		 * the root of the tree.
		 */
		if (path->locks[1] && leaf_free_space >= ins_len) {
			struct btrfs_disk_key first_key;

			ASSERT(btrfs_header_nritems(leaf) > 0);
			btrfs_item_key(leaf, &first_key, 0);

			/*
			 * Doing the extra comparison with the first key is cheap,
			 * taking into account that the first key is very likely
			 * already in a cache line because it immediately follows
			 * the extent buffer's header and we have recently accessed
			 * the header's level field.
			 */
			ret = comp_keys(&first_key, key);
			if (ret < 0) {
				/*
				 * The first key is smaller than the key we want
				 * to insert, so we are safe to unlock all upper
				 * nodes and we have to do the binary search.
				 *
				 * We do use btrfs_unlock_up_safe() and not
				 * unlock_up() because the latter does not unlock
				 * nodes with a slot of 0 - we can safely unlock
				 * any node even if its slot is 0 since in this
				 * case the key does not end up at slot 0 of the
				 * leaf and there's no need to split the leaf.
				 */
				btrfs_unlock_up_safe(path, 1);
				search_low_slot = 1;
			} else {
				/*
				 * The first key is >= the key we want to
				 * insert, so we can skip the binary search as
				 * the target key will be at slot 0.
				 *
				 * We can not unlock upper nodes when the key is
				 * less than the first key, because we will need
				 * to update the key at slot 0 of the parent node
				 * and possibly of other upper nodes too.
				 * If the key matches the first key, then we can
				 * unlock all the upper nodes, using
				 * btrfs_unlock_up_safe() instead of unlock_up()
				 * as stated above.
				 */
				if (ret == 0)
					btrfs_unlock_up_safe(path, 1);
				/*
				 * ret is already 0 or 1, matching the result of
				 * a btrfs_bin_search() call, so there is no need
				 * to adjust it.
				 */
				do_bin_search = false;
				path->slots[0] = 0;
			}
		}
	}

	if (do_bin_search) {
		ret = search_for_key_slot(leaf, search_low_slot, key,
					  prev_cmp, &path->slots[0]);
		if (ret < 0)
			return ret;
	}

	if (ins_len > 0) {
		/*
		 * Item key already exists. In this case, if we are allowed to
		 * insert the item (for example, in dir_item case, item key
		 * collision is allowed), it will be merged with the original
		 * item. Only the item size grows, no new btrfs item will be
		 * added. If search_for_extension is not set, ins_len already
		 * accounts for the size of struct btrfs_item; deduct it here
		 * so the leaf space check will be correct.
		 */
		if (ret == 0 && !path->search_for_extension) {
			ASSERT(ins_len >= sizeof(struct btrfs_item));
			ins_len -= sizeof(struct btrfs_item);
		}

		ASSERT(leaf_free_space >= 0);

		if (leaf_free_space < ins_len) {
			int err;

			err = split_leaf(trans, root, key, path, ins_len,
					 (ret == 0));
			ASSERT(err <= 0);
			if (WARN_ON(err > 0))
				err = -EUCLEAN;
			if (err)
				ret = err;
		}
	}

	return ret;
}
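
/*
 * Illustrative sketch (added, not part of the original file) of the ins_len
 * convention documented for btrfs_search_slot() below: when inserting a new
 * item, the reservation covers both the item header and its data, e.g.:
 *
 *	ret = btrfs_search_slot(trans, root, &key, path,
 *				sizeof(struct btrfs_item) + data_size, 1);
 */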
Only the item size grows, no new btrfs item will be 2002 * added. If search_for_extension is not set, ins_len already 2003 * accounts for the size of struct btrfs_item, so deduct it here to 2004 * keep the leaf space check correct. 2005 */ 2006 if (ret == 0 && !path->search_for_extension) { 2007 ASSERT(ins_len >= sizeof(struct btrfs_item)); 2008 ins_len -= sizeof(struct btrfs_item); 2009 } 2010 2011 ASSERT(leaf_free_space >= 0); 2012 2013 if (leaf_free_space < ins_len) { 2014 int err; 2015 2016 err = split_leaf(trans, root, key, path, ins_len, 2017 (ret == 0)); 2018 ASSERT(err <= 0); 2019 if (WARN_ON(err > 0)) 2020 err = -EUCLEAN; 2021 if (err) 2022 ret = err; 2023 } 2024 } 2025 2026 return ret; 2027 } 2028 2029 /* 2030 * btrfs_search_slot - look for a key in a tree and perform necessary 2031 * modifications to preserve tree invariants. 2032 * 2033 * @trans: Handle of transaction, used when modifying the tree 2034 * @p: Holds all btree nodes along the search path 2035 * @root: The root node of the tree 2036 * @key: The key we are looking for 2037 * @ins_len: Indicates purpose of search: 2038 * >0 for inserts, it's the size of the item inserted (*) 2039 * <0 for deletions 2040 * 0 for plain searches, not modifying the tree 2041 * 2042 * (*) If size of item inserted doesn't include 2043 * sizeof(struct btrfs_item), then p->search_for_extension must 2044 * be set. 2045 * @cow: boolean, whether CoW operations should be performed. Must always 2046 * be 1 when modifying the tree. 2047 * 2048 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree. 2049 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible) 2050 * 2051 * If @key is found, 0 is returned and you can find the item in the leaf level 2052 * of the path (level 0) 2053 * 2054 * If @key isn't found, 1 is returned and the leaf level of the path (level 0) 2055 * points to the slot where it should be inserted 2056 * 2057 * If an error is encountered while searching the tree a negative error number 2058 * is returned 2059 */ 2060 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root, 2061 const struct btrfs_key *key, struct btrfs_path *p, 2062 int ins_len, int cow) 2063 { 2064 struct btrfs_fs_info *fs_info = root->fs_info; 2065 struct extent_buffer *b; 2066 int slot; 2067 int ret; 2068 int err; 2069 int level; 2070 int lowest_unlock = 1; 2071 /* everything at write_lock_level or lower must be write locked */ 2072 int write_lock_level = 0; 2073 u8 lowest_level = 0; 2074 int min_write_lock_level; 2075 int prev_cmp; 2076 2077 might_sleep(); 2078 2079 lowest_level = p->lowest_level; 2080 WARN_ON(lowest_level && ins_len > 0); 2081 WARN_ON(p->nodes[0] != NULL); 2082 BUG_ON(!cow && ins_len); 2083 2084 /* 2085 * For now only allow nowait for read only operations. There's no 2086 * strict reason why we can't, we only need it for reads so it's 2087 * only implemented for reads.
2088 */ 2089 ASSERT(!p->nowait || !cow); 2090 2091 if (ins_len < 0) { 2092 lowest_unlock = 2; 2093 2094 /* When we are removing items, we might have to go up to level 2095 * two as we update tree pointers. Make sure we keep write 2096 * locks on those levels as well. 2097 */ 2098 write_lock_level = 2; 2099 } else if (ins_len > 0) { 2100 /* 2101 * for inserting items, make sure we have a write lock on 2102 * level 1 so we can update keys 2103 */ 2104 write_lock_level = 1; 2105 } 2106 2107 if (!cow) 2108 write_lock_level = -1; 2109 2110 if (cow && (p->keep_locks || p->lowest_level)) 2111 write_lock_level = BTRFS_MAX_LEVEL; 2112 2113 min_write_lock_level = write_lock_level; 2114 2115 if (p->need_commit_sem) { 2116 ASSERT(p->search_commit_root); 2117 if (p->nowait) { 2118 if (!down_read_trylock(&fs_info->commit_root_sem)) 2119 return -EAGAIN; 2120 } else { 2121 down_read(&fs_info->commit_root_sem); 2122 } 2123 } 2124 2125 again: 2126 prev_cmp = -1; 2127 b = btrfs_search_slot_get_root(root, p, write_lock_level); 2128 if (IS_ERR(b)) { 2129 ret = PTR_ERR(b); 2130 goto done; 2131 } 2132 2133 while (b) { 2134 int dec = 0; 2135 2136 level = btrfs_header_level(b); 2137 2138 if (cow) { 2139 bool last_level = (level == (BTRFS_MAX_LEVEL - 1)); 2140 2141 /* 2142 * if we don't really need to cow this block 2143 * then we don't want to set the path blocking, 2144 * so we test it here 2145 */ 2146 if (!should_cow_block(trans, root, b)) 2147 goto cow_done; 2148 2149 /* 2150 * must have write locks on this node and the 2151 * parent 2152 */ 2153 if (level > write_lock_level || 2154 (level + 1 > write_lock_level && 2155 level + 1 < BTRFS_MAX_LEVEL && 2156 p->nodes[level + 1])) { 2157 write_lock_level = level + 1; 2158 btrfs_release_path(p); 2159 goto again; 2160 } 2161 2162 if (last_level) 2163 err = btrfs_cow_block(trans, root, b, NULL, 0, 2164 &b, 2165 BTRFS_NESTING_COW); 2166 else 2167 err = btrfs_cow_block(trans, root, b, 2168 p->nodes[level + 1], 2169 p->slots[level + 1], &b, 2170 BTRFS_NESTING_COW); 2171 if (err) { 2172 ret = err; 2173 goto done; 2174 } 2175 } 2176 cow_done: 2177 p->nodes[level] = b; 2178 2179 /* 2180 * we have a lock on b and as long as we aren't changing 2181 * the tree, there is no way for the items in b to change. 2182 * It is safe to drop the lock on our parent before we 2183 * go through the expensive btree search on b. 2184 * 2185 * If we're inserting or deleting (ins_len != 0), then we might 2186 * be changing slot zero, which may require changing the parent. 2187 * So, we can't drop the lock until after we know which slot 2188 * we're operating on.
2189 */ 2190 if (!ins_len && !p->keep_locks) { 2191 int u = level + 1; 2192 2193 if (u < BTRFS_MAX_LEVEL && p->locks[u]) { 2194 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]); 2195 p->locks[u] = 0; 2196 } 2197 } 2198 2199 if (level == 0) { 2200 if (ins_len > 0) 2201 ASSERT(write_lock_level >= 1); 2202 2203 ret = search_leaf(trans, root, key, p, ins_len, prev_cmp); 2204 if (!p->search_for_split) 2205 unlock_up(p, level, lowest_unlock, 2206 min_write_lock_level, NULL); 2207 goto done; 2208 } 2209 2210 ret = search_for_key_slot(b, 0, key, prev_cmp, &slot); 2211 if (ret < 0) 2212 goto done; 2213 prev_cmp = ret; 2214 2215 if (ret && slot > 0) { 2216 dec = 1; 2217 slot--; 2218 } 2219 p->slots[level] = slot; 2220 err = setup_nodes_for_search(trans, root, p, b, level, ins_len, 2221 &write_lock_level); 2222 if (err == -EAGAIN) 2223 goto again; 2224 if (err) { 2225 ret = err; 2226 goto done; 2227 } 2228 b = p->nodes[level]; 2229 slot = p->slots[level]; 2230 2231 /* 2232 * Slot 0 is special, if we change the key we have to update 2233 * the parent pointer which means we must have a write lock on 2234 * the parent 2235 */ 2236 if (slot == 0 && ins_len && write_lock_level < level + 1) { 2237 write_lock_level = level + 1; 2238 btrfs_release_path(p); 2239 goto again; 2240 } 2241 2242 unlock_up(p, level, lowest_unlock, min_write_lock_level, 2243 &write_lock_level); 2244 2245 if (level == lowest_level) { 2246 if (dec) 2247 p->slots[level]++; 2248 goto done; 2249 } 2250 2251 err = read_block_for_search(root, p, &b, level, slot, key); 2252 if (err == -EAGAIN) 2253 goto again; 2254 if (err) { 2255 ret = err; 2256 goto done; 2257 } 2258 2259 if (!p->skip_locking) { 2260 level = btrfs_header_level(b); 2261 2262 btrfs_maybe_reset_lockdep_class(root, b); 2263 2264 if (level <= write_lock_level) { 2265 btrfs_tree_lock(b); 2266 p->locks[level] = BTRFS_WRITE_LOCK; 2267 } else { 2268 if (p->nowait) { 2269 if (!btrfs_try_tree_read_lock(b)) { 2270 free_extent_buffer(b); 2271 ret = -EAGAIN; 2272 goto done; 2273 } 2274 } else { 2275 btrfs_tree_read_lock(b); 2276 } 2277 p->locks[level] = BTRFS_READ_LOCK; 2278 } 2279 p->nodes[level] = b; 2280 } 2281 } 2282 ret = 1; 2283 done: 2284 if (ret < 0 && !p->skip_release_on_error) 2285 btrfs_release_path(p); 2286 2287 if (p->need_commit_sem) { 2288 int ret2; 2289 2290 ret2 = finish_need_commit_sem_search(p); 2291 up_read(&fs_info->commit_root_sem); 2292 if (ret2) 2293 ret = ret2; 2294 } 2295 2296 return ret; 2297 } 2298 ALLOW_ERROR_INJECTION(btrfs_search_slot, ERRNO); 2299 2300 /* 2301 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the 2302 * current state of the tree together with the operations recorded in the tree 2303 * modification log to search for the key in a previous version of this tree, as 2304 * denoted by the time_seq parameter. 2305 * 2306 * Naturally, there is no support for insert, delete or cow operations. 2307 * 2308 * The resulting path and return value will be set up as if we called 2309 * btrfs_search_slot at that point in time with ins_len and cow both set to 0. 
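 */

/*
 * A minimal read-only lookup sketch (illustrative only, not part of the
 * original file; the helper name demo_lookup_item() is hypothetical). It
 * shows the return convention of btrfs_search_slot() described above:
 * 0 for an exact match, 1 with the slot at the insertion position when
 * the key is missing, and a negative errno on error.
 */
static int __maybe_unused demo_lookup_item(struct btrfs_root *root,
					   u64 objectid, u8 type, u64 offset)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	key.objectid = objectid;
	key.type = type;
	key.offset = offset;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* No transaction, ins_len == 0 and cow == 0: a plain search. */
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret == 0) {
		/* Exact match at path->nodes[0], slot path->slots[0]. */
	} else if (ret > 0) {
		/* Not found: the slot is where the key would be inserted. */
	}
	btrfs_free_path(path);
	return ret;
}

/*
 * (btrfs_search_old_slot(), whose description is above, follows.)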
2310 */ 2311 int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key, 2312 struct btrfs_path *p, u64 time_seq) 2313 { 2314 struct btrfs_fs_info *fs_info = root->fs_info; 2315 struct extent_buffer *b; 2316 int slot; 2317 int ret; 2318 int err; 2319 int level; 2320 int lowest_unlock = 1; 2321 u8 lowest_level = 0; 2322 2323 lowest_level = p->lowest_level; 2324 WARN_ON(p->nodes[0] != NULL); 2325 ASSERT(!p->nowait); 2326 2327 if (p->search_commit_root) { 2328 BUG_ON(time_seq); 2329 return btrfs_search_slot(NULL, root, key, p, 0, 0); 2330 } 2331 2332 again: 2333 b = btrfs_get_old_root(root, time_seq); 2334 if (!b) { 2335 ret = -EIO; 2336 goto done; 2337 } 2338 level = btrfs_header_level(b); 2339 p->locks[level] = BTRFS_READ_LOCK; 2340 2341 while (b) { 2342 int dec = 0; 2343 2344 level = btrfs_header_level(b); 2345 p->nodes[level] = b; 2346 2347 /* 2348 * we have a lock on b and as long as we aren't changing 2349 * the tree, there is no way for the items in b to change. 2350 * It is safe to drop the lock on our parent before we 2351 * go through the expensive btree search on b. 2352 */ 2353 btrfs_unlock_up_safe(p, level + 1); 2354 2355 ret = btrfs_bin_search(b, 0, key, &slot); 2356 if (ret < 0) 2357 goto done; 2358 2359 if (level == 0) { 2360 p->slots[level] = slot; 2361 unlock_up(p, level, lowest_unlock, 0, NULL); 2362 goto done; 2363 } 2364 2365 if (ret && slot > 0) { 2366 dec = 1; 2367 slot--; 2368 } 2369 p->slots[level] = slot; 2370 unlock_up(p, level, lowest_unlock, 0, NULL); 2371 2372 if (level == lowest_level) { 2373 if (dec) 2374 p->slots[level]++; 2375 goto done; 2376 } 2377 2378 err = read_block_for_search(root, p, &b, level, slot, key); 2379 if (err == -EAGAIN) 2380 goto again; 2381 if (err) { 2382 ret = err; 2383 goto done; 2384 } 2385 2386 level = btrfs_header_level(b); 2387 btrfs_tree_read_lock(b); 2388 b = btrfs_tree_mod_log_rewind(fs_info, p, b, time_seq); 2389 if (!b) { 2390 ret = -ENOMEM; 2391 goto done; 2392 } 2393 p->locks[level] = BTRFS_READ_LOCK; 2394 p->nodes[level] = b; 2395 } 2396 ret = 1; 2397 done: 2398 if (ret < 0) 2399 btrfs_release_path(p); 2400 2401 return ret; 2402 } 2403 2404 /* 2405 * Search the tree again to find a leaf with smaller keys. 2406 * Returns 0 if it found something. 2407 * Returns 1 if there are no smaller keys. 2408 * Returns < 0 on error. 2409 * 2410 * This may release the path, and so you may lose any locks held at the 2411 * time you call it. 2412 */ 2413 static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path) 2414 { 2415 struct btrfs_key key; 2416 struct btrfs_key orig_key; 2417 struct btrfs_disk_key found_key; 2418 int ret; 2419 2420 btrfs_item_key_to_cpu(path->nodes[0], &key, 0); 2421 orig_key = key; 2422 2423 if (key.offset > 0) { 2424 key.offset--; 2425 } else if (key.type > 0) { 2426 key.type--; 2427 key.offset = (u64)-1; 2428 } else if (key.objectid > 0) { 2429 key.objectid--; 2430 key.type = (u8)-1; 2431 key.offset = (u64)-1; 2432 } else { 2433 return 1; 2434 } 2435 2436 btrfs_release_path(path); 2437 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2438 if (ret <= 0) 2439 return ret; 2440 2441 /* 2442 * Previous key not found.
Even if we were at slot 0 of the leaf we had 2443 * before releasing the path and calling btrfs_search_slot(), we now may 2444 * be in a slot pointing to the same original key - this can happen if 2445 * after we released the path, one or more items were moved from a 2446 * sibling leaf into the front of the leaf we had due to an insertion 2447 * (see push_leaf_right()). 2448 * If we hit this case and our slot is > 0, just decrement the slot 2449 * so that the caller does not process the same key again, which may or 2450 * may not break the caller, depending on its logic. 2451 */ 2452 if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) { 2453 btrfs_item_key(path->nodes[0], &found_key, path->slots[0]); 2454 ret = comp_keys(&found_key, &orig_key); 2455 if (ret == 0) { 2456 if (path->slots[0] > 0) { 2457 path->slots[0]--; 2458 return 0; 2459 } 2460 /* 2461 * At slot 0, same key as before, it means orig_key is 2462 * the lowest, leftmost, key in the tree. We're done. 2463 */ 2464 return 1; 2465 } 2466 } 2467 2468 btrfs_item_key(path->nodes[0], &found_key, 0); 2469 ret = comp_keys(&found_key, &key); 2470 /* 2471 * We might have had an item with the previous key in the tree right 2472 * before we released our path. And after we released our path, that 2473 * item might have been pushed to the first slot (0) of the leaf we 2474 * were holding due to a tree balance. Alternatively, an item with the 2475 * previous key can exist as the only element of a leaf (big fat item). 2476 * Therefore account for these 2 cases, so that our callers (like 2477 * btrfs_previous_item) don't miss an existing item with a key matching 2478 * the previous key we computed above. 2479 */ 2480 if (ret <= 0) 2481 return 0; 2482 return 1; 2483 } 2484 2485 /* 2486 * helper to use instead of search slot if no exact match is needed but 2487 * instead the next or previous item should be returned. 2488 * When find_higher is true, the next higher item is returned, the next lower 2489 * otherwise. 2490 * When return_any and find_higher are both true, and no higher item is found, 2491 * return the next lower instead. 2492 * When return_any is true and find_higher is false, and no lower item is found, 2493 * return the next higher instead. 2494 * It returns 0 if any item is found, 1 if none is found (tree empty), and 2495 * < 0 on error 2496 */ 2497 int btrfs_search_slot_for_read(struct btrfs_root *root, 2498 const struct btrfs_key *key, 2499 struct btrfs_path *p, int find_higher, 2500 int return_any) 2501 { 2502 int ret; 2503 struct extent_buffer *leaf; 2504 2505 again: 2506 ret = btrfs_search_slot(NULL, root, key, p, 0, 0); 2507 if (ret <= 0) 2508 return ret; 2509 /* 2510 * a return value of 1 means the path is at the position where the 2511 * item should be inserted. Normally this is the next bigger item, 2512 * but in case the previous item is the last in a leaf, path points 2513 * to the first free slot in the previous leaf, i.e. at an invalid 2514 * item.
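 *
 * The four find_higher/return_any combinations thus behave as follows
 * (an exact match is always returned directly):
 *   find_higher=1, return_any=0: next higher item, or 1 if there is none
 *   find_higher=1, return_any=1: next higher item, else the next lower one
 *   find_higher=0, return_any=0: next lower item, or 1 if there is none
 *   find_higher=0, return_any=1: next lower item, else the next higher one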
2515 */ 2516 leaf = p->nodes[0]; 2517 2518 if (find_higher) { 2519 if (p->slots[0] >= btrfs_header_nritems(leaf)) { 2520 ret = btrfs_next_leaf(root, p); 2521 if (ret <= 0) 2522 return ret; 2523 if (!return_any) 2524 return 1; 2525 /* 2526 * no higher item found, return the next 2527 * lower instead 2528 */ 2529 return_any = 0; 2530 find_higher = 0; 2531 btrfs_release_path(p); 2532 goto again; 2533 } 2534 } else { 2535 if (p->slots[0] == 0) { 2536 ret = btrfs_prev_leaf(root, p); 2537 if (ret < 0) 2538 return ret; 2539 if (!ret) { 2540 leaf = p->nodes[0]; 2541 if (p->slots[0] == btrfs_header_nritems(leaf)) 2542 p->slots[0]--; 2543 return 0; 2544 } 2545 if (!return_any) 2546 return 1; 2547 /* 2548 * no lower item found, return the next 2549 * higher instead 2550 */ 2551 return_any = 0; 2552 find_higher = 1; 2553 btrfs_release_path(p); 2554 goto again; 2555 } else { 2556 --p->slots[0]; 2557 } 2558 } 2559 return 0; 2560 } 2561 2562 /* 2563 * Execute search and call btrfs_previous_item to traverse backwards if the item 2564 * was not found. 2565 * 2566 * Return 0 if found, 1 if not found and < 0 if error. 2567 */ 2568 int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key, 2569 struct btrfs_path *path) 2570 { 2571 int ret; 2572 2573 ret = btrfs_search_slot(NULL, root, key, path, 0, 0); 2574 if (ret > 0) 2575 ret = btrfs_previous_item(root, path, key->objectid, key->type); 2576 2577 if (ret == 0) 2578 btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]); 2579 2580 return ret; 2581 } 2582 2583 /* 2584 * Search for a valid slot for the given path. 2585 * 2586 * @root: The root node of the tree. 2587 * @key: Will contain a valid item if found. 2588 * @path: The starting point to validate the slot. 2589 * 2590 * Return: 0 if the item is valid 2591 * 1 if not found 2592 * <0 if error. 2593 */ 2594 int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key, 2595 struct btrfs_path *path) 2596 { 2597 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { 2598 int ret; 2599 2600 ret = btrfs_next_leaf(root, path); 2601 if (ret) 2602 return ret; 2603 } 2604 2605 btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]); 2606 return 0; 2607 } 2608 2609 /* 2610 * adjust the pointers going up the tree, starting at level 2611 * making sure the right key of each node is points to 'key'. 2612 * This is used after shifting pointers to the left, so it stops 2613 * fixing up pointers when a given leaf/node is not in slot 0 of the 2614 * higher levels 2615 * 2616 */ 2617 static void fixup_low_keys(struct btrfs_path *path, 2618 struct btrfs_disk_key *key, int level) 2619 { 2620 int i; 2621 struct extent_buffer *t; 2622 int ret; 2623 2624 for (i = level; i < BTRFS_MAX_LEVEL; i++) { 2625 int tslot = path->slots[i]; 2626 2627 if (!path->nodes[i]) 2628 break; 2629 t = path->nodes[i]; 2630 ret = btrfs_tree_mod_log_insert_key(t, tslot, 2631 BTRFS_MOD_LOG_KEY_REPLACE); 2632 BUG_ON(ret < 0); 2633 btrfs_set_node_key(t, key, tslot); 2634 btrfs_mark_buffer_dirty(path->nodes[i]); 2635 if (tslot != 0) 2636 break; 2637 } 2638 } 2639 2640 /* 2641 * update item key. 2642 * 2643 * This function isn't completely safe. 
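 * It only BUG()s if the new key breaks ordering with the neighbouring
 * items in the same leaf (see the checks below), nothing more.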
It's the caller's responsibility to ensure 2644 * that the new key won't break the order 2645 */ 2646 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info, 2647 struct btrfs_path *path, 2648 const struct btrfs_key *new_key) 2649 { 2650 struct btrfs_disk_key disk_key; 2651 struct extent_buffer *eb; 2652 int slot; 2653 2654 eb = path->nodes[0]; 2655 slot = path->slots[0]; 2656 if (slot > 0) { 2657 btrfs_item_key(eb, &disk_key, slot - 1); 2658 if (unlikely(comp_keys(&disk_key, new_key) >= 0)) { 2659 btrfs_print_leaf(eb); 2660 btrfs_crit(fs_info, 2661 "slot %u key (%llu %u %llu) new key (%llu %u %llu)", 2662 slot, btrfs_disk_key_objectid(&disk_key), 2663 btrfs_disk_key_type(&disk_key), 2664 btrfs_disk_key_offset(&disk_key), 2665 new_key->objectid, new_key->type, 2666 new_key->offset); 2667 BUG(); 2668 } 2669 } 2670 if (slot < btrfs_header_nritems(eb) - 1) { 2671 btrfs_item_key(eb, &disk_key, slot + 1); 2672 if (unlikely(comp_keys(&disk_key, new_key) <= 0)) { 2673 btrfs_print_leaf(eb); 2674 btrfs_crit(fs_info, 2675 "slot %u key (%llu %u %llu) new key (%llu %u %llu)", 2676 slot, btrfs_disk_key_objectid(&disk_key), 2677 btrfs_disk_key_type(&disk_key), 2678 btrfs_disk_key_offset(&disk_key), 2679 new_key->objectid, new_key->type, 2680 new_key->offset); 2681 BUG(); 2682 } 2683 } 2684 2685 btrfs_cpu_key_to_disk(&disk_key, new_key); 2686 btrfs_set_item_key(eb, &disk_key, slot); 2687 btrfs_mark_buffer_dirty(eb); 2688 if (slot == 0) 2689 fixup_low_keys(path, &disk_key, 1); 2690 } 2691 2692 /* 2693 * Check key order of two sibling extent buffers. 2694 * 2695 * Return true if something is wrong. 2696 * Return false if everything is fine. 2697 * 2698 * Tree-checker only works inside one tree block, thus the following 2699 * corruption cannot be detected by the tree-checker: 2700 * 2701 * Leaf @left | Leaf @right 2702 * -------------------------------------------------------------- 2703 * | 1 | 2 | 3 | 4 | 5 | f6 | | 7 | 8 | 2704 * 2705 * Key f6 in leaf @left itself is valid, but not valid when the next 2706 * key in leaf @right is 7. 2707 * This can only be checked at tree block merge time. 2708 * And since the tree checker has ensured all key order in each tree block 2709 * is correct, we only need to check the last key of @left against the first 2710 * key of @right.
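 */

/*
 * A usage sketch (illustrative only, not part of the original file; the
 * helper name demo_iterate_from() is hypothetical): walking items starting
 * at a given key with btrfs_search_slot() and btrfs_get_next_valid_item(),
 * which together hide leaf boundaries from the caller.
 */
static int __maybe_unused demo_iterate_from(struct btrfs_root *root,
					    const struct btrfs_key *first)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, first, path, 0, 0);
	if (ret < 0)
		goto out;

	while (true) {
		/* Moves to the next leaf if the current slot is past the end. */
		ret = btrfs_get_next_valid_item(root, &key, path);
		if (ret)
			break;
		/* ... process the item at path->nodes[0], path->slots[0] ... */
		path->slots[0]++;
	}
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * (The sibling key check described above follows.)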
2711 */ 2712 static bool check_sibling_keys(struct extent_buffer *left, 2713 struct extent_buffer *right) 2714 { 2715 struct btrfs_key left_last; 2716 struct btrfs_key right_first; 2717 int level = btrfs_header_level(left); 2718 int nr_left = btrfs_header_nritems(left); 2719 int nr_right = btrfs_header_nritems(right); 2720 2721 /* No key to check in one of the tree blocks */ 2722 if (!nr_left || !nr_right) 2723 return false; 2724 2725 if (level) { 2726 btrfs_node_key_to_cpu(left, &left_last, nr_left - 1); 2727 btrfs_node_key_to_cpu(right, &right_first, 0); 2728 } else { 2729 btrfs_item_key_to_cpu(left, &left_last, nr_left - 1); 2730 btrfs_item_key_to_cpu(right, &right_first, 0); 2731 } 2732 2733 if (unlikely(btrfs_comp_cpu_keys(&left_last, &right_first) >= 0)) { 2734 btrfs_crit(left->fs_info, "left extent buffer:"); 2735 btrfs_print_tree(left, false); 2736 btrfs_crit(left->fs_info, "right extent buffer:"); 2737 btrfs_print_tree(right, false); 2738 btrfs_crit(left->fs_info, 2739 "bad key order, sibling blocks, left last (%llu %u %llu) right first (%llu %u %llu)", 2740 left_last.objectid, left_last.type, 2741 left_last.offset, right_first.objectid, 2742 right_first.type, right_first.offset); 2743 return true; 2744 } 2745 return false; 2746 } 2747 2748 /* 2749 * try to push data from one node into the next node left in the 2750 * tree. 2751 * 2752 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible 2753 * error, and > 0 if there was no room in the left hand block. 2754 */ 2755 static int push_node_left(struct btrfs_trans_handle *trans, 2756 struct extent_buffer *dst, 2757 struct extent_buffer *src, int empty) 2758 { 2759 struct btrfs_fs_info *fs_info = trans->fs_info; 2760 int push_items = 0; 2761 int src_nritems; 2762 int dst_nritems; 2763 int ret = 0; 2764 2765 src_nritems = btrfs_header_nritems(src); 2766 dst_nritems = btrfs_header_nritems(dst); 2767 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems; 2768 WARN_ON(btrfs_header_generation(src) != trans->transid); 2769 WARN_ON(btrfs_header_generation(dst) != trans->transid); 2770 2771 if (!empty && src_nritems <= 8) 2772 return 1; 2773 2774 if (push_items <= 0) 2775 return 1; 2776 2777 if (empty) { 2778 push_items = min(src_nritems, push_items); 2779 if (push_items < src_nritems) { 2780 /* leave at least 8 pointers in the node if 2781 * we aren't going to empty it 2782 */ 2783 if (src_nritems - push_items < 8) { 2784 if (push_items <= 8) 2785 return 1; 2786 push_items -= 8; 2787 } 2788 } 2789 } else 2790 push_items = min(src_nritems - 8, push_items); 2791 2792 /* dst is the left eb, src is the middle eb */ 2793 if (check_sibling_keys(dst, src)) { 2794 ret = -EUCLEAN; 2795 btrfs_abort_transaction(trans, ret); 2796 return ret; 2797 } 2798 ret = btrfs_tree_mod_log_eb_copy(dst, src, dst_nritems, 0, push_items); 2799 if (ret) { 2800 btrfs_abort_transaction(trans, ret); 2801 return ret; 2802 } 2803 copy_extent_buffer(dst, src, 2804 btrfs_node_key_ptr_offset(dst, dst_nritems), 2805 btrfs_node_key_ptr_offset(src, 0), 2806 push_items * sizeof(struct btrfs_key_ptr)); 2807 2808 if (push_items < src_nritems) { 2809 /* 2810 * btrfs_tree_mod_log_eb_copy handles logging the move, so we 2811 * don't need to do an explicit tree mod log operation for it. 
2812 */ 2813 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(src, 0), 2814 btrfs_node_key_ptr_offset(src, push_items), 2815 (src_nritems - push_items) * 2816 sizeof(struct btrfs_key_ptr)); 2817 } 2818 btrfs_set_header_nritems(src, src_nritems - push_items); 2819 btrfs_set_header_nritems(dst, dst_nritems + push_items); 2820 btrfs_mark_buffer_dirty(src); 2821 btrfs_mark_buffer_dirty(dst); 2822 2823 return ret; 2824 } 2825 2826 /* 2827 * try to push data from one node into the next node right in the 2828 * tree. 2829 * 2830 * returns 0 if some ptrs were pushed, < 0 if there was some horrible 2831 * error, and > 0 if there was no room in the right hand block. 2832 * 2833 * this will only push up to 1/2 the contents of the left node over 2834 */ 2835 static int balance_node_right(struct btrfs_trans_handle *trans, 2836 struct extent_buffer *dst, 2837 struct extent_buffer *src) 2838 { 2839 struct btrfs_fs_info *fs_info = trans->fs_info; 2840 int push_items = 0; 2841 int max_push; 2842 int src_nritems; 2843 int dst_nritems; 2844 int ret = 0; 2845 2846 WARN_ON(btrfs_header_generation(src) != trans->transid); 2847 WARN_ON(btrfs_header_generation(dst) != trans->transid); 2848 2849 src_nritems = btrfs_header_nritems(src); 2850 dst_nritems = btrfs_header_nritems(dst); 2851 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems; 2852 if (push_items <= 0) 2853 return 1; 2854 2855 if (src_nritems < 4) 2856 return 1; 2857 2858 max_push = src_nritems / 2 + 1; 2859 /* don't try to empty the node */ 2860 if (max_push >= src_nritems) 2861 return 1; 2862 2863 if (max_push < push_items) 2864 push_items = max_push; 2865 2866 /* dst is the right eb, src is the middle eb */ 2867 if (check_sibling_keys(src, dst)) { 2868 ret = -EUCLEAN; 2869 btrfs_abort_transaction(trans, ret); 2870 return ret; 2871 } 2872 2873 /* 2874 * btrfs_tree_mod_log_eb_copy handles logging the move, so we don't 2875 * need to do an explicit tree mod log operation for it. 2876 */ 2877 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(dst, push_items), 2878 btrfs_node_key_ptr_offset(dst, 0), 2879 (dst_nritems) * 2880 sizeof(struct btrfs_key_ptr)); 2881 2882 ret = btrfs_tree_mod_log_eb_copy(dst, src, 0, src_nritems - push_items, 2883 push_items); 2884 if (ret) { 2885 btrfs_abort_transaction(trans, ret); 2886 return ret; 2887 } 2888 copy_extent_buffer(dst, src, 2889 btrfs_node_key_ptr_offset(dst, 0), 2890 btrfs_node_key_ptr_offset(src, src_nritems - push_items), 2891 push_items * sizeof(struct btrfs_key_ptr)); 2892 2893 btrfs_set_header_nritems(src, src_nritems - push_items); 2894 btrfs_set_header_nritems(dst, dst_nritems + push_items); 2895 2896 btrfs_mark_buffer_dirty(src); 2897 btrfs_mark_buffer_dirty(dst); 2898 2899 return ret; 2900 } 2901 2902 /* 2903 * helper function to insert a new root level in the tree. 2904 * A new node is allocated, and a single item is inserted to 2905 * point to the existing root 2906 * 2907 * returns zero on success or < 0 on failure. 
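 *
 * If the old root was at level L, the tree grows by one level:
 *
 *   before:  root -> old  (level L)
 *   after:   root -> C    (level L + 1, a single key pointer) -> old
 *
 * and the path is updated so that path->nodes[L + 1] is the new root C
 * with slot 0.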
2908 */ 2909 static noinline int insert_new_root(struct btrfs_trans_handle *trans, 2910 struct btrfs_root *root, 2911 struct btrfs_path *path, int level) 2912 { 2913 struct btrfs_fs_info *fs_info = root->fs_info; 2914 u64 lower_gen; 2915 struct extent_buffer *lower; 2916 struct extent_buffer *c; 2917 struct extent_buffer *old; 2918 struct btrfs_disk_key lower_key; 2919 int ret; 2920 2921 BUG_ON(path->nodes[level]); 2922 BUG_ON(path->nodes[level-1] != root->node); 2923 2924 lower = path->nodes[level-1]; 2925 if (level == 1) 2926 btrfs_item_key(lower, &lower_key, 0); 2927 else 2928 btrfs_node_key(lower, &lower_key, 0); 2929 2930 c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, 2931 &lower_key, level, root->node->start, 0, 2932 BTRFS_NESTING_NEW_ROOT); 2933 if (IS_ERR(c)) 2934 return PTR_ERR(c); 2935 2936 root_add_used(root, fs_info->nodesize); 2937 2938 btrfs_set_header_nritems(c, 1); 2939 btrfs_set_node_key(c, &lower_key, 0); 2940 btrfs_set_node_blockptr(c, 0, lower->start); 2941 lower_gen = btrfs_header_generation(lower); 2942 WARN_ON(lower_gen != trans->transid); 2943 2944 btrfs_set_node_ptr_generation(c, 0, lower_gen); 2945 2946 btrfs_mark_buffer_dirty(c); 2947 2948 old = root->node; 2949 ret = btrfs_tree_mod_log_insert_root(root->node, c, false); 2950 BUG_ON(ret < 0); 2951 rcu_assign_pointer(root->node, c); 2952 2953 /* the super has an extra ref to root->node */ 2954 free_extent_buffer(old); 2955 2956 add_root_to_dirty_list(root); 2957 atomic_inc(&c->refs); 2958 path->nodes[level] = c; 2959 path->locks[level] = BTRFS_WRITE_LOCK; 2960 path->slots[level] = 0; 2961 return 0; 2962 } 2963 2964 /* 2965 * worker function to insert a single pointer in a node. 2966 * the node should have enough room for the pointer already 2967 * 2968 * slot and level indicate where you want the key to go, and 2969 * blocknr is the block the key points to. 2970 */ 2971 static void insert_ptr(struct btrfs_trans_handle *trans, 2972 struct btrfs_path *path, 2973 struct btrfs_disk_key *key, u64 bytenr, 2974 int slot, int level) 2975 { 2976 struct extent_buffer *lower; 2977 int nritems; 2978 int ret; 2979 2980 BUG_ON(!path->nodes[level]); 2981 btrfs_assert_tree_write_locked(path->nodes[level]); 2982 lower = path->nodes[level]; 2983 nritems = btrfs_header_nritems(lower); 2984 BUG_ON(slot > nritems); 2985 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(trans->fs_info)); 2986 if (slot != nritems) { 2987 if (level) { 2988 ret = btrfs_tree_mod_log_insert_move(lower, slot + 1, 2989 slot, nritems - slot); 2990 BUG_ON(ret < 0); 2991 } 2992 memmove_extent_buffer(lower, 2993 btrfs_node_key_ptr_offset(lower, slot + 1), 2994 btrfs_node_key_ptr_offset(lower, slot), 2995 (nritems - slot) * sizeof(struct btrfs_key_ptr)); 2996 } 2997 if (level) { 2998 ret = btrfs_tree_mod_log_insert_key(lower, slot, 2999 BTRFS_MOD_LOG_KEY_ADD); 3000 BUG_ON(ret < 0); 3001 } 3002 btrfs_set_node_key(lower, key, slot); 3003 btrfs_set_node_blockptr(lower, slot, bytenr); 3004 WARN_ON(trans->transid == 0); 3005 btrfs_set_node_ptr_generation(lower, slot, trans->transid); 3006 btrfs_set_header_nritems(lower, nritems + 1); 3007 btrfs_mark_buffer_dirty(lower); 3008 } 3009 3010 /* 3011 * split the node at the specified level in path in two. 3012 * The path is corrected to point to the appropriate node after the split 3013 * 3014 * Before splitting this tries to make some room in the node by pushing 3015 * left and right, if either one works, it returns right away. 
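 *
 * The split point is mid = (nritems + 1) / 2: the node keeps items
 * [0, mid) and the new right sibling receives items [mid, nritems).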
3016 * 3017 * returns 0 on success and < 0 on failure 3018 */ 3019 static noinline int split_node(struct btrfs_trans_handle *trans, 3020 struct btrfs_root *root, 3021 struct btrfs_path *path, int level) 3022 { 3023 struct btrfs_fs_info *fs_info = root->fs_info; 3024 struct extent_buffer *c; 3025 struct extent_buffer *split; 3026 struct btrfs_disk_key disk_key; 3027 int mid; 3028 int ret; 3029 u32 c_nritems; 3030 3031 c = path->nodes[level]; 3032 WARN_ON(btrfs_header_generation(c) != trans->transid); 3033 if (c == root->node) { 3034 /* 3035 * trying to split the root, lets make a new one 3036 * 3037 * tree mod log: We don't log_removal old root in 3038 * insert_new_root, because that root buffer will be kept as a 3039 * normal node. We are going to log removal of half of the 3040 * elements below with btrfs_tree_mod_log_eb_copy(). We're 3041 * holding a tree lock on the buffer, which is why we cannot 3042 * race with other tree_mod_log users. 3043 */ 3044 ret = insert_new_root(trans, root, path, level + 1); 3045 if (ret) 3046 return ret; 3047 } else { 3048 ret = push_nodes_for_insert(trans, root, path, level); 3049 c = path->nodes[level]; 3050 if (!ret && btrfs_header_nritems(c) < 3051 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) 3052 return 0; 3053 if (ret < 0) 3054 return ret; 3055 } 3056 3057 c_nritems = btrfs_header_nritems(c); 3058 mid = (c_nritems + 1) / 2; 3059 btrfs_node_key(c, &disk_key, mid); 3060 3061 split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, 3062 &disk_key, level, c->start, 0, 3063 BTRFS_NESTING_SPLIT); 3064 if (IS_ERR(split)) 3065 return PTR_ERR(split); 3066 3067 root_add_used(root, fs_info->nodesize); 3068 ASSERT(btrfs_header_level(c) == level); 3069 3070 ret = btrfs_tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid); 3071 if (ret) { 3072 btrfs_tree_unlock(split); 3073 free_extent_buffer(split); 3074 btrfs_abort_transaction(trans, ret); 3075 return ret; 3076 } 3077 copy_extent_buffer(split, c, 3078 btrfs_node_key_ptr_offset(split, 0), 3079 btrfs_node_key_ptr_offset(c, mid), 3080 (c_nritems - mid) * sizeof(struct btrfs_key_ptr)); 3081 btrfs_set_header_nritems(split, c_nritems - mid); 3082 btrfs_set_header_nritems(c, mid); 3083 3084 btrfs_mark_buffer_dirty(c); 3085 btrfs_mark_buffer_dirty(split); 3086 3087 insert_ptr(trans, path, &disk_key, split->start, 3088 path->slots[level + 1] + 1, level + 1); 3089 3090 if (path->slots[level] >= mid) { 3091 path->slots[level] -= mid; 3092 btrfs_tree_unlock(c); 3093 free_extent_buffer(c); 3094 path->nodes[level] = split; 3095 path->slots[level + 1] += 1; 3096 } else { 3097 btrfs_tree_unlock(split); 3098 free_extent_buffer(split); 3099 } 3100 return 0; 3101 } 3102 3103 /* 3104 * how many bytes are required to store the items in a leaf. start 3105 * and nr indicate which items in the leaf to check. This totals up the 3106 * space used both by the item structs and the item data 3107 */ 3108 static int leaf_space_used(const struct extent_buffer *l, int start, int nr) 3109 { 3110 int data_len; 3111 int nritems = btrfs_header_nritems(l); 3112 int end = min(nritems, start + nr) - 1; 3113 3114 if (!nr) 3115 return 0; 3116 data_len = btrfs_item_offset(l, start) + btrfs_item_size(l, start); 3117 data_len = data_len - btrfs_item_offset(l, end); 3118 data_len += sizeof(struct btrfs_item) * nr; 3119 WARN_ON(data_len < 0); 3120 return data_len; 3121 } 3122 3123 /* 3124 * The space between the end of the leaf items and 3125 * the start of the leaf data. 
IOW, how much room 3126 * the leaf has left for both items and data 3127 */ 3128 int btrfs_leaf_free_space(const struct extent_buffer *leaf) 3129 { 3130 struct btrfs_fs_info *fs_info = leaf->fs_info; 3131 int nritems = btrfs_header_nritems(leaf); 3132 int ret; 3133 3134 ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems); 3135 if (ret < 0) { 3136 btrfs_crit(fs_info, 3137 "leaf free space ret %d, leaf data size %lu, used %d nritems %d", 3138 ret, 3139 (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info), 3140 leaf_space_used(leaf, 0, nritems), nritems); 3141 } 3142 return ret; 3143 } 3144 3145 /* 3146 * min slot controls the lowest index we're willing to push to the 3147 * right. We'll push up to and including min_slot, but no lower 3148 */ 3149 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans, 3150 struct btrfs_path *path, 3151 int data_size, int empty, 3152 struct extent_buffer *right, 3153 int free_space, u32 left_nritems, 3154 u32 min_slot) 3155 { 3156 struct btrfs_fs_info *fs_info = right->fs_info; 3157 struct extent_buffer *left = path->nodes[0]; 3158 struct extent_buffer *upper = path->nodes[1]; 3159 struct btrfs_map_token token; 3160 struct btrfs_disk_key disk_key; 3161 int slot; 3162 u32 i; 3163 int push_space = 0; 3164 int push_items = 0; 3165 u32 nr; 3166 u32 right_nritems; 3167 u32 data_end; 3168 u32 this_item_size; 3169 3170 if (empty) 3171 nr = 0; 3172 else 3173 nr = max_t(u32, 1, min_slot); 3174 3175 if (path->slots[0] >= left_nritems) 3176 push_space += data_size; 3177 3178 slot = path->slots[1]; 3179 i = left_nritems - 1; 3180 while (i >= nr) { 3181 if (!empty && push_items > 0) { 3182 if (path->slots[0] > i) 3183 break; 3184 if (path->slots[0] == i) { 3185 int space = btrfs_leaf_free_space(left); 3186 3187 if (space + push_space * 2 > free_space) 3188 break; 3189 } 3190 } 3191 3192 if (path->slots[0] == i) 3193 push_space += data_size; 3194 3195 this_item_size = btrfs_item_size(left, i); 3196 if (this_item_size + sizeof(struct btrfs_item) + 3197 push_space > free_space) 3198 break; 3199 3200 push_items++; 3201 push_space += this_item_size + sizeof(struct btrfs_item); 3202 if (i == 0) 3203 break; 3204 i--; 3205 } 3206 3207 if (push_items == 0) 3208 goto out_unlock; 3209 3210 WARN_ON(!empty && push_items == left_nritems); 3211 3212 /* push left to right */ 3213 right_nritems = btrfs_header_nritems(right); 3214 3215 push_space = btrfs_item_data_end(left, left_nritems - push_items); 3216 push_space -= leaf_data_end(left); 3217 3218 /* make room in the right data area */ 3219 data_end = leaf_data_end(right); 3220 memmove_leaf_data(right, data_end - push_space, data_end, 3221 BTRFS_LEAF_DATA_SIZE(fs_info) - data_end); 3222 3223 /* copy from the left data area */ 3224 copy_leaf_data(right, left, BTRFS_LEAF_DATA_SIZE(fs_info) - push_space, 3225 leaf_data_end(left), push_space); 3226 3227 memmove_leaf_items(right, push_items, 0, right_nritems); 3228 3229 /* copy the items from left to right */ 3230 copy_leaf_items(right, left, 0, left_nritems - push_items, push_items); 3231 3232 /* update the item pointers */ 3233 btrfs_init_map_token(&token, right); 3234 right_nritems += push_items; 3235 btrfs_set_header_nritems(right, right_nritems); 3236 push_space = BTRFS_LEAF_DATA_SIZE(fs_info); 3237 for (i = 0; i < right_nritems; i++) { 3238 push_space -= btrfs_token_item_size(&token, i); 3239 btrfs_set_token_item_offset(&token, i, push_space); 3240 } 3241 3242 left_nritems -= push_items; 3243 btrfs_set_header_nritems(left, left_nritems); 3244 3245 if 
(left_nritems) 3246 btrfs_mark_buffer_dirty(left); 3247 else 3248 btrfs_clear_buffer_dirty(trans, left); 3249 3250 btrfs_mark_buffer_dirty(right); 3251 3252 btrfs_item_key(right, &disk_key, 0); 3253 btrfs_set_node_key(upper, &disk_key, slot + 1); 3254 btrfs_mark_buffer_dirty(upper); 3255 3256 /* then fixup the leaf pointer in the path */ 3257 if (path->slots[0] >= left_nritems) { 3258 path->slots[0] -= left_nritems; 3259 if (btrfs_header_nritems(path->nodes[0]) == 0) 3260 btrfs_clear_buffer_dirty(trans, path->nodes[0]); 3261 btrfs_tree_unlock(path->nodes[0]); 3262 free_extent_buffer(path->nodes[0]); 3263 path->nodes[0] = right; 3264 path->slots[1] += 1; 3265 } else { 3266 btrfs_tree_unlock(right); 3267 free_extent_buffer(right); 3268 } 3269 return 0; 3270 3271 out_unlock: 3272 btrfs_tree_unlock(right); 3273 free_extent_buffer(right); 3274 return 1; 3275 } 3276 3277 /* 3278 * push some data in the path leaf to the right, trying to free up at 3279 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3280 * 3281 * returns 1 if the push failed because the other node didn't have enough 3282 * room, 0 if everything worked out and < 0 if there were major errors. 3283 * 3284 * this will push starting from min_slot to the end of the leaf. It won't 3285 * push any slot lower than min_slot 3286 */ 3287 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root 3288 *root, struct btrfs_path *path, 3289 int min_data_size, int data_size, 3290 int empty, u32 min_slot) 3291 { 3292 struct extent_buffer *left = path->nodes[0]; 3293 struct extent_buffer *right; 3294 struct extent_buffer *upper; 3295 int slot; 3296 int free_space; 3297 u32 left_nritems; 3298 int ret; 3299 3300 if (!path->nodes[1]) 3301 return 1; 3302 3303 slot = path->slots[1]; 3304 upper = path->nodes[1]; 3305 if (slot >= btrfs_header_nritems(upper) - 1) 3306 return 1; 3307 3308 btrfs_assert_tree_write_locked(path->nodes[1]); 3309 3310 right = btrfs_read_node_slot(upper, slot + 1); 3311 if (IS_ERR(right)) 3312 return PTR_ERR(right); 3313 3314 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT); 3315 3316 free_space = btrfs_leaf_free_space(right); 3317 if (free_space < data_size) 3318 goto out_unlock; 3319 3320 ret = btrfs_cow_block(trans, root, right, upper, 3321 slot + 1, &right, BTRFS_NESTING_RIGHT_COW); 3322 if (ret) 3323 goto out_unlock; 3324 3325 left_nritems = btrfs_header_nritems(left); 3326 if (left_nritems == 0) 3327 goto out_unlock; 3328 3329 if (check_sibling_keys(left, right)) { 3330 ret = -EUCLEAN; 3331 btrfs_abort_transaction(trans, ret); 3332 btrfs_tree_unlock(right); 3333 free_extent_buffer(right); 3334 return ret; 3335 } 3336 if (path->slots[0] == left_nritems && !empty) { 3337 /* Key greater than all keys in the leaf, right neighbor has 3338 * enough room for it and we're not emptying our leaf to delete 3339 * it, therefore use right neighbor to insert the new item and 3340 * no need to touch/dirty our left leaf. */ 3341 btrfs_tree_unlock(left); 3342 free_extent_buffer(left); 3343 path->nodes[0] = right; 3344 path->slots[0] = 0; 3345 path->slots[1]++; 3346 return 0; 3347 } 3348 3349 return __push_leaf_right(trans, path, min_data_size, empty, right, 3350 free_space, left_nritems, min_slot); 3351 out_unlock: 3352 btrfs_tree_unlock(right); 3353 free_extent_buffer(right); 3354 return 1; 3355 } 3356 3357 /* 3358 * push some data in the path leaf to the left, trying to free up at 3359 * least data_size bytes. 
returns zero if the push worked, nonzero otherwise 3360 * 3361 * max_slot can put a limit on how far into the leaf we'll push items. The 3362 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the 3363 * items 3364 */ 3365 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans, 3366 struct btrfs_path *path, int data_size, 3367 int empty, struct extent_buffer *left, 3368 int free_space, u32 right_nritems, 3369 u32 max_slot) 3370 { 3371 struct btrfs_fs_info *fs_info = left->fs_info; 3372 struct btrfs_disk_key disk_key; 3373 struct extent_buffer *right = path->nodes[0]; 3374 int i; 3375 int push_space = 0; 3376 int push_items = 0; 3377 u32 old_left_nritems; 3378 u32 nr; 3379 int ret = 0; 3380 u32 this_item_size; 3381 u32 old_left_item_size; 3382 struct btrfs_map_token token; 3383 3384 if (empty) 3385 nr = min(right_nritems, max_slot); 3386 else 3387 nr = min(right_nritems - 1, max_slot); 3388 3389 for (i = 0; i < nr; i++) { 3390 if (!empty && push_items > 0) { 3391 if (path->slots[0] < i) 3392 break; 3393 if (path->slots[0] == i) { 3394 int space = btrfs_leaf_free_space(right); 3395 3396 if (space + push_space * 2 > free_space) 3397 break; 3398 } 3399 } 3400 3401 if (path->slots[0] == i) 3402 push_space += data_size; 3403 3404 this_item_size = btrfs_item_size(right, i); 3405 if (this_item_size + sizeof(struct btrfs_item) + push_space > 3406 free_space) 3407 break; 3408 3409 push_items++; 3410 push_space += this_item_size + sizeof(struct btrfs_item); 3411 } 3412 3413 if (push_items == 0) { 3414 ret = 1; 3415 goto out; 3416 } 3417 WARN_ON(!empty && push_items == btrfs_header_nritems(right)); 3418 3419 /* push data from right to left */ 3420 copy_leaf_items(left, right, btrfs_header_nritems(left), 0, push_items); 3421 3422 push_space = BTRFS_LEAF_DATA_SIZE(fs_info) - 3423 btrfs_item_offset(right, push_items - 1); 3424 3425 copy_leaf_data(left, right, leaf_data_end(left) - push_space, 3426 btrfs_item_offset(right, push_items - 1), push_space); 3427 old_left_nritems = btrfs_header_nritems(left); 3428 BUG_ON(old_left_nritems <= 0); 3429 3430 btrfs_init_map_token(&token, left); 3431 old_left_item_size = btrfs_item_offset(left, old_left_nritems - 1); 3432 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) { 3433 u32 ioff; 3434 3435 ioff = btrfs_token_item_offset(&token, i); 3436 btrfs_set_token_item_offset(&token, i, 3437 ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size)); 3438 } 3439 btrfs_set_header_nritems(left, old_left_nritems + push_items); 3440 3441 /* fixup right node */ 3442 if (push_items > right_nritems) 3443 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items, 3444 right_nritems); 3445 3446 if (push_items < right_nritems) { 3447 push_space = btrfs_item_offset(right, push_items - 1) - 3448 leaf_data_end(right); 3449 memmove_leaf_data(right, 3450 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space, 3451 leaf_data_end(right), push_space); 3452 3453 memmove_leaf_items(right, 0, push_items, 3454 btrfs_header_nritems(right) - push_items); 3455 } 3456 3457 btrfs_init_map_token(&token, right); 3458 right_nritems -= push_items; 3459 btrfs_set_header_nritems(right, right_nritems); 3460 push_space = BTRFS_LEAF_DATA_SIZE(fs_info); 3461 for (i = 0; i < right_nritems; i++) { 3462 push_space = push_space - btrfs_token_item_size(&token, i); 3463 btrfs_set_token_item_offset(&token, i, push_space); 3464 } 3465 3466 btrfs_mark_buffer_dirty(left); 3467 if (right_nritems) 3468 btrfs_mark_buffer_dirty(right); 3469 else 3470 btrfs_clear_buffer_dirty(trans, 
right); 3471 3472 btrfs_item_key(right, &disk_key, 0); 3473 fixup_low_keys(path, &disk_key, 1); 3474 3475 /* then fixup the leaf pointer in the path */ 3476 if (path->slots[0] < push_items) { 3477 path->slots[0] += old_left_nritems; 3478 btrfs_tree_unlock(path->nodes[0]); 3479 free_extent_buffer(path->nodes[0]); 3480 path->nodes[0] = left; 3481 path->slots[1] -= 1; 3482 } else { 3483 btrfs_tree_unlock(left); 3484 free_extent_buffer(left); 3485 path->slots[0] -= push_items; 3486 } 3487 BUG_ON(path->slots[0] < 0); 3488 return ret; 3489 out: 3490 btrfs_tree_unlock(left); 3491 free_extent_buffer(left); 3492 return ret; 3493 } 3494 3495 /* 3496 * push some data in the path leaf to the left, trying to free up at 3497 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3498 * 3499 * max_slot can put a limit on how far into the leaf we'll push items. The 3500 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the 3501 * items 3502 */ 3503 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root 3504 *root, struct btrfs_path *path, int min_data_size, 3505 int data_size, int empty, u32 max_slot) 3506 { 3507 struct extent_buffer *right = path->nodes[0]; 3508 struct extent_buffer *left; 3509 int slot; 3510 int free_space; 3511 u32 right_nritems; 3512 int ret = 0; 3513 3514 slot = path->slots[1]; 3515 if (slot == 0) 3516 return 1; 3517 if (!path->nodes[1]) 3518 return 1; 3519 3520 right_nritems = btrfs_header_nritems(right); 3521 if (right_nritems == 0) 3522 return 1; 3523 3524 btrfs_assert_tree_write_locked(path->nodes[1]); 3525 3526 left = btrfs_read_node_slot(path->nodes[1], slot - 1); 3527 if (IS_ERR(left)) 3528 return PTR_ERR(left); 3529 3530 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT); 3531 3532 free_space = btrfs_leaf_free_space(left); 3533 if (free_space < data_size) { 3534 ret = 1; 3535 goto out; 3536 } 3537 3538 ret = btrfs_cow_block(trans, root, left, 3539 path->nodes[1], slot - 1, &left, 3540 BTRFS_NESTING_LEFT_COW); 3541 if (ret) { 3542 /* we hit -ENOSPC, but it isn't fatal here */ 3543 if (ret == -ENOSPC) 3544 ret = 1; 3545 goto out; 3546 } 3547 3548 if (check_sibling_keys(left, right)) { 3549 ret = -EUCLEAN; 3550 btrfs_abort_transaction(trans, ret); 3551 goto out; 3552 } 3553 return __push_leaf_left(trans, path, min_data_size, empty, left, 3554 free_space, right_nritems, max_slot); 3555 out: 3556 btrfs_tree_unlock(left); 3557 free_extent_buffer(left); 3558 return ret; 3559 } 3560 3561 /* 3562 * split the path's leaf in two, making sure there is at least data_size 3563 * available for the resulting leaf level of the path. 
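 *
 * (This helper does the copying half of a leaf split: split_leaf() below
 * decides the split point and then calls it.)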
3564 */ 3565 static noinline void copy_for_split(struct btrfs_trans_handle *trans, 3566 struct btrfs_path *path, 3567 struct extent_buffer *l, 3568 struct extent_buffer *right, 3569 int slot, int mid, int nritems) 3570 { 3571 struct btrfs_fs_info *fs_info = trans->fs_info; 3572 int data_copy_size; 3573 int rt_data_off; 3574 int i; 3575 struct btrfs_disk_key disk_key; 3576 struct btrfs_map_token token; 3577 3578 nritems = nritems - mid; 3579 btrfs_set_header_nritems(right, nritems); 3580 data_copy_size = btrfs_item_data_end(l, mid) - leaf_data_end(l); 3581 3582 copy_leaf_items(right, l, 0, mid, nritems); 3583 3584 copy_leaf_data(right, l, BTRFS_LEAF_DATA_SIZE(fs_info) - data_copy_size, 3585 leaf_data_end(l), data_copy_size); 3586 3587 rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_data_end(l, mid); 3588 3589 btrfs_init_map_token(&token, right); 3590 for (i = 0; i < nritems; i++) { 3591 u32 ioff; 3592 3593 ioff = btrfs_token_item_offset(&token, i); 3594 btrfs_set_token_item_offset(&token, i, ioff + rt_data_off); 3595 } 3596 3597 btrfs_set_header_nritems(l, mid); 3598 btrfs_item_key(right, &disk_key, 0); 3599 insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1); 3600 3601 btrfs_mark_buffer_dirty(right); 3602 btrfs_mark_buffer_dirty(l); 3603 BUG_ON(path->slots[0] != slot); 3604 3605 if (mid <= slot) { 3606 btrfs_tree_unlock(path->nodes[0]); 3607 free_extent_buffer(path->nodes[0]); 3608 path->nodes[0] = right; 3609 path->slots[0] -= mid; 3610 path->slots[1] += 1; 3611 } else { 3612 btrfs_tree_unlock(right); 3613 free_extent_buffer(right); 3614 } 3615 3616 BUG_ON(path->slots[0] < 0); 3617 } 3618 3619 /* 3620 * double splits happen when we need to insert a big item in the middle 3621 * of a leaf. A double split can leave us with 3 mostly empty leaves: 3622 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ] 3623 * A B C 3624 * 3625 * We avoid this by trying to push the items on either side of our target 3626 * into the adjacent leaves. If all goes well we can avoid the double split 3627 * completely. 3628 */ 3629 static noinline int push_for_double_split(struct btrfs_trans_handle *trans, 3630 struct btrfs_root *root, 3631 struct btrfs_path *path, 3632 int data_size) 3633 { 3634 int ret; 3635 int progress = 0; 3636 int slot; 3637 u32 nritems; 3638 int space_needed = data_size; 3639 3640 slot = path->slots[0]; 3641 if (slot < btrfs_header_nritems(path->nodes[0])) 3642 space_needed -= btrfs_leaf_free_space(path->nodes[0]); 3643 3644 /* 3645 * try to push all the items after our slot into the 3646 * right leaf 3647 */ 3648 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot); 3649 if (ret < 0) 3650 return ret; 3651 3652 if (ret == 0) 3653 progress++; 3654 3655 nritems = btrfs_header_nritems(path->nodes[0]); 3656 /* 3657 * our goal is to get our slot at the start or end of a leaf. 
If 3658 * we've done so we're done 3659 */ 3660 if (path->slots[0] == 0 || path->slots[0] == nritems) 3661 return 0; 3662 3663 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size) 3664 return 0; 3665 3666 /* try to push all the items before our slot into the next leaf */ 3667 slot = path->slots[0]; 3668 space_needed = data_size; 3669 if (slot > 0) 3670 space_needed -= btrfs_leaf_free_space(path->nodes[0]); 3671 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot); 3672 if (ret < 0) 3673 return ret; 3674 3675 if (ret == 0) 3676 progress++; 3677 3678 if (progress) 3679 return 0; 3680 return 1; 3681 } 3682 3683 /* 3684 * split the path's leaf in two, making sure there is at least data_size 3685 * available for the resulting leaf level of the path. 3686 * 3687 * returns 0 if all went well and < 0 on failure. 3688 */ 3689 static noinline int split_leaf(struct btrfs_trans_handle *trans, 3690 struct btrfs_root *root, 3691 const struct btrfs_key *ins_key, 3692 struct btrfs_path *path, int data_size, 3693 int extend) 3694 { 3695 struct btrfs_disk_key disk_key; 3696 struct extent_buffer *l; 3697 u32 nritems; 3698 int mid; 3699 int slot; 3700 struct extent_buffer *right; 3701 struct btrfs_fs_info *fs_info = root->fs_info; 3702 int ret = 0; 3703 int wret; 3704 int split; 3705 int num_doubles = 0; 3706 int tried_avoid_double = 0; 3707 3708 l = path->nodes[0]; 3709 slot = path->slots[0]; 3710 if (extend && data_size + btrfs_item_size(l, slot) + 3711 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info)) 3712 return -EOVERFLOW; 3713 3714 /* first try to make some room by pushing left and right */ 3715 if (data_size && path->nodes[1]) { 3716 int space_needed = data_size; 3717 3718 if (slot < btrfs_header_nritems(l)) 3719 space_needed -= btrfs_leaf_free_space(l); 3720 3721 wret = push_leaf_right(trans, root, path, space_needed, 3722 space_needed, 0, 0); 3723 if (wret < 0) 3724 return wret; 3725 if (wret) { 3726 space_needed = data_size; 3727 if (slot > 0) 3728 space_needed -= btrfs_leaf_free_space(l); 3729 wret = push_leaf_left(trans, root, path, space_needed, 3730 space_needed, 0, (u32)-1); 3731 if (wret < 0) 3732 return wret; 3733 } 3734 l = path->nodes[0]; 3735 3736 /* did the pushes work? 
*/ 3737 if (btrfs_leaf_free_space(l) >= data_size) 3738 return 0; 3739 } 3740 3741 if (!path->nodes[1]) { 3742 ret = insert_new_root(trans, root, path, 1); 3743 if (ret) 3744 return ret; 3745 } 3746 again: 3747 split = 1; 3748 l = path->nodes[0]; 3749 slot = path->slots[0]; 3750 nritems = btrfs_header_nritems(l); 3751 mid = (nritems + 1) / 2; 3752 3753 if (mid <= slot) { 3754 if (nritems == 1 || 3755 leaf_space_used(l, mid, nritems - mid) + data_size > 3756 BTRFS_LEAF_DATA_SIZE(fs_info)) { 3757 if (slot >= nritems) { 3758 split = 0; 3759 } else { 3760 mid = slot; 3761 if (mid != nritems && 3762 leaf_space_used(l, mid, nritems - mid) + 3763 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) { 3764 if (data_size && !tried_avoid_double) 3765 goto push_for_double; 3766 split = 2; 3767 } 3768 } 3769 } 3770 } else { 3771 if (leaf_space_used(l, 0, mid) + data_size > 3772 BTRFS_LEAF_DATA_SIZE(fs_info)) { 3773 if (!extend && data_size && slot == 0) { 3774 split = 0; 3775 } else if ((extend || !data_size) && slot == 0) { 3776 mid = 1; 3777 } else { 3778 mid = slot; 3779 if (mid != nritems && 3780 leaf_space_used(l, mid, nritems - mid) + 3781 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) { 3782 if (data_size && !tried_avoid_double) 3783 goto push_for_double; 3784 split = 2; 3785 } 3786 } 3787 } 3788 } 3789 3790 if (split == 0) 3791 btrfs_cpu_key_to_disk(&disk_key, ins_key); 3792 else 3793 btrfs_item_key(l, &disk_key, mid); 3794 3795 /* 3796 * We have to use BTRFS_NESTING_NEW_ROOT here if we've done a double 3797 * split, because we're only allowed to have MAX_LOCKDEP_SUBCLASSES 3798 * subclasses, which is 8 at the time of this patch, and we've maxed it 3799 * out. In the future we could add a 3800 * BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just 3801 * use BTRFS_NESTING_NEW_ROOT. 3802 */ 3803 right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, 3804 &disk_key, 0, l->start, 0, 3805 num_doubles ? BTRFS_NESTING_NEW_ROOT : 3806 BTRFS_NESTING_SPLIT); 3807 if (IS_ERR(right)) 3808 return PTR_ERR(right); 3809 3810 root_add_used(root, fs_info->nodesize); 3811 3812 if (split == 0) { 3813 if (mid <= slot) { 3814 btrfs_set_header_nritems(right, 0); 3815 insert_ptr(trans, path, &disk_key, 3816 right->start, path->slots[1] + 1, 1); 3817 btrfs_tree_unlock(path->nodes[0]); 3818 free_extent_buffer(path->nodes[0]); 3819 path->nodes[0] = right; 3820 path->slots[0] = 0; 3821 path->slots[1] += 1; 3822 } else { 3823 btrfs_set_header_nritems(right, 0); 3824 insert_ptr(trans, path, &disk_key, 3825 right->start, path->slots[1], 1); 3826 btrfs_tree_unlock(path->nodes[0]); 3827 free_extent_buffer(path->nodes[0]); 3828 path->nodes[0] = right; 3829 path->slots[0] = 0; 3830 if (path->slots[1] == 0) 3831 fixup_low_keys(path, &disk_key, 1); 3832 } 3833 /* 3834 * We create a new leaf 'right' for the required ins_len and 3835 * we'll do btrfs_mark_buffer_dirty() on this leaf after copying 3836 * the new item's content to 'right'.
3837 */ 3838 return ret; 3839 } 3840 3841 copy_for_split(trans, path, l, right, slot, mid, nritems); 3842 3843 if (split == 2) { 3844 BUG_ON(num_doubles != 0); 3845 num_doubles++; 3846 goto again; 3847 } 3848 3849 return 0; 3850 3851 push_for_double: 3852 push_for_double_split(trans, root, path, data_size); 3853 tried_avoid_double = 1; 3854 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size) 3855 return 0; 3856 goto again; 3857 } 3858 3859 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans, 3860 struct btrfs_root *root, 3861 struct btrfs_path *path, int ins_len) 3862 { 3863 struct btrfs_key key; 3864 struct extent_buffer *leaf; 3865 struct btrfs_file_extent_item *fi; 3866 u64 extent_len = 0; 3867 u32 item_size; 3868 int ret; 3869 3870 leaf = path->nodes[0]; 3871 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3872 3873 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY && 3874 key.type != BTRFS_EXTENT_CSUM_KEY); 3875 3876 if (btrfs_leaf_free_space(leaf) >= ins_len) 3877 return 0; 3878 3879 item_size = btrfs_item_size(leaf, path->slots[0]); 3880 if (key.type == BTRFS_EXTENT_DATA_KEY) { 3881 fi = btrfs_item_ptr(leaf, path->slots[0], 3882 struct btrfs_file_extent_item); 3883 extent_len = btrfs_file_extent_num_bytes(leaf, fi); 3884 } 3885 btrfs_release_path(path); 3886 3887 path->keep_locks = 1; 3888 path->search_for_split = 1; 3889 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 3890 path->search_for_split = 0; 3891 if (ret > 0) 3892 ret = -EAGAIN; 3893 if (ret < 0) 3894 goto err; 3895 3896 ret = -EAGAIN; 3897 leaf = path->nodes[0]; 3898 /* if our item isn't there, return now */ 3899 if (item_size != btrfs_item_size(leaf, path->slots[0])) 3900 goto err; 3901 3902 /* the leaf has changed, it now has room. return now */ 3903 if (btrfs_leaf_free_space(path->nodes[0]) >= ins_len) 3904 goto err; 3905 3906 if (key.type == BTRFS_EXTENT_DATA_KEY) { 3907 fi = btrfs_item_ptr(leaf, path->slots[0], 3908 struct btrfs_file_extent_item); 3909 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi)) 3910 goto err; 3911 } 3912 3913 ret = split_leaf(trans, root, &key, path, ins_len, 1); 3914 if (ret) 3915 goto err; 3916 3917 path->keep_locks = 0; 3918 btrfs_unlock_up_safe(path, 1); 3919 return 0; 3920 err: 3921 path->keep_locks = 0; 3922 return ret; 3923 } 3924 3925 static noinline int split_item(struct btrfs_path *path, 3926 const struct btrfs_key *new_key, 3927 unsigned long split_offset) 3928 { 3929 struct extent_buffer *leaf; 3930 int orig_slot, slot; 3931 char *buf; 3932 u32 nritems; 3933 u32 item_size; 3934 u32 orig_offset; 3935 struct btrfs_disk_key disk_key; 3936 3937 leaf = path->nodes[0]; 3938 BUG_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item)); 3939 3940 orig_slot = path->slots[0]; 3941 orig_offset = btrfs_item_offset(leaf, path->slots[0]); 3942 item_size = btrfs_item_size(leaf, path->slots[0]); 3943 3944 buf = kmalloc(item_size, GFP_NOFS); 3945 if (!buf) 3946 return -ENOMEM; 3947 3948 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, 3949 path->slots[0]), item_size); 3950 3951 slot = path->slots[0] + 1; 3952 nritems = btrfs_header_nritems(leaf); 3953 if (slot != nritems) { 3954 /* shift the items */ 3955 memmove_leaf_items(leaf, slot + 1, slot, nritems - slot); 3956 } 3957 3958 btrfs_cpu_key_to_disk(&disk_key, new_key); 3959 btrfs_set_item_key(leaf, &disk_key, slot); 3960 3961 btrfs_set_item_offset(leaf, slot, orig_offset); 3962 btrfs_set_item_size(leaf, slot, item_size - split_offset); 3963 3964 btrfs_set_item_offset(leaf, orig_slot, 3965 orig_offset 

static noinline int split_item(struct btrfs_path *path,
			       const struct btrfs_key *new_key,
			       unsigned long split_offset)
{
	struct extent_buffer *leaf;
	int orig_slot, slot;
	char *buf;
	u32 nritems;
	u32 item_size;
	u32 orig_offset;
	struct btrfs_disk_key disk_key;

	leaf = path->nodes[0];
	BUG_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item));

	orig_slot = path->slots[0];
	orig_offset = btrfs_item_offset(leaf, path->slots[0]);
	item_size = btrfs_item_size(leaf, path->slots[0]);

	buf = kmalloc(item_size, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
			   path->slots[0]), item_size);

	slot = path->slots[0] + 1;
	nritems = btrfs_header_nritems(leaf);
	if (slot != nritems) {
		/* shift the items */
		memmove_leaf_items(leaf, slot + 1, slot, nritems - slot);
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(leaf, &disk_key, slot);

	btrfs_set_item_offset(leaf, slot, orig_offset);
	btrfs_set_item_size(leaf, slot, item_size - split_offset);

	btrfs_set_item_offset(leaf, orig_slot,
			      orig_offset + item_size - split_offset);
	btrfs_set_item_size(leaf, orig_slot, split_offset);

	btrfs_set_header_nritems(leaf, nritems + 1);

	/* write the data for the start of the original item */
	write_extent_buffer(leaf, buf,
			    btrfs_item_ptr_offset(leaf, path->slots[0]),
			    split_offset);

	/* write the data for the new item */
	write_extent_buffer(leaf, buf + split_offset,
			    btrfs_item_ptr_offset(leaf, slot),
			    item_size - split_offset);
	btrfs_mark_buffer_dirty(leaf);

	BUG_ON(btrfs_leaf_free_space(leaf) < 0);
	kfree(buf);
	return 0;
}

/*
 * This function splits a single item into two items,
 * giving 'new_key' to the new item and splitting the
 * old one at split_offset (from the start of the item).
 *
 * The path may be released by this operation.  After
 * the split, the path is pointing to the old item.  The
 * new item is going to be in the same node as the old one.
 *
 * Note, the item being split must be small enough to live alone on
 * a tree block with room for one extra struct btrfs_item.
 *
 * This allows us to split the item in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_split_item(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_path *path,
		     const struct btrfs_key *new_key,
		     unsigned long split_offset)
{
	int ret;

	ret = setup_leaf_for_split(trans, root, path,
				   sizeof(struct btrfs_item));
	if (ret)
		return ret;

	ret = split_item(path, new_key, split_offset);
	return ret;
}
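
/*
 * Illustrative sketch (hypothetical values, not part of the build): splitting
 * a file extent item at byte 'split' of its item data, giving the second half
 * a key whose offset starts at the new file position:
 *
 *	struct btrfs_key new_key = {
 *		.objectid = ino,
 *		.type = BTRFS_EXTENT_DATA_KEY,
 *		.offset = file_pos + split_len,
 *	};
 *
 *	ret = btrfs_split_item(trans, root, path, &new_key, split);
 *	if (ret == -EAGAIN)
 *		... re-search and retry, as sketched above ...
 */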

/*
 * Make the item pointed to by the path smaller.  new_size indicates
 * how small to make it, and from_end tells us if we just chop bytes
 * off the end of the item or if we shift the item to chop bytes off
 * the front.
 */
void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
{
	int slot;
	struct extent_buffer *leaf;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data_start;
	unsigned int old_size;
	unsigned int size_diff;
	int i;
	struct btrfs_map_token token;

	leaf = path->nodes[0];
	slot = path->slots[0];

	old_size = btrfs_item_size(leaf, slot);
	if (old_size == new_size)
		return;

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(leaf);

	old_data_start = btrfs_item_offset(leaf, slot);

	size_diff = old_size - new_size;

	BUG_ON(slot < 0);
	BUG_ON(slot >= nritems);

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers */
	btrfs_init_map_token(&token, leaf);
	for (i = slot; i < nritems; i++) {
		u32 ioff;

		ioff = btrfs_token_item_offset(&token, i);
		btrfs_set_token_item_offset(&token, i, ioff + size_diff);
	}

	/* shift the data */
	if (from_end) {
		memmove_leaf_data(leaf, data_end + size_diff, data_end,
				  old_data_start + new_size - data_end);
	} else {
		struct btrfs_disk_key disk_key;
		u64 offset;

		btrfs_item_key(leaf, &disk_key, slot);

		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
			unsigned long ptr;
			struct btrfs_file_extent_item *fi;

			fi = btrfs_item_ptr(leaf, slot,
					    struct btrfs_file_extent_item);
			fi = (struct btrfs_file_extent_item *)(
			     (unsigned long)fi - size_diff);

			if (btrfs_file_extent_type(leaf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE) {
				ptr = btrfs_item_ptr_offset(leaf, slot);
				memmove_extent_buffer(leaf, ptr,
				      (unsigned long)fi,
				      BTRFS_FILE_EXTENT_INLINE_DATA_START);
			}
		}

		memmove_leaf_data(leaf, data_end + size_diff, data_end,
				  old_data_start - data_end);

		offset = btrfs_disk_key_offset(&disk_key);
		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
		btrfs_set_item_key(leaf, &disk_key, slot);
		if (slot == 0)
			fixup_low_keys(path, &disk_key, 1);
	}

	btrfs_set_item_size(leaf, slot, new_size);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(leaf) < 0) {
		btrfs_print_leaf(leaf);
		BUG();
	}
}

/*
 * Make the item pointed to by the path bigger.  data_size is the added size.
 */
void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
{
	int slot;
	struct extent_buffer *leaf;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data;
	unsigned int old_size;
	int i;
	struct btrfs_map_token token;

	leaf = path->nodes[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(leaf);

	if (btrfs_leaf_free_space(leaf) < data_size) {
		btrfs_print_leaf(leaf);
		BUG();
	}
	slot = path->slots[0];
	old_data = btrfs_item_data_end(leaf, slot);

	BUG_ON(slot < 0);
	if (slot >= nritems) {
		btrfs_print_leaf(leaf);
		btrfs_crit(leaf->fs_info, "slot %d too large, nritems %d",
			   slot, nritems);
		BUG();
	}

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers */
	btrfs_init_map_token(&token, leaf);
	for (i = slot; i < nritems; i++) {
		u32 ioff;

		ioff = btrfs_token_item_offset(&token, i);
		btrfs_set_token_item_offset(&token, i, ioff - data_size);
	}

	/* shift the data */
	memmove_leaf_data(leaf, data_end - data_size, data_end,
			  old_data - data_end);

	data_end = old_data;
	old_size = btrfs_item_size(leaf, slot);
	btrfs_set_item_size(leaf, slot, old_size + data_size);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(leaf) < 0) {
		btrfs_print_leaf(leaf);
		BUG();
	}
}
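
/*
 * Worked example (illustrative, assuming a hypothetical 16K leaf): with
 * BTRFS_LEAF_DATA_SIZE = 16384 and a single item of size 100, its data
 * occupies [16284, 16384) and its item offset is 16284.  Truncating it to
 * new_size = 60 with from_end = 1 gives size_diff = 40, so the offset becomes
 * 16324 and the 60 bytes of data are memmove'd 40 bytes toward the end of the
 * leaf.  Extending by 40 is the inverse: offsets shrink and data moves toward
 * the front, because leaf data grows end-to-front.
 */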

/*
 * Make space in the node before inserting one or more items.
 *
 * @root:	root we are inserting items to
 * @path:	points to the leaf/slot where we are going to insert new items
 * @batch:	information about the batch of items to insert
 *
 * Main purpose is to save stack depth by doing the bulk of the work in a
 * function that doesn't call btrfs_search_slot
 */
static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
				   const struct btrfs_item_batch *batch)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	u32 nritems;
	unsigned int data_end;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *leaf;
	int slot;
	struct btrfs_map_token token;
	u32 total_size;

	/*
	 * Before anything else, update keys in the parent and other ancestors
	 * if needed, then release the write locks on them, so that other tasks
	 * can use them while we modify the leaf.
	 */
	if (path->slots[0] == 0) {
		btrfs_cpu_key_to_disk(&disk_key, &batch->keys[0]);
		fixup_low_keys(path, &disk_key, 1);
	}
	btrfs_unlock_up_safe(path, 1);

	leaf = path->nodes[0];
	slot = path->slots[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(leaf);
	total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item));

	if (btrfs_leaf_free_space(leaf) < total_size) {
		btrfs_print_leaf(leaf);
		btrfs_crit(fs_info, "not enough freespace need %u have %d",
			   total_size, btrfs_leaf_free_space(leaf));
		BUG();
	}

	btrfs_init_map_token(&token, leaf);
	if (slot != nritems) {
		unsigned int old_data = btrfs_item_data_end(leaf, slot);

		if (old_data < data_end) {
			btrfs_print_leaf(leaf);
			btrfs_crit(fs_info,
		"item at slot %d with data offset %u beyond data end of leaf %u",
				   slot, old_data, data_end);
			BUG();
		}
		/*
		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
		 */
		/* first correct the data pointers */
		for (i = slot; i < nritems; i++) {
			u32 ioff;

			ioff = btrfs_token_item_offset(&token, i);
			btrfs_set_token_item_offset(&token, i,
						    ioff - batch->total_data_size);
		}
		/* shift the items */
		memmove_leaf_items(leaf, slot + batch->nr, slot, nritems - slot);

		/* shift the data */
		memmove_leaf_data(leaf, data_end - batch->total_data_size,
				  data_end, old_data - data_end);
		data_end = old_data;
	}

	/* setup the item for the new data */
	for (i = 0; i < batch->nr; i++) {
		btrfs_cpu_key_to_disk(&disk_key, &batch->keys[i]);
		btrfs_set_item_key(leaf, &disk_key, slot + i);
		data_end -= batch->data_sizes[i];
		btrfs_set_token_item_offset(&token, slot + i, data_end);
		btrfs_set_token_item_size(&token, slot + i, batch->data_sizes[i]);
	}

	btrfs_set_header_nritems(leaf, nritems + batch->nr);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(leaf) < 0) {
		btrfs_print_leaf(leaf);
		BUG();
	}
}
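
/*
 * Illustrative sketch (hypothetical keys and sizes, not part of the build):
 * inserting two items in one batch, so the existing items and data in the
 * leaf are shifted only once instead of once per item:
 *
 *	struct btrfs_key keys[2] = { first_key, second_key };
 *	u32 sizes[2] = { first_size, second_size };
 *	struct btrfs_item_batch batch = {
 *		.keys = keys,
 *		.data_sizes = sizes,
 *		.total_data_size = first_size + second_size,
 *		.nr = 2,
 *	};
 *
 *	ret = btrfs_insert_empty_items(trans, root, path, &batch);
 */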

/*
 * Insert a new item into a leaf.
 *
 * @root:	The root of the btree.
 * @path:	A path pointing to the target leaf and slot.
 * @key:	The key of the new item.
 * @data_size:	The size of the data associated with the new key.
 */
void btrfs_setup_item_for_insert(struct btrfs_root *root,
				 struct btrfs_path *path,
				 const struct btrfs_key *key,
				 u32 data_size)
{
	struct btrfs_item_batch batch;

	batch.keys = key;
	batch.data_sizes = &data_size;
	batch.total_data_size = data_size;
	batch.nr = 1;

	setup_items_for_insert(root, path, &batch);
}

/*
 * Given a key and some data, insert items into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     const struct btrfs_item_batch *batch)
{
	int ret = 0;
	int slot;
	u32 total_size;

	total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item));
	ret = btrfs_search_slot(trans, root, &batch->keys[0], path, total_size, 1);
	if (ret == 0)
		return -EEXIST;
	if (ret < 0)
		return ret;

	slot = path->slots[0];
	BUG_ON(slot < 0);

	setup_items_for_insert(root, path, batch);
	return 0;
}

/*
 * Given a key and some data, insert an item into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *cpu_key, void *data,
		      u32 data_size)
{
	int ret = 0;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
	if (!ret) {
		leaf = path->nodes[0];
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		write_extent_buffer(leaf, data, ptr, data_size);
		btrfs_mark_buffer_dirty(leaf);
	}
	btrfs_free_path(path);
	return ret;
}

/*
 * This function duplicates an item, giving 'new_key' to the new item.
 * It guarantees both items live in the same tree leaf and the new item is
 * contiguous with the original item.
 *
 * This allows us to split a file extent in place, keeping a lock on the leaf
 * the entire time.
 */
int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path,
			 const struct btrfs_key *new_key)
{
	struct extent_buffer *leaf;
	int ret;
	u32 item_size;

	leaf = path->nodes[0];
	item_size = btrfs_item_size(leaf, path->slots[0]);
	ret = setup_leaf_for_split(trans, root, path,
				   item_size + sizeof(struct btrfs_item));
	if (ret)
		return ret;

	path->slots[0]++;
	btrfs_setup_item_for_insert(root, path, new_key, item_size);
	leaf = path->nodes[0];
	memcpy_extent_buffer(leaf,
			     btrfs_item_ptr_offset(leaf, path->slots[0]),
			     btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
			     item_size);
	return 0;
}
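
/*
 * Illustrative sketch (hypothetical key and payload, not part of the build):
 * the simple one-shot insert path above boils down to:
 *
 *	struct btrfs_key key = {
 *		.objectid = objectid,
 *		.type = BTRFS_TEMPORARY_ITEM_KEY,
 *		.offset = 0,
 *	};
 *	struct my_item data = { ... };	// hypothetical on-disk struct
 *
 *	ret = btrfs_insert_item(trans, root, &key, &data, sizeof(data));
 *	if (ret == -EEXIST)
 *		... an item with this key already exists ...
 */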

/*
 * Delete the pointer from a given node.
 *
 * The tree should have been previously balanced so the deletion does not
 * empty a node.
 *
 * This is exported for use inside btrfs-progs, don't un-export it.
 */
void btrfs_del_ptr(struct btrfs_root *root, struct btrfs_path *path, int level,
		   int slot)
{
	struct extent_buffer *parent = path->nodes[level];
	u32 nritems;
	int ret;

	nritems = btrfs_header_nritems(parent);
	if (slot != nritems - 1) {
		if (level) {
			ret = btrfs_tree_mod_log_insert_move(parent, slot,
					slot + 1, nritems - slot - 1);
			BUG_ON(ret < 0);
		}
		memmove_extent_buffer(parent,
			      btrfs_node_key_ptr_offset(parent, slot),
			      btrfs_node_key_ptr_offset(parent, slot + 1),
			      sizeof(struct btrfs_key_ptr) *
			      (nritems - slot - 1));
	} else if (level) {
		ret = btrfs_tree_mod_log_insert_key(parent, slot,
						    BTRFS_MOD_LOG_KEY_REMOVE);
		BUG_ON(ret < 0);
	}

	nritems--;
	btrfs_set_header_nritems(parent, nritems);
	if (nritems == 0 && parent == root->node) {
		BUG_ON(btrfs_header_level(root->node) != 1);
		/* just turn the root into a leaf and break */
		btrfs_set_header_level(root->node, 0);
	} else if (slot == 0) {
		struct btrfs_disk_key disk_key;

		btrfs_node_key(parent, &disk_key, 0);
		fixup_low_keys(path, &disk_key, level + 1);
	}
	btrfs_mark_buffer_dirty(parent);
}

/*
 * A helper function to delete the leaf pointed to by path->slots[1] and
 * path->nodes[1].
 *
 * This deletes the pointer in path->nodes[1] and frees the leaf
 * block extent.
 *
 * The path must have already been setup for deleting the leaf, including
 * all the proper balancing.  path->nodes[1] must be locked.
 */
static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *leaf)
{
	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
	btrfs_del_ptr(root, path, 1, path->slots[1]);

	/*
	 * btrfs_free_extent is expensive, we want to make sure we
	 * aren't holding any locks when we call it
	 */
	btrfs_unlock_up_safe(path, 0);

	root_sub_used(root, leaf->len);

	atomic_inc(&leaf->refs);
	btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1);
	free_extent_buffer_stale(leaf);
}

/*
 * Delete the item(s) at the leaf level in path.  If that empties
 * the leaf, remove it from the tree.
 */
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int slot, int nr)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	int ret = 0;
	int wret;
	u32 nritems;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	if (slot + nr != nritems) {
		const u32 last_off = btrfs_item_offset(leaf, slot + nr - 1);
		const int data_end = leaf_data_end(leaf);
		struct btrfs_map_token token;
		u32 dsize = 0;
		int i;

		for (i = 0; i < nr; i++)
			dsize += btrfs_item_size(leaf, slot + i);

		memmove_leaf_data(leaf, data_end + dsize, data_end,
				  last_off - data_end);

		btrfs_init_map_token(&token, leaf);
		for (i = slot + nr; i < nritems; i++) {
			u32 ioff;

			ioff = btrfs_token_item_offset(&token, i);
			btrfs_set_token_item_offset(&token, i, ioff + dsize);
		}

		memmove_leaf_items(leaf, slot, slot + nr, nritems - slot - nr);
	}
	btrfs_set_header_nritems(leaf, nritems - nr);
	nritems -= nr;

	/* delete the leaf if we've emptied it */
	if (nritems == 0) {
		if (leaf == root->node) {
			btrfs_set_header_level(leaf, 0);
		} else {
			btrfs_clear_buffer_dirty(trans, leaf);
			btrfs_del_leaf(trans, root, path, leaf);
		}
	} else {
		int used = leaf_space_used(leaf, 0, nritems);
		if (slot == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_item_key(leaf, &disk_key, 0);
			fixup_low_keys(path, &disk_key, 1);
		}

		/*
		 * Try to delete the leaf if it is mostly empty.  We do this by
		 * trying to move all its items into its left and right neighbours.
		 * If we can't move all the items, then we don't delete it - it's
		 * not ideal, but future insertions might fill the leaf with more
		 * items, or items from other leaves might be moved later into our
		 * leaf due to deletions on those leaves.
		 */
		if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
			u32 min_push_space;

			/*
			 * push_leaf_left fixes the path.
			 * make sure the path still points to our leaf
			 * for possible call to btrfs_del_ptr below
			 */
			slot = path->slots[1];
			atomic_inc(&leaf->refs);
			/*
			 * We want to be able to at least push one item to the
			 * left neighbour leaf, and that's the first item.
			 */
			min_push_space = sizeof(struct btrfs_item) +
				btrfs_item_size(leaf, 0);
			wret = push_leaf_left(trans, root, path, 0,
					      min_push_space, 1, (u32)-1);
			if (wret < 0 && wret != -ENOSPC)
				ret = wret;

			if (path->nodes[0] == leaf &&
			    btrfs_header_nritems(leaf)) {
				/*
				 * If we were not able to push all items from our
				 * leaf to its left neighbour, then attempt to
				 * either push all the remaining items to the
				 * right neighbour or none.  There's no advantage
				 * in pushing only some items, instead of all, as
				 * it's pointless to end up with a leaf having
				 * too few items while the neighbours can be full
				 * or nearly full.
				 */
				nritems = btrfs_header_nritems(leaf);
				min_push_space = leaf_space_used(leaf, 0, nritems);
				wret = push_leaf_right(trans, root, path, 0,
						       min_push_space, 1, 0);
				if (wret < 0 && wret != -ENOSPC)
					ret = wret;
			}

			if (btrfs_header_nritems(leaf) == 0) {
				path->slots[1] = slot;
				btrfs_del_leaf(trans, root, path, leaf);
				free_extent_buffer(leaf);
				ret = 0;
			} else {
				/*
				 * If we're still in the path, make sure
				 * we're dirty.  Otherwise, one of the
				 * push_leaf functions must have already
				 * dirtied this buffer.
				 */
				if (path->nodes[0] == leaf)
					btrfs_mark_buffer_dirty(leaf);
				free_extent_buffer(leaf);
			}
		} else {
			btrfs_mark_buffer_dirty(leaf);
		}
	}
	return ret;
}
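
/*
 * Illustrative sketch (not part of the build): deleting the single item the
 * path points at, the common nr == 1 case.  ins_len == -1 tells the search
 * to prepare (balance) the leaf for a deletion:
 *
 *	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
 *	if (ret == 0)
 *		ret = btrfs_del_items(trans, root, path,
 *				      path->slots[0], 1);
 *	btrfs_release_path(path);
 */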

/*
 * A helper function to walk down the tree starting at min_key, and looking
 * for nodes or leaves that have a minimum transaction id.
 * This is used by the btree defrag code, and tree logging.
 *
 * This does not cow, but it does stuff the starting key it finds back
 * into min_key, so you can call btrfs_search_slot with cow=1 on the
 * key and get a writable path.
 *
 * This honors path->lowest_level to prevent descent past a given level
 * of the tree.
 *
 * min_trans indicates the oldest transaction that you are interested
 * in walking through.  Any nodes or leaves older than min_trans are
 * skipped over (without reading them).
 *
 * Returns zero if something useful was found, < 0 on error and 1 if there
 * was nothing in the tree that matched the search criteria.
 */
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_path *path,
			 u64 min_trans)
{
	struct extent_buffer *cur;
	struct btrfs_key found_key;
	int slot;
	int sret;
	u32 nritems;
	int level;
	int ret = 1;
	int keep_locks = path->keep_locks;

	ASSERT(!path->nowait);
	path->keep_locks = 1;
again:
	cur = btrfs_read_lock_root_node(root);
	level = btrfs_header_level(cur);
	WARN_ON(path->nodes[level]);
	path->nodes[level] = cur;
	path->locks[level] = BTRFS_READ_LOCK;

	if (btrfs_header_generation(cur) < min_trans) {
		ret = 1;
		goto out;
	}
	while (1) {
		nritems = btrfs_header_nritems(cur);
		level = btrfs_header_level(cur);
		sret = btrfs_bin_search(cur, 0, min_key, &slot);
		if (sret < 0) {
			ret = sret;
			goto out;
		}

		/* at the lowest level, we're done, setup the path and exit */
		if (level == path->lowest_level) {
			if (slot >= nritems)
				goto find_next_key;
			ret = 0;
			path->slots[level] = slot;
			btrfs_item_key_to_cpu(cur, &found_key, slot);
			goto out;
		}
		if (sret && slot > 0)
			slot--;
		/*
		 * check this node pointer against the min_trans parameter.
		 * If it is too old, skip to the next one.
		 */
		while (slot < nritems) {
			u64 gen;

			gen = btrfs_node_ptr_generation(cur, slot);
			if (gen < min_trans) {
				slot++;
				continue;
			}
			break;
		}
find_next_key:
		/*
		 * we didn't find a candidate key in this node, walk forward
		 * and find another one
		 */
		if (slot >= nritems) {
			path->slots[level] = slot;
			sret = btrfs_find_next_key(root, path, min_key, level,
						   min_trans);
			if (sret == 0) {
				btrfs_release_path(path);
				goto again;
			} else {
				goto out;
			}
		}
		/* save our key for returning back */
		btrfs_node_key_to_cpu(cur, &found_key, slot);
		path->slots[level] = slot;
		if (level == path->lowest_level) {
			ret = 0;
			goto out;
		}
		cur = btrfs_read_node_slot(cur, slot);
		if (IS_ERR(cur)) {
			ret = PTR_ERR(cur);
			goto out;
		}

		btrfs_tree_read_lock(cur);

		path->locks[level - 1] = BTRFS_READ_LOCK;
		path->nodes[level - 1] = cur;
		unlock_up(path, level, 1, 0, NULL);
	}
out:
	path->keep_locks = keep_locks;
	if (ret == 0) {
		btrfs_unlock_up_safe(path, path->lowest_level + 1);
		memcpy(min_key, &found_key, sizeof(found_key));
	}
	return ret;
}
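
/*
 * Illustrative sketch (not part of the build): scanning every leaf that
 * changed in or after transaction 'min_trans', the way the defrag and
 * tree-log code drive this helper.  The key-advance step is hypothetical
 * and depends on what the caller is iterating over:
 *
 *	struct btrfs_key min_key = { 0 };	// start from the smallest key
 *
 *	while (btrfs_search_forward(root, &min_key, path, min_trans) == 0) {
 *		... process path->nodes[0] at path->slots[0] ...
 *		btrfs_release_path(path);
 *		if (min_key.offset == (u64)-1)
 *			break;
 *		min_key.offset++;
 *	}
 */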

/*
 * This is similar to btrfs_next_leaf, but does not try to preserve
 * and fixup the path.  It looks for and returns the next key in the
 * tree based on the current path and the min_trans parameters.
 *
 * 0 is returned if another key is found, < 0 if there are any errors
 * and 1 is returned if there are no higher keys in the tree.
 *
 * path->keep_locks should be set to 1 on the search made before
 * calling this function.
 */
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int level, u64 min_trans)
{
	int slot;
	struct extent_buffer *c;

	WARN_ON(!path->keep_locks && !path->skip_locking);
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			return 1;

		slot = path->slots[level] + 1;
		c = path->nodes[level];
next:
		if (slot >= btrfs_header_nritems(c)) {
			int ret;
			int orig_lowest;
			struct btrfs_key cur_key;

			if (level + 1 >= BTRFS_MAX_LEVEL ||
			    !path->nodes[level + 1])
				return 1;

			if (path->locks[level + 1] || path->skip_locking) {
				level++;
				continue;
			}

			slot = btrfs_header_nritems(c) - 1;
			if (level == 0)
				btrfs_item_key_to_cpu(c, &cur_key, slot);
			else
				btrfs_node_key_to_cpu(c, &cur_key, slot);

			orig_lowest = path->lowest_level;
			btrfs_release_path(path);
			path->lowest_level = level;
			ret = btrfs_search_slot(NULL, root, &cur_key, path,
						0, 0);
			path->lowest_level = orig_lowest;
			if (ret < 0)
				return ret;

			c = path->nodes[level];
			slot = path->slots[level];
			if (ret == 0)
				slot++;
			goto next;
		}

		if (level == 0)
			btrfs_item_key_to_cpu(c, key, slot);
		else {
			u64 gen = btrfs_node_ptr_generation(c, slot);

			if (gen < min_trans) {
				slot++;
				goto next;
			}
			btrfs_node_key_to_cpu(c, key, slot);
		}
		return 0;
	}
	return 1;
}
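
/*
 * Illustrative sketch (not part of the build): peeking at the key that
 * follows the current position without moving the path, assuming the
 * preceding search was done with path->keep_locks set and min_trans == 0
 * so no generation filtering applies:
 *
 *	struct btrfs_key next_key;
 *
 *	path->keep_locks = 1;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret >= 0 &&
 *	    btrfs_find_next_key(root, path, &next_key, 0, 0) == 0)
 *		... next_key now holds the first key greater than 'key' ...
 *	path->keep_locks = 0;
 *	btrfs_release_path(path);
 */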

int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
			u64 time_seq)
{
	int slot;
	int level;
	struct extent_buffer *c;
	struct extent_buffer *next;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	bool need_commit_sem = false;
	u32 nritems;
	int ret;
	int i;

	/*
	 * The nowait semantics are used only for write paths, where we don't
	 * use the tree mod log and sequence numbers.
	 */
	if (time_seq)
		ASSERT(!path->nowait);

	nritems = btrfs_header_nritems(path->nodes[0]);
	if (nritems == 0)
		return 1;

	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
again:
	level = 1;
	next = NULL;
	btrfs_release_path(path);

	path->keep_locks = 1;

	if (time_seq) {
		ret = btrfs_search_old_slot(root, &key, path, time_seq);
	} else {
		if (path->need_commit_sem) {
			path->need_commit_sem = 0;
			need_commit_sem = true;
			if (path->nowait) {
				if (!down_read_trylock(&fs_info->commit_root_sem)) {
					ret = -EAGAIN;
					goto done;
				}
			} else {
				down_read(&fs_info->commit_root_sem);
			}
		}
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	}
	path->keep_locks = 0;

	if (ret < 0)
		goto done;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * by releasing the path above we dropped all our locks.  A balance
	 * could have added more items next to the key that used to be
	 * at the very end of the block.  So, check again here and
	 * advance the path if there are now more items available.
	 */
	if (nritems > 0 && path->slots[0] < nritems - 1) {
		if (ret == 0)
			path->slots[0]++;
		ret = 0;
		goto done;
	}
	/*
	 * So the above check misses one case:
	 * - after releasing the path above, someone has removed the item that
	 *   used to be at the very end of the block, and balance between leaves
	 *   gets another one with a bigger key.offset to replace it.
	 *
	 * This one should be returned as well, or we can get leaf corruption
	 * later (esp. in __btrfs_drop_extents()).
	 *
	 * And a bit more explanation about this check:
	 * with ret > 0, the key isn't found, the path points to the slot
	 * where it should be inserted, so the path->slots[0] item must be the
	 * bigger one.
	 */
	if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
		ret = 0;
		goto done;
	}

	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level]) {
			ret = 1;
			goto done;
		}

		slot = path->slots[level] + 1;
		c = path->nodes[level];
		if (slot >= btrfs_header_nritems(c)) {
			level++;
			if (level == BTRFS_MAX_LEVEL) {
				ret = 1;
				goto done;
			}
			continue;
		}

		/*
		 * Our current level is where we're going to start from, and to
		 * make sure lockdep doesn't complain we need to drop our locks
		 * and nodes from 0 to our current level.
		 */
		for (i = 0; i < level; i++) {
			if (path->locks[i]) {
				btrfs_tree_read_unlock(path->nodes[i]);
				path->locks[i] = 0;
			}
			free_extent_buffer(path->nodes[i]);
			path->nodes[i] = NULL;
		}

		next = c;
		ret = read_block_for_search(root, path, &next, level,
					    slot, &key);
		if (ret == -EAGAIN && !path->nowait)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret && path->nowait) {
				ret = -EAGAIN;
				goto done;
			}
			if (!ret && time_seq) {
				/*
				 * If we don't get the lock, we may be racing
				 * with push_leaf_left, holding that lock while
				 * itself waiting for the leaf we've currently
				 * locked.  To solve this situation, we give up
				 * on our lock and cycle.
				 */
				free_extent_buffer(next);
				btrfs_release_path(path);
				cond_resched();
				goto again;
			}
			if (!ret)
				btrfs_tree_read_lock(next);
		}
		break;
	}
	path->slots[level] = slot;
	while (1) {
		level--;
		path->nodes[level] = next;
		path->slots[level] = 0;
		if (!path->skip_locking)
			path->locks[level] = BTRFS_READ_LOCK;
		if (!level)
			break;

		ret = read_block_for_search(root, path, &next, level,
					    0, &key);
		if (ret == -EAGAIN && !path->nowait)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			if (path->nowait) {
				if (!btrfs_try_tree_read_lock(next)) {
					ret = -EAGAIN;
					goto done;
				}
			} else {
				btrfs_tree_read_lock(next);
			}
		}
	}
	ret = 0;
done:
	unlock_up(path, 0, 1, 0, NULL);
	if (need_commit_sem) {
		int ret2;

		path->need_commit_sem = 1;
		ret2 = finish_need_commit_sem_search(path);
		up_read(&fs_info->commit_root_sem);
		if (ret2)
			ret = ret2;
	}

	return ret;
}

int btrfs_next_old_item(struct btrfs_root *root, struct btrfs_path *path, u64 time_seq)
{
	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
		return btrfs_next_old_leaf(root, path, time_seq);
	return 0;
}

/*
 * This uses btrfs_prev_leaf to walk backwards in the tree, and keeps
 * searching until it gets past min_objectid or finds an item of 'type'.
 *
 * Returns 0 if something is found, 1 if nothing was found and < 0 on error.
 */
int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == type)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < type)
			break;
	}
	return 1;
}
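
/*
 * Illustrative sketch (not part of the build): the canonical forward
 * iteration loop built on btrfs_next_old_leaf(), here with time_seq == 0
 * (the "current tree" case):
 *
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	while (ret >= 0) {
 *		leaf = path->nodes[0];
 *		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
 *			ret = btrfs_next_old_leaf(root, path, 0);
 *			if (ret)
 *				break;	// 1 means no more leaves
 *			continue;
 *		}
 *		... process the item at path->slots[0], then ...
 *		path->slots[0]++;
 *	}
 */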

/*
 * Search in the extent tree to find a previous Metadata/Data extent item with
 * min objectid.
 *
 * Returns 0 if something is found, 1 if nothing was found and < 0 on error.
 */
int btrfs_previous_extent_item(struct btrfs_root *root,
			       struct btrfs_path *path, u64 min_objectid)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
		    found_key.type == BTRFS_METADATA_ITEM_KEY)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < BTRFS_EXTENT_ITEM_KEY)
			break;
	}
	return 1;
}

int __init btrfs_ctree_init(void)
{
	btrfs_path_cachep = kmem_cache_create("btrfs_path",
					      sizeof(struct btrfs_path), 0,
					      SLAB_MEM_SPREAD, NULL);
	if (!btrfs_path_cachep)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_ctree_exit(void)
{
	kmem_cache_destroy(btrfs_path_cachep);
}