1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2007,2008 Oracle. All rights reserved. 4 */ 5 6 #include <linux/sched.h> 7 #include <linux/slab.h> 8 #include <linux/rbtree.h> 9 #include <linux/mm.h> 10 #include <linux/error-injection.h> 11 #include "messages.h" 12 #include "ctree.h" 13 #include "disk-io.h" 14 #include "transaction.h" 15 #include "print-tree.h" 16 #include "locking.h" 17 #include "volumes.h" 18 #include "qgroup.h" 19 #include "tree-mod-log.h" 20 #include "tree-checker.h" 21 #include "fs.h" 22 #include "accessors.h" 23 #include "extent-tree.h" 24 #include "relocation.h" 25 #include "file-item.h" 26 27 static struct kmem_cache *btrfs_path_cachep; 28 29 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root 30 *root, struct btrfs_path *path, int level); 31 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root, 32 const struct btrfs_key *ins_key, struct btrfs_path *path, 33 int data_size, int extend); 34 static int push_node_left(struct btrfs_trans_handle *trans, 35 struct extent_buffer *dst, 36 struct extent_buffer *src, int empty); 37 static int balance_node_right(struct btrfs_trans_handle *trans, 38 struct extent_buffer *dst_buf, 39 struct extent_buffer *src_buf); 40 41 static const struct btrfs_csums { 42 u16 size; 43 const char name[10]; 44 const char driver[12]; 45 } btrfs_csums[] = { 46 [BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" }, 47 [BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" }, 48 [BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" }, 49 [BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b", 50 .driver = "blake2b-256" }, 51 }; 52 53 /* 54 * The leaf data grows from end-to-front in the node. this returns the address 55 * of the start of the last item, which is the stop of the leaf data stack. 56 */ 57 static unsigned int leaf_data_end(const struct extent_buffer *leaf) 58 { 59 u32 nr = btrfs_header_nritems(leaf); 60 61 if (nr == 0) 62 return BTRFS_LEAF_DATA_SIZE(leaf->fs_info); 63 return btrfs_item_offset(leaf, nr - 1); 64 } 65 66 /* 67 * Move data in a @leaf (using memmove, safe for overlapping ranges). 68 * 69 * @leaf: leaf that we're doing a memmove on 70 * @dst_offset: item data offset we're moving to 71 * @src_offset: item data offset were' moving from 72 * @len: length of the data we're moving 73 * 74 * Wrapper around memmove_extent_buffer() that takes into account the header on 75 * the leaf. The btrfs_item offset's start directly after the header, so we 76 * have to adjust any offsets to account for the header in the leaf. This 77 * handles that math to simplify the callers. 78 */ 79 static inline void memmove_leaf_data(const struct extent_buffer *leaf, 80 unsigned long dst_offset, 81 unsigned long src_offset, 82 unsigned long len) 83 { 84 memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, 0) + dst_offset, 85 btrfs_item_nr_offset(leaf, 0) + src_offset, len); 86 } 87 88 /* 89 * Copy item data from @src into @dst at the given @offset. 90 * 91 * @dst: destination leaf that we're copying into 92 * @src: source leaf that we're copying from 93 * @dst_offset: item data offset we're copying to 94 * @src_offset: item data offset were' copying from 95 * @len: length of the data we're copying 96 * 97 * Wrapper around copy_extent_buffer() that takes into account the header on 98 * the leaf. The btrfs_item offset's start directly after the header, so we 99 * have to adjust any offsets to account for the header in the leaf. This 100 * handles that math to simplify the callers. 101 */ 102 static inline void copy_leaf_data(const struct extent_buffer *dst, 103 const struct extent_buffer *src, 104 unsigned long dst_offset, 105 unsigned long src_offset, unsigned long len) 106 { 107 copy_extent_buffer(dst, src, btrfs_item_nr_offset(dst, 0) + dst_offset, 108 btrfs_item_nr_offset(src, 0) + src_offset, len); 109 } 110 111 /* 112 * Move items in a @leaf (using memmove). 113 * 114 * @dst: destination leaf for the items 115 * @dst_item: the item nr we're copying into 116 * @src_item: the item nr we're copying from 117 * @nr_items: the number of items to copy 118 * 119 * Wrapper around memmove_extent_buffer() that does the math to get the 120 * appropriate offsets into the leaf from the item numbers. 121 */ 122 static inline void memmove_leaf_items(const struct extent_buffer *leaf, 123 int dst_item, int src_item, int nr_items) 124 { 125 memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, dst_item), 126 btrfs_item_nr_offset(leaf, src_item), 127 nr_items * sizeof(struct btrfs_item)); 128 } 129 130 /* 131 * Copy items from @src into @dst at the given @offset. 132 * 133 * @dst: destination leaf for the items 134 * @src: source leaf for the items 135 * @dst_item: the item nr we're copying into 136 * @src_item: the item nr we're copying from 137 * @nr_items: the number of items to copy 138 * 139 * Wrapper around copy_extent_buffer() that does the math to get the 140 * appropriate offsets into the leaf from the item numbers. 141 */ 142 static inline void copy_leaf_items(const struct extent_buffer *dst, 143 const struct extent_buffer *src, 144 int dst_item, int src_item, int nr_items) 145 { 146 copy_extent_buffer(dst, src, btrfs_item_nr_offset(dst, dst_item), 147 btrfs_item_nr_offset(src, src_item), 148 nr_items * sizeof(struct btrfs_item)); 149 } 150 151 /* This exists for btrfs-progs usages. */ 152 u16 btrfs_csum_type_size(u16 type) 153 { 154 return btrfs_csums[type].size; 155 } 156 157 int btrfs_super_csum_size(const struct btrfs_super_block *s) 158 { 159 u16 t = btrfs_super_csum_type(s); 160 /* 161 * csum type is validated at mount time 162 */ 163 return btrfs_csum_type_size(t); 164 } 165 166 const char *btrfs_super_csum_name(u16 csum_type) 167 { 168 /* csum type is validated at mount time */ 169 return btrfs_csums[csum_type].name; 170 } 171 172 /* 173 * Return driver name if defined, otherwise the name that's also a valid driver 174 * name 175 */ 176 const char *btrfs_super_csum_driver(u16 csum_type) 177 { 178 /* csum type is validated at mount time */ 179 return btrfs_csums[csum_type].driver[0] ? 180 btrfs_csums[csum_type].driver : 181 btrfs_csums[csum_type].name; 182 } 183 184 size_t __attribute_const__ btrfs_get_num_csums(void) 185 { 186 return ARRAY_SIZE(btrfs_csums); 187 } 188 189 struct btrfs_path *btrfs_alloc_path(void) 190 { 191 might_sleep(); 192 193 return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS); 194 } 195 196 /* this also releases the path */ 197 void btrfs_free_path(struct btrfs_path *p) 198 { 199 if (!p) 200 return; 201 btrfs_release_path(p); 202 kmem_cache_free(btrfs_path_cachep, p); 203 } 204 205 /* 206 * path release drops references on the extent buffers in the path 207 * and it drops any locks held by this path 208 * 209 * It is safe to call this on paths that no locks or extent buffers held. 210 */ 211 noinline void btrfs_release_path(struct btrfs_path *p) 212 { 213 int i; 214 215 for (i = 0; i < BTRFS_MAX_LEVEL; i++) { 216 p->slots[i] = 0; 217 if (!p->nodes[i]) 218 continue; 219 if (p->locks[i]) { 220 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]); 221 p->locks[i] = 0; 222 } 223 free_extent_buffer(p->nodes[i]); 224 p->nodes[i] = NULL; 225 } 226 } 227 228 /* 229 * We want the transaction abort to print stack trace only for errors where the 230 * cause could be a bug, eg. due to ENOSPC, and not for common errors that are 231 * caused by external factors. 232 */ 233 bool __cold abort_should_print_stack(int errno) 234 { 235 switch (errno) { 236 case -EIO: 237 case -EROFS: 238 case -ENOMEM: 239 return false; 240 } 241 return true; 242 } 243 244 /* 245 * safely gets a reference on the root node of a tree. A lock 246 * is not taken, so a concurrent writer may put a different node 247 * at the root of the tree. See btrfs_lock_root_node for the 248 * looping required. 249 * 250 * The extent buffer returned by this has a reference taken, so 251 * it won't disappear. It may stop being the root of the tree 252 * at any time because there are no locks held. 253 */ 254 struct extent_buffer *btrfs_root_node(struct btrfs_root *root) 255 { 256 struct extent_buffer *eb; 257 258 while (1) { 259 rcu_read_lock(); 260 eb = rcu_dereference(root->node); 261 262 /* 263 * RCU really hurts here, we could free up the root node because 264 * it was COWed but we may not get the new root node yet so do 265 * the inc_not_zero dance and if it doesn't work then 266 * synchronize_rcu and try again. 267 */ 268 if (atomic_inc_not_zero(&eb->refs)) { 269 rcu_read_unlock(); 270 break; 271 } 272 rcu_read_unlock(); 273 synchronize_rcu(); 274 } 275 return eb; 276 } 277 278 /* 279 * Cowonly root (not-shareable trees, everything not subvolume or reloc roots), 280 * just get put onto a simple dirty list. Transaction walks this list to make 281 * sure they get properly updated on disk. 282 */ 283 static void add_root_to_dirty_list(struct btrfs_root *root) 284 { 285 struct btrfs_fs_info *fs_info = root->fs_info; 286 287 if (test_bit(BTRFS_ROOT_DIRTY, &root->state) || 288 !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state)) 289 return; 290 291 spin_lock(&fs_info->trans_lock); 292 if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) { 293 /* Want the extent tree to be the last on the list */ 294 if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID) 295 list_move_tail(&root->dirty_list, 296 &fs_info->dirty_cowonly_roots); 297 else 298 list_move(&root->dirty_list, 299 &fs_info->dirty_cowonly_roots); 300 } 301 spin_unlock(&fs_info->trans_lock); 302 } 303 304 /* 305 * used by snapshot creation to make a copy of a root for a tree with 306 * a given objectid. The buffer with the new root node is returned in 307 * cow_ret, and this func returns zero on success or a negative error code. 308 */ 309 int btrfs_copy_root(struct btrfs_trans_handle *trans, 310 struct btrfs_root *root, 311 struct extent_buffer *buf, 312 struct extent_buffer **cow_ret, u64 new_root_objectid) 313 { 314 struct btrfs_fs_info *fs_info = root->fs_info; 315 struct extent_buffer *cow; 316 int ret = 0; 317 int level; 318 struct btrfs_disk_key disk_key; 319 320 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && 321 trans->transid != fs_info->running_transaction->transid); 322 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && 323 trans->transid != root->last_trans); 324 325 level = btrfs_header_level(buf); 326 if (level == 0) 327 btrfs_item_key(buf, &disk_key, 0); 328 else 329 btrfs_node_key(buf, &disk_key, 0); 330 331 cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid, 332 &disk_key, level, buf->start, 0, 333 BTRFS_NESTING_NEW_ROOT); 334 if (IS_ERR(cow)) 335 return PTR_ERR(cow); 336 337 copy_extent_buffer_full(cow, buf); 338 btrfs_set_header_bytenr(cow, cow->start); 339 btrfs_set_header_generation(cow, trans->transid); 340 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV); 341 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN | 342 BTRFS_HEADER_FLAG_RELOC); 343 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID) 344 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC); 345 else 346 btrfs_set_header_owner(cow, new_root_objectid); 347 348 write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid); 349 350 WARN_ON(btrfs_header_generation(buf) > trans->transid); 351 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID) 352 ret = btrfs_inc_ref(trans, root, cow, 1); 353 else 354 ret = btrfs_inc_ref(trans, root, cow, 0); 355 if (ret) { 356 btrfs_tree_unlock(cow); 357 free_extent_buffer(cow); 358 btrfs_abort_transaction(trans, ret); 359 return ret; 360 } 361 362 btrfs_mark_buffer_dirty(cow); 363 *cow_ret = cow; 364 return 0; 365 } 366 367 /* 368 * check if the tree block can be shared by multiple trees 369 */ 370 int btrfs_block_can_be_shared(struct btrfs_root *root, 371 struct extent_buffer *buf) 372 { 373 /* 374 * Tree blocks not in shareable trees and tree roots are never shared. 375 * If a block was allocated after the last snapshot and the block was 376 * not allocated by tree relocation, we know the block is not shared. 377 */ 378 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && 379 buf != root->node && buf != root->commit_root && 380 (btrfs_header_generation(buf) <= 381 btrfs_root_last_snapshot(&root->root_item) || 382 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))) 383 return 1; 384 385 return 0; 386 } 387 388 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans, 389 struct btrfs_root *root, 390 struct extent_buffer *buf, 391 struct extent_buffer *cow, 392 int *last_ref) 393 { 394 struct btrfs_fs_info *fs_info = root->fs_info; 395 u64 refs; 396 u64 owner; 397 u64 flags; 398 u64 new_flags = 0; 399 int ret; 400 401 /* 402 * Backrefs update rules: 403 * 404 * Always use full backrefs for extent pointers in tree block 405 * allocated by tree relocation. 406 * 407 * If a shared tree block is no longer referenced by its owner 408 * tree (btrfs_header_owner(buf) == root->root_key.objectid), 409 * use full backrefs for extent pointers in tree block. 410 * 411 * If a tree block is been relocating 412 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID), 413 * use full backrefs for extent pointers in tree block. 414 * The reason for this is some operations (such as drop tree) 415 * are only allowed for blocks use full backrefs. 416 */ 417 418 if (btrfs_block_can_be_shared(root, buf)) { 419 ret = btrfs_lookup_extent_info(trans, fs_info, buf->start, 420 btrfs_header_level(buf), 1, 421 &refs, &flags); 422 if (ret) 423 return ret; 424 if (refs == 0) { 425 ret = -EROFS; 426 btrfs_handle_fs_error(fs_info, ret, NULL); 427 return ret; 428 } 429 } else { 430 refs = 1; 431 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID || 432 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV) 433 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF; 434 else 435 flags = 0; 436 } 437 438 owner = btrfs_header_owner(buf); 439 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID && 440 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)); 441 442 if (refs > 1) { 443 if ((owner == root->root_key.objectid || 444 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && 445 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) { 446 ret = btrfs_inc_ref(trans, root, buf, 1); 447 if (ret) 448 return ret; 449 450 if (root->root_key.objectid == 451 BTRFS_TREE_RELOC_OBJECTID) { 452 ret = btrfs_dec_ref(trans, root, buf, 0); 453 if (ret) 454 return ret; 455 ret = btrfs_inc_ref(trans, root, cow, 1); 456 if (ret) 457 return ret; 458 } 459 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF; 460 } else { 461 462 if (root->root_key.objectid == 463 BTRFS_TREE_RELOC_OBJECTID) 464 ret = btrfs_inc_ref(trans, root, cow, 1); 465 else 466 ret = btrfs_inc_ref(trans, root, cow, 0); 467 if (ret) 468 return ret; 469 } 470 if (new_flags != 0) { 471 ret = btrfs_set_disk_extent_flags(trans, buf, new_flags); 472 if (ret) 473 return ret; 474 } 475 } else { 476 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) { 477 if (root->root_key.objectid == 478 BTRFS_TREE_RELOC_OBJECTID) 479 ret = btrfs_inc_ref(trans, root, cow, 1); 480 else 481 ret = btrfs_inc_ref(trans, root, cow, 0); 482 if (ret) 483 return ret; 484 ret = btrfs_dec_ref(trans, root, buf, 1); 485 if (ret) 486 return ret; 487 } 488 btrfs_clear_buffer_dirty(trans, buf); 489 *last_ref = 1; 490 } 491 return 0; 492 } 493 494 /* 495 * does the dirty work in cow of a single block. The parent block (if 496 * supplied) is updated to point to the new cow copy. The new buffer is marked 497 * dirty and returned locked. If you modify the block it needs to be marked 498 * dirty again. 499 * 500 * search_start -- an allocation hint for the new block 501 * 502 * empty_size -- a hint that you plan on doing more cow. This is the size in 503 * bytes the allocator should try to find free next to the block it returns. 504 * This is just a hint and may be ignored by the allocator. 505 */ 506 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, 507 struct btrfs_root *root, 508 struct extent_buffer *buf, 509 struct extent_buffer *parent, int parent_slot, 510 struct extent_buffer **cow_ret, 511 u64 search_start, u64 empty_size, 512 enum btrfs_lock_nesting nest) 513 { 514 struct btrfs_fs_info *fs_info = root->fs_info; 515 struct btrfs_disk_key disk_key; 516 struct extent_buffer *cow; 517 int level, ret; 518 int last_ref = 0; 519 int unlock_orig = 0; 520 u64 parent_start = 0; 521 522 if (*cow_ret == buf) 523 unlock_orig = 1; 524 525 btrfs_assert_tree_write_locked(buf); 526 527 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && 528 trans->transid != fs_info->running_transaction->transid); 529 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && 530 trans->transid != root->last_trans); 531 532 level = btrfs_header_level(buf); 533 534 if (level == 0) 535 btrfs_item_key(buf, &disk_key, 0); 536 else 537 btrfs_node_key(buf, &disk_key, 0); 538 539 if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent) 540 parent_start = parent->start; 541 542 cow = btrfs_alloc_tree_block(trans, root, parent_start, 543 root->root_key.objectid, &disk_key, level, 544 search_start, empty_size, nest); 545 if (IS_ERR(cow)) 546 return PTR_ERR(cow); 547 548 /* cow is set to blocking by btrfs_init_new_buffer */ 549 550 copy_extent_buffer_full(cow, buf); 551 btrfs_set_header_bytenr(cow, cow->start); 552 btrfs_set_header_generation(cow, trans->transid); 553 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV); 554 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN | 555 BTRFS_HEADER_FLAG_RELOC); 556 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) 557 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC); 558 else 559 btrfs_set_header_owner(cow, root->root_key.objectid); 560 561 write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid); 562 563 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref); 564 if (ret) { 565 btrfs_tree_unlock(cow); 566 free_extent_buffer(cow); 567 btrfs_abort_transaction(trans, ret); 568 return ret; 569 } 570 571 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) { 572 ret = btrfs_reloc_cow_block(trans, root, buf, cow); 573 if (ret) { 574 btrfs_tree_unlock(cow); 575 free_extent_buffer(cow); 576 btrfs_abort_transaction(trans, ret); 577 return ret; 578 } 579 } 580 581 if (buf == root->node) { 582 WARN_ON(parent && parent != buf); 583 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID || 584 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV) 585 parent_start = buf->start; 586 587 atomic_inc(&cow->refs); 588 ret = btrfs_tree_mod_log_insert_root(root->node, cow, true); 589 BUG_ON(ret < 0); 590 rcu_assign_pointer(root->node, cow); 591 592 btrfs_free_tree_block(trans, btrfs_root_id(root), buf, 593 parent_start, last_ref); 594 free_extent_buffer(buf); 595 add_root_to_dirty_list(root); 596 } else { 597 WARN_ON(trans->transid != btrfs_header_generation(parent)); 598 ret = btrfs_tree_mod_log_insert_key(parent, parent_slot, 599 BTRFS_MOD_LOG_KEY_REPLACE); 600 if (ret) { 601 btrfs_tree_unlock(cow); 602 free_extent_buffer(cow); 603 btrfs_abort_transaction(trans, ret); 604 return ret; 605 } 606 btrfs_set_node_blockptr(parent, parent_slot, 607 cow->start); 608 btrfs_set_node_ptr_generation(parent, parent_slot, 609 trans->transid); 610 btrfs_mark_buffer_dirty(parent); 611 if (last_ref) { 612 ret = btrfs_tree_mod_log_free_eb(buf); 613 if (ret) { 614 btrfs_tree_unlock(cow); 615 free_extent_buffer(cow); 616 btrfs_abort_transaction(trans, ret); 617 return ret; 618 } 619 } 620 btrfs_free_tree_block(trans, btrfs_root_id(root), buf, 621 parent_start, last_ref); 622 } 623 if (unlock_orig) 624 btrfs_tree_unlock(buf); 625 free_extent_buffer_stale(buf); 626 btrfs_mark_buffer_dirty(cow); 627 *cow_ret = cow; 628 return 0; 629 } 630 631 static inline int should_cow_block(struct btrfs_trans_handle *trans, 632 struct btrfs_root *root, 633 struct extent_buffer *buf) 634 { 635 if (btrfs_is_testing(root->fs_info)) 636 return 0; 637 638 /* Ensure we can see the FORCE_COW bit */ 639 smp_mb__before_atomic(); 640 641 /* 642 * We do not need to cow a block if 643 * 1) this block is not created or changed in this transaction; 644 * 2) this block does not belong to TREE_RELOC tree; 645 * 3) the root is not forced COW. 646 * 647 * What is forced COW: 648 * when we create snapshot during committing the transaction, 649 * after we've finished copying src root, we must COW the shared 650 * block to ensure the metadata consistency. 651 */ 652 if (btrfs_header_generation(buf) == trans->transid && 653 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) && 654 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID && 655 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) && 656 !test_bit(BTRFS_ROOT_FORCE_COW, &root->state)) 657 return 0; 658 return 1; 659 } 660 661 /* 662 * cows a single block, see __btrfs_cow_block for the real work. 663 * This version of it has extra checks so that a block isn't COWed more than 664 * once per transaction, as long as it hasn't been written yet 665 */ 666 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans, 667 struct btrfs_root *root, struct extent_buffer *buf, 668 struct extent_buffer *parent, int parent_slot, 669 struct extent_buffer **cow_ret, 670 enum btrfs_lock_nesting nest) 671 { 672 struct btrfs_fs_info *fs_info = root->fs_info; 673 u64 search_start; 674 int ret; 675 676 if (test_bit(BTRFS_ROOT_DELETING, &root->state)) 677 btrfs_err(fs_info, 678 "COW'ing blocks on a fs root that's being dropped"); 679 680 if (trans->transaction != fs_info->running_transaction) 681 WARN(1, KERN_CRIT "trans %llu running %llu\n", 682 trans->transid, 683 fs_info->running_transaction->transid); 684 685 if (trans->transid != fs_info->generation) 686 WARN(1, KERN_CRIT "trans %llu running %llu\n", 687 trans->transid, fs_info->generation); 688 689 if (!should_cow_block(trans, root, buf)) { 690 *cow_ret = buf; 691 return 0; 692 } 693 694 search_start = buf->start & ~((u64)SZ_1G - 1); 695 696 /* 697 * Before CoWing this block for later modification, check if it's 698 * the subtree root and do the delayed subtree trace if needed. 699 * 700 * Also We don't care about the error, as it's handled internally. 701 */ 702 btrfs_qgroup_trace_subtree_after_cow(trans, root, buf); 703 ret = __btrfs_cow_block(trans, root, buf, parent, 704 parent_slot, cow_ret, search_start, 0, nest); 705 706 trace_btrfs_cow_block(root, buf, *cow_ret); 707 708 return ret; 709 } 710 ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO); 711 712 /* 713 * helper function for defrag to decide if two blocks pointed to by a 714 * node are actually close by 715 */ 716 static int close_blocks(u64 blocknr, u64 other, u32 blocksize) 717 { 718 if (blocknr < other && other - (blocknr + blocksize) < 32768) 719 return 1; 720 if (blocknr > other && blocknr - (other + blocksize) < 32768) 721 return 1; 722 return 0; 723 } 724 725 #ifdef __LITTLE_ENDIAN 726 727 /* 728 * Compare two keys, on little-endian the disk order is same as CPU order and 729 * we can avoid the conversion. 730 */ 731 static int comp_keys(const struct btrfs_disk_key *disk_key, 732 const struct btrfs_key *k2) 733 { 734 const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key; 735 736 return btrfs_comp_cpu_keys(k1, k2); 737 } 738 739 #else 740 741 /* 742 * compare two keys in a memcmp fashion 743 */ 744 static int comp_keys(const struct btrfs_disk_key *disk, 745 const struct btrfs_key *k2) 746 { 747 struct btrfs_key k1; 748 749 btrfs_disk_key_to_cpu(&k1, disk); 750 751 return btrfs_comp_cpu_keys(&k1, k2); 752 } 753 #endif 754 755 /* 756 * same as comp_keys only with two btrfs_key's 757 */ 758 int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2) 759 { 760 if (k1->objectid > k2->objectid) 761 return 1; 762 if (k1->objectid < k2->objectid) 763 return -1; 764 if (k1->type > k2->type) 765 return 1; 766 if (k1->type < k2->type) 767 return -1; 768 if (k1->offset > k2->offset) 769 return 1; 770 if (k1->offset < k2->offset) 771 return -1; 772 return 0; 773 } 774 775 /* 776 * this is used by the defrag code to go through all the 777 * leaves pointed to by a node and reallocate them so that 778 * disk order is close to key order 779 */ 780 int btrfs_realloc_node(struct btrfs_trans_handle *trans, 781 struct btrfs_root *root, struct extent_buffer *parent, 782 int start_slot, u64 *last_ret, 783 struct btrfs_key *progress) 784 { 785 struct btrfs_fs_info *fs_info = root->fs_info; 786 struct extent_buffer *cur; 787 u64 blocknr; 788 u64 search_start = *last_ret; 789 u64 last_block = 0; 790 u64 other; 791 u32 parent_nritems; 792 int end_slot; 793 int i; 794 int err = 0; 795 u32 blocksize; 796 int progress_passed = 0; 797 struct btrfs_disk_key disk_key; 798 799 WARN_ON(trans->transaction != fs_info->running_transaction); 800 WARN_ON(trans->transid != fs_info->generation); 801 802 parent_nritems = btrfs_header_nritems(parent); 803 blocksize = fs_info->nodesize; 804 end_slot = parent_nritems - 1; 805 806 if (parent_nritems <= 1) 807 return 0; 808 809 for (i = start_slot; i <= end_slot; i++) { 810 int close = 1; 811 812 btrfs_node_key(parent, &disk_key, i); 813 if (!progress_passed && comp_keys(&disk_key, progress) < 0) 814 continue; 815 816 progress_passed = 1; 817 blocknr = btrfs_node_blockptr(parent, i); 818 if (last_block == 0) 819 last_block = blocknr; 820 821 if (i > 0) { 822 other = btrfs_node_blockptr(parent, i - 1); 823 close = close_blocks(blocknr, other, blocksize); 824 } 825 if (!close && i < end_slot) { 826 other = btrfs_node_blockptr(parent, i + 1); 827 close = close_blocks(blocknr, other, blocksize); 828 } 829 if (close) { 830 last_block = blocknr; 831 continue; 832 } 833 834 cur = btrfs_read_node_slot(parent, i); 835 if (IS_ERR(cur)) 836 return PTR_ERR(cur); 837 if (search_start == 0) 838 search_start = last_block; 839 840 btrfs_tree_lock(cur); 841 err = __btrfs_cow_block(trans, root, cur, parent, i, 842 &cur, search_start, 843 min(16 * blocksize, 844 (end_slot - i) * blocksize), 845 BTRFS_NESTING_COW); 846 if (err) { 847 btrfs_tree_unlock(cur); 848 free_extent_buffer(cur); 849 break; 850 } 851 search_start = cur->start; 852 last_block = cur->start; 853 *last_ret = search_start; 854 btrfs_tree_unlock(cur); 855 free_extent_buffer(cur); 856 } 857 return err; 858 } 859 860 /* 861 * Search for a key in the given extent_buffer. 862 * 863 * The lower boundary for the search is specified by the slot number @first_slot. 864 * Use a value of 0 to search over the whole extent buffer. Works for both 865 * leaves and nodes. 866 * 867 * The slot in the extent buffer is returned via @slot. If the key exists in the 868 * extent buffer, then @slot will point to the slot where the key is, otherwise 869 * it points to the slot where you would insert the key. 870 * 871 * Slot may point to the total number of items (i.e. one position beyond the last 872 * key) if the key is bigger than the last key in the extent buffer. 873 */ 874 int btrfs_bin_search(struct extent_buffer *eb, int first_slot, 875 const struct btrfs_key *key, int *slot) 876 { 877 unsigned long p; 878 int item_size; 879 /* 880 * Use unsigned types for the low and high slots, so that we get a more 881 * efficient division in the search loop below. 882 */ 883 u32 low = first_slot; 884 u32 high = btrfs_header_nritems(eb); 885 int ret; 886 const int key_size = sizeof(struct btrfs_disk_key); 887 888 if (unlikely(low > high)) { 889 btrfs_err(eb->fs_info, 890 "%s: low (%u) > high (%u) eb %llu owner %llu level %d", 891 __func__, low, high, eb->start, 892 btrfs_header_owner(eb), btrfs_header_level(eb)); 893 return -EINVAL; 894 } 895 896 if (btrfs_header_level(eb) == 0) { 897 p = offsetof(struct btrfs_leaf, items); 898 item_size = sizeof(struct btrfs_item); 899 } else { 900 p = offsetof(struct btrfs_node, ptrs); 901 item_size = sizeof(struct btrfs_key_ptr); 902 } 903 904 while (low < high) { 905 unsigned long oip; 906 unsigned long offset; 907 struct btrfs_disk_key *tmp; 908 struct btrfs_disk_key unaligned; 909 int mid; 910 911 mid = (low + high) / 2; 912 offset = p + mid * item_size; 913 oip = offset_in_page(offset); 914 915 if (oip + key_size <= PAGE_SIZE) { 916 const unsigned long idx = get_eb_page_index(offset); 917 char *kaddr = page_address(eb->pages[idx]); 918 919 oip = get_eb_offset_in_page(eb, offset); 920 tmp = (struct btrfs_disk_key *)(kaddr + oip); 921 } else { 922 read_extent_buffer(eb, &unaligned, offset, key_size); 923 tmp = &unaligned; 924 } 925 926 ret = comp_keys(tmp, key); 927 928 if (ret < 0) 929 low = mid + 1; 930 else if (ret > 0) 931 high = mid; 932 else { 933 *slot = mid; 934 return 0; 935 } 936 } 937 *slot = low; 938 return 1; 939 } 940 941 static void root_add_used(struct btrfs_root *root, u32 size) 942 { 943 spin_lock(&root->accounting_lock); 944 btrfs_set_root_used(&root->root_item, 945 btrfs_root_used(&root->root_item) + size); 946 spin_unlock(&root->accounting_lock); 947 } 948 949 static void root_sub_used(struct btrfs_root *root, u32 size) 950 { 951 spin_lock(&root->accounting_lock); 952 btrfs_set_root_used(&root->root_item, 953 btrfs_root_used(&root->root_item) - size); 954 spin_unlock(&root->accounting_lock); 955 } 956 957 /* given a node and slot number, this reads the blocks it points to. The 958 * extent buffer is returned with a reference taken (but unlocked). 959 */ 960 struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent, 961 int slot) 962 { 963 int level = btrfs_header_level(parent); 964 struct btrfs_tree_parent_check check = { 0 }; 965 struct extent_buffer *eb; 966 967 if (slot < 0 || slot >= btrfs_header_nritems(parent)) 968 return ERR_PTR(-ENOENT); 969 970 ASSERT(level); 971 972 check.level = level - 1; 973 check.transid = btrfs_node_ptr_generation(parent, slot); 974 check.owner_root = btrfs_header_owner(parent); 975 check.has_first_key = true; 976 btrfs_node_key_to_cpu(parent, &check.first_key, slot); 977 978 eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot), 979 &check); 980 if (IS_ERR(eb)) 981 return eb; 982 if (!extent_buffer_uptodate(eb)) { 983 free_extent_buffer(eb); 984 return ERR_PTR(-EIO); 985 } 986 987 return eb; 988 } 989 990 /* 991 * node level balancing, used to make sure nodes are in proper order for 992 * item deletion. We balance from the top down, so we have to make sure 993 * that a deletion won't leave an node completely empty later on. 994 */ 995 static noinline int balance_level(struct btrfs_trans_handle *trans, 996 struct btrfs_root *root, 997 struct btrfs_path *path, int level) 998 { 999 struct btrfs_fs_info *fs_info = root->fs_info; 1000 struct extent_buffer *right = NULL; 1001 struct extent_buffer *mid; 1002 struct extent_buffer *left = NULL; 1003 struct extent_buffer *parent = NULL; 1004 int ret = 0; 1005 int wret; 1006 int pslot; 1007 int orig_slot = path->slots[level]; 1008 u64 orig_ptr; 1009 1010 ASSERT(level > 0); 1011 1012 mid = path->nodes[level]; 1013 1014 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK); 1015 WARN_ON(btrfs_header_generation(mid) != trans->transid); 1016 1017 orig_ptr = btrfs_node_blockptr(mid, orig_slot); 1018 1019 if (level < BTRFS_MAX_LEVEL - 1) { 1020 parent = path->nodes[level + 1]; 1021 pslot = path->slots[level + 1]; 1022 } 1023 1024 /* 1025 * deal with the case where there is only one pointer in the root 1026 * by promoting the node below to a root 1027 */ 1028 if (!parent) { 1029 struct extent_buffer *child; 1030 1031 if (btrfs_header_nritems(mid) != 1) 1032 return 0; 1033 1034 /* promote the child to a root */ 1035 child = btrfs_read_node_slot(mid, 0); 1036 if (IS_ERR(child)) { 1037 ret = PTR_ERR(child); 1038 btrfs_handle_fs_error(fs_info, ret, NULL); 1039 goto enospc; 1040 } 1041 1042 btrfs_tree_lock(child); 1043 ret = btrfs_cow_block(trans, root, child, mid, 0, &child, 1044 BTRFS_NESTING_COW); 1045 if (ret) { 1046 btrfs_tree_unlock(child); 1047 free_extent_buffer(child); 1048 goto enospc; 1049 } 1050 1051 ret = btrfs_tree_mod_log_insert_root(root->node, child, true); 1052 BUG_ON(ret < 0); 1053 rcu_assign_pointer(root->node, child); 1054 1055 add_root_to_dirty_list(root); 1056 btrfs_tree_unlock(child); 1057 1058 path->locks[level] = 0; 1059 path->nodes[level] = NULL; 1060 btrfs_clear_buffer_dirty(trans, mid); 1061 btrfs_tree_unlock(mid); 1062 /* once for the path */ 1063 free_extent_buffer(mid); 1064 1065 root_sub_used(root, mid->len); 1066 btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1); 1067 /* once for the root ptr */ 1068 free_extent_buffer_stale(mid); 1069 return 0; 1070 } 1071 if (btrfs_header_nritems(mid) > 1072 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4) 1073 return 0; 1074 1075 if (pslot) { 1076 left = btrfs_read_node_slot(parent, pslot - 1); 1077 if (IS_ERR(left)) { 1078 ret = PTR_ERR(left); 1079 left = NULL; 1080 goto enospc; 1081 } 1082 1083 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT); 1084 wret = btrfs_cow_block(trans, root, left, 1085 parent, pslot - 1, &left, 1086 BTRFS_NESTING_LEFT_COW); 1087 if (wret) { 1088 ret = wret; 1089 goto enospc; 1090 } 1091 } 1092 1093 if (pslot + 1 < btrfs_header_nritems(parent)) { 1094 right = btrfs_read_node_slot(parent, pslot + 1); 1095 if (IS_ERR(right)) { 1096 ret = PTR_ERR(right); 1097 right = NULL; 1098 goto enospc; 1099 } 1100 1101 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT); 1102 wret = btrfs_cow_block(trans, root, right, 1103 parent, pslot + 1, &right, 1104 BTRFS_NESTING_RIGHT_COW); 1105 if (wret) { 1106 ret = wret; 1107 goto enospc; 1108 } 1109 } 1110 1111 /* first, try to make some room in the middle buffer */ 1112 if (left) { 1113 orig_slot += btrfs_header_nritems(left); 1114 wret = push_node_left(trans, left, mid, 1); 1115 if (wret < 0) 1116 ret = wret; 1117 } 1118 1119 /* 1120 * then try to empty the right most buffer into the middle 1121 */ 1122 if (right) { 1123 wret = push_node_left(trans, mid, right, 1); 1124 if (wret < 0 && wret != -ENOSPC) 1125 ret = wret; 1126 if (btrfs_header_nritems(right) == 0) { 1127 btrfs_clear_buffer_dirty(trans, right); 1128 btrfs_tree_unlock(right); 1129 btrfs_del_ptr(root, path, level + 1, pslot + 1); 1130 root_sub_used(root, right->len); 1131 btrfs_free_tree_block(trans, btrfs_root_id(root), right, 1132 0, 1); 1133 free_extent_buffer_stale(right); 1134 right = NULL; 1135 } else { 1136 struct btrfs_disk_key right_key; 1137 btrfs_node_key(right, &right_key, 0); 1138 ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1, 1139 BTRFS_MOD_LOG_KEY_REPLACE); 1140 BUG_ON(ret < 0); 1141 btrfs_set_node_key(parent, &right_key, pslot + 1); 1142 btrfs_mark_buffer_dirty(parent); 1143 } 1144 } 1145 if (btrfs_header_nritems(mid) == 1) { 1146 /* 1147 * we're not allowed to leave a node with one item in the 1148 * tree during a delete. A deletion from lower in the tree 1149 * could try to delete the only pointer in this node. 1150 * So, pull some keys from the left. 1151 * There has to be a left pointer at this point because 1152 * otherwise we would have pulled some pointers from the 1153 * right 1154 */ 1155 if (!left) { 1156 ret = -EROFS; 1157 btrfs_handle_fs_error(fs_info, ret, NULL); 1158 goto enospc; 1159 } 1160 wret = balance_node_right(trans, mid, left); 1161 if (wret < 0) { 1162 ret = wret; 1163 goto enospc; 1164 } 1165 if (wret == 1) { 1166 wret = push_node_left(trans, left, mid, 1); 1167 if (wret < 0) 1168 ret = wret; 1169 } 1170 BUG_ON(wret == 1); 1171 } 1172 if (btrfs_header_nritems(mid) == 0) { 1173 btrfs_clear_buffer_dirty(trans, mid); 1174 btrfs_tree_unlock(mid); 1175 btrfs_del_ptr(root, path, level + 1, pslot); 1176 root_sub_used(root, mid->len); 1177 btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1); 1178 free_extent_buffer_stale(mid); 1179 mid = NULL; 1180 } else { 1181 /* update the parent key to reflect our changes */ 1182 struct btrfs_disk_key mid_key; 1183 btrfs_node_key(mid, &mid_key, 0); 1184 ret = btrfs_tree_mod_log_insert_key(parent, pslot, 1185 BTRFS_MOD_LOG_KEY_REPLACE); 1186 BUG_ON(ret < 0); 1187 btrfs_set_node_key(parent, &mid_key, pslot); 1188 btrfs_mark_buffer_dirty(parent); 1189 } 1190 1191 /* update the path */ 1192 if (left) { 1193 if (btrfs_header_nritems(left) > orig_slot) { 1194 atomic_inc(&left->refs); 1195 /* left was locked after cow */ 1196 path->nodes[level] = left; 1197 path->slots[level + 1] -= 1; 1198 path->slots[level] = orig_slot; 1199 if (mid) { 1200 btrfs_tree_unlock(mid); 1201 free_extent_buffer(mid); 1202 } 1203 } else { 1204 orig_slot -= btrfs_header_nritems(left); 1205 path->slots[level] = orig_slot; 1206 } 1207 } 1208 /* double check we haven't messed things up */ 1209 if (orig_ptr != 1210 btrfs_node_blockptr(path->nodes[level], path->slots[level])) 1211 BUG(); 1212 enospc: 1213 if (right) { 1214 btrfs_tree_unlock(right); 1215 free_extent_buffer(right); 1216 } 1217 if (left) { 1218 if (path->nodes[level] != left) 1219 btrfs_tree_unlock(left); 1220 free_extent_buffer(left); 1221 } 1222 return ret; 1223 } 1224 1225 /* Node balancing for insertion. Here we only split or push nodes around 1226 * when they are completely full. This is also done top down, so we 1227 * have to be pessimistic. 1228 */ 1229 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans, 1230 struct btrfs_root *root, 1231 struct btrfs_path *path, int level) 1232 { 1233 struct btrfs_fs_info *fs_info = root->fs_info; 1234 struct extent_buffer *right = NULL; 1235 struct extent_buffer *mid; 1236 struct extent_buffer *left = NULL; 1237 struct extent_buffer *parent = NULL; 1238 int ret = 0; 1239 int wret; 1240 int pslot; 1241 int orig_slot = path->slots[level]; 1242 1243 if (level == 0) 1244 return 1; 1245 1246 mid = path->nodes[level]; 1247 WARN_ON(btrfs_header_generation(mid) != trans->transid); 1248 1249 if (level < BTRFS_MAX_LEVEL - 1) { 1250 parent = path->nodes[level + 1]; 1251 pslot = path->slots[level + 1]; 1252 } 1253 1254 if (!parent) 1255 return 1; 1256 1257 /* first, try to make some room in the middle buffer */ 1258 if (pslot) { 1259 u32 left_nr; 1260 1261 left = btrfs_read_node_slot(parent, pslot - 1); 1262 if (IS_ERR(left)) 1263 return PTR_ERR(left); 1264 1265 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT); 1266 1267 left_nr = btrfs_header_nritems(left); 1268 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) { 1269 wret = 1; 1270 } else { 1271 ret = btrfs_cow_block(trans, root, left, parent, 1272 pslot - 1, &left, 1273 BTRFS_NESTING_LEFT_COW); 1274 if (ret) 1275 wret = 1; 1276 else { 1277 wret = push_node_left(trans, left, mid, 0); 1278 } 1279 } 1280 if (wret < 0) 1281 ret = wret; 1282 if (wret == 0) { 1283 struct btrfs_disk_key disk_key; 1284 orig_slot += left_nr; 1285 btrfs_node_key(mid, &disk_key, 0); 1286 ret = btrfs_tree_mod_log_insert_key(parent, pslot, 1287 BTRFS_MOD_LOG_KEY_REPLACE); 1288 BUG_ON(ret < 0); 1289 btrfs_set_node_key(parent, &disk_key, pslot); 1290 btrfs_mark_buffer_dirty(parent); 1291 if (btrfs_header_nritems(left) > orig_slot) { 1292 path->nodes[level] = left; 1293 path->slots[level + 1] -= 1; 1294 path->slots[level] = orig_slot; 1295 btrfs_tree_unlock(mid); 1296 free_extent_buffer(mid); 1297 } else { 1298 orig_slot -= 1299 btrfs_header_nritems(left); 1300 path->slots[level] = orig_slot; 1301 btrfs_tree_unlock(left); 1302 free_extent_buffer(left); 1303 } 1304 return 0; 1305 } 1306 btrfs_tree_unlock(left); 1307 free_extent_buffer(left); 1308 } 1309 1310 /* 1311 * then try to empty the right most buffer into the middle 1312 */ 1313 if (pslot + 1 < btrfs_header_nritems(parent)) { 1314 u32 right_nr; 1315 1316 right = btrfs_read_node_slot(parent, pslot + 1); 1317 if (IS_ERR(right)) 1318 return PTR_ERR(right); 1319 1320 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT); 1321 1322 right_nr = btrfs_header_nritems(right); 1323 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) { 1324 wret = 1; 1325 } else { 1326 ret = btrfs_cow_block(trans, root, right, 1327 parent, pslot + 1, 1328 &right, BTRFS_NESTING_RIGHT_COW); 1329 if (ret) 1330 wret = 1; 1331 else { 1332 wret = balance_node_right(trans, right, mid); 1333 } 1334 } 1335 if (wret < 0) 1336 ret = wret; 1337 if (wret == 0) { 1338 struct btrfs_disk_key disk_key; 1339 1340 btrfs_node_key(right, &disk_key, 0); 1341 ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1, 1342 BTRFS_MOD_LOG_KEY_REPLACE); 1343 BUG_ON(ret < 0); 1344 btrfs_set_node_key(parent, &disk_key, pslot + 1); 1345 btrfs_mark_buffer_dirty(parent); 1346 1347 if (btrfs_header_nritems(mid) <= orig_slot) { 1348 path->nodes[level] = right; 1349 path->slots[level + 1] += 1; 1350 path->slots[level] = orig_slot - 1351 btrfs_header_nritems(mid); 1352 btrfs_tree_unlock(mid); 1353 free_extent_buffer(mid); 1354 } else { 1355 btrfs_tree_unlock(right); 1356 free_extent_buffer(right); 1357 } 1358 return 0; 1359 } 1360 btrfs_tree_unlock(right); 1361 free_extent_buffer(right); 1362 } 1363 return 1; 1364 } 1365 1366 /* 1367 * readahead one full node of leaves, finding things that are close 1368 * to the block in 'slot', and triggering ra on them. 1369 */ 1370 static void reada_for_search(struct btrfs_fs_info *fs_info, 1371 struct btrfs_path *path, 1372 int level, int slot, u64 objectid) 1373 { 1374 struct extent_buffer *node; 1375 struct btrfs_disk_key disk_key; 1376 u32 nritems; 1377 u64 search; 1378 u64 target; 1379 u64 nread = 0; 1380 u64 nread_max; 1381 u32 nr; 1382 u32 blocksize; 1383 u32 nscan = 0; 1384 1385 if (level != 1 && path->reada != READA_FORWARD_ALWAYS) 1386 return; 1387 1388 if (!path->nodes[level]) 1389 return; 1390 1391 node = path->nodes[level]; 1392 1393 /* 1394 * Since the time between visiting leaves is much shorter than the time 1395 * between visiting nodes, limit read ahead of nodes to 1, to avoid too 1396 * much IO at once (possibly random). 1397 */ 1398 if (path->reada == READA_FORWARD_ALWAYS) { 1399 if (level > 1) 1400 nread_max = node->fs_info->nodesize; 1401 else 1402 nread_max = SZ_128K; 1403 } else { 1404 nread_max = SZ_64K; 1405 } 1406 1407 search = btrfs_node_blockptr(node, slot); 1408 blocksize = fs_info->nodesize; 1409 if (path->reada != READA_FORWARD_ALWAYS) { 1410 struct extent_buffer *eb; 1411 1412 eb = find_extent_buffer(fs_info, search); 1413 if (eb) { 1414 free_extent_buffer(eb); 1415 return; 1416 } 1417 } 1418 1419 target = search; 1420 1421 nritems = btrfs_header_nritems(node); 1422 nr = slot; 1423 1424 while (1) { 1425 if (path->reada == READA_BACK) { 1426 if (nr == 0) 1427 break; 1428 nr--; 1429 } else if (path->reada == READA_FORWARD || 1430 path->reada == READA_FORWARD_ALWAYS) { 1431 nr++; 1432 if (nr >= nritems) 1433 break; 1434 } 1435 if (path->reada == READA_BACK && objectid) { 1436 btrfs_node_key(node, &disk_key, nr); 1437 if (btrfs_disk_key_objectid(&disk_key) != objectid) 1438 break; 1439 } 1440 search = btrfs_node_blockptr(node, nr); 1441 if (path->reada == READA_FORWARD_ALWAYS || 1442 (search <= target && target - search <= 65536) || 1443 (search > target && search - target <= 65536)) { 1444 btrfs_readahead_node_child(node, nr); 1445 nread += blocksize; 1446 } 1447 nscan++; 1448 if (nread > nread_max || nscan > 32) 1449 break; 1450 } 1451 } 1452 1453 static noinline void reada_for_balance(struct btrfs_path *path, int level) 1454 { 1455 struct extent_buffer *parent; 1456 int slot; 1457 int nritems; 1458 1459 parent = path->nodes[level + 1]; 1460 if (!parent) 1461 return; 1462 1463 nritems = btrfs_header_nritems(parent); 1464 slot = path->slots[level + 1]; 1465 1466 if (slot > 0) 1467 btrfs_readahead_node_child(parent, slot - 1); 1468 if (slot + 1 < nritems) 1469 btrfs_readahead_node_child(parent, slot + 1); 1470 } 1471 1472 1473 /* 1474 * when we walk down the tree, it is usually safe to unlock the higher layers 1475 * in the tree. The exceptions are when our path goes through slot 0, because 1476 * operations on the tree might require changing key pointers higher up in the 1477 * tree. 1478 * 1479 * callers might also have set path->keep_locks, which tells this code to keep 1480 * the lock if the path points to the last slot in the block. This is part of 1481 * walking through the tree, and selecting the next slot in the higher block. 1482 * 1483 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so 1484 * if lowest_unlock is 1, level 0 won't be unlocked 1485 */ 1486 static noinline void unlock_up(struct btrfs_path *path, int level, 1487 int lowest_unlock, int min_write_lock_level, 1488 int *write_lock_level) 1489 { 1490 int i; 1491 int skip_level = level; 1492 bool check_skip = true; 1493 1494 for (i = level; i < BTRFS_MAX_LEVEL; i++) { 1495 if (!path->nodes[i]) 1496 break; 1497 if (!path->locks[i]) 1498 break; 1499 1500 if (check_skip) { 1501 if (path->slots[i] == 0) { 1502 skip_level = i + 1; 1503 continue; 1504 } 1505 1506 if (path->keep_locks) { 1507 u32 nritems; 1508 1509 nritems = btrfs_header_nritems(path->nodes[i]); 1510 if (nritems < 1 || path->slots[i] >= nritems - 1) { 1511 skip_level = i + 1; 1512 continue; 1513 } 1514 } 1515 } 1516 1517 if (i >= lowest_unlock && i > skip_level) { 1518 check_skip = false; 1519 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]); 1520 path->locks[i] = 0; 1521 if (write_lock_level && 1522 i > min_write_lock_level && 1523 i <= *write_lock_level) { 1524 *write_lock_level = i - 1; 1525 } 1526 } 1527 } 1528 } 1529 1530 /* 1531 * Helper function for btrfs_search_slot() and other functions that do a search 1532 * on a btree. The goal is to find a tree block in the cache (the radix tree at 1533 * fs_info->buffer_radix), but if we can't find it, or it's not up to date, read 1534 * its pages from disk. 1535 * 1536 * Returns -EAGAIN, with the path unlocked, if the caller needs to repeat the 1537 * whole btree search, starting again from the current root node. 1538 */ 1539 static int 1540 read_block_for_search(struct btrfs_root *root, struct btrfs_path *p, 1541 struct extent_buffer **eb_ret, int level, int slot, 1542 const struct btrfs_key *key) 1543 { 1544 struct btrfs_fs_info *fs_info = root->fs_info; 1545 struct btrfs_tree_parent_check check = { 0 }; 1546 u64 blocknr; 1547 u64 gen; 1548 struct extent_buffer *tmp; 1549 int ret; 1550 int parent_level; 1551 bool unlock_up; 1552 1553 unlock_up = ((level + 1 < BTRFS_MAX_LEVEL) && p->locks[level + 1]); 1554 blocknr = btrfs_node_blockptr(*eb_ret, slot); 1555 gen = btrfs_node_ptr_generation(*eb_ret, slot); 1556 parent_level = btrfs_header_level(*eb_ret); 1557 btrfs_node_key_to_cpu(*eb_ret, &check.first_key, slot); 1558 check.has_first_key = true; 1559 check.level = parent_level - 1; 1560 check.transid = gen; 1561 check.owner_root = root->root_key.objectid; 1562 1563 /* 1564 * If we need to read an extent buffer from disk and we are holding locks 1565 * on upper level nodes, we unlock all the upper nodes before reading the 1566 * extent buffer, and then return -EAGAIN to the caller as it needs to 1567 * restart the search. We don't release the lock on the current level 1568 * because we need to walk this node to figure out which blocks to read. 1569 */ 1570 tmp = find_extent_buffer(fs_info, blocknr); 1571 if (tmp) { 1572 if (p->reada == READA_FORWARD_ALWAYS) 1573 reada_for_search(fs_info, p, level, slot, key->objectid); 1574 1575 /* first we do an atomic uptodate check */ 1576 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) { 1577 /* 1578 * Do extra check for first_key, eb can be stale due to 1579 * being cached, read from scrub, or have multiple 1580 * parents (shared tree blocks). 1581 */ 1582 if (btrfs_verify_level_key(tmp, 1583 parent_level - 1, &check.first_key, gen)) { 1584 free_extent_buffer(tmp); 1585 return -EUCLEAN; 1586 } 1587 *eb_ret = tmp; 1588 return 0; 1589 } 1590 1591 if (p->nowait) { 1592 free_extent_buffer(tmp); 1593 return -EAGAIN; 1594 } 1595 1596 if (unlock_up) 1597 btrfs_unlock_up_safe(p, level + 1); 1598 1599 /* now we're allowed to do a blocking uptodate check */ 1600 ret = btrfs_read_extent_buffer(tmp, &check); 1601 if (ret) { 1602 free_extent_buffer(tmp); 1603 btrfs_release_path(p); 1604 return -EIO; 1605 } 1606 if (btrfs_check_eb_owner(tmp, root->root_key.objectid)) { 1607 free_extent_buffer(tmp); 1608 btrfs_release_path(p); 1609 return -EUCLEAN; 1610 } 1611 1612 if (unlock_up) 1613 ret = -EAGAIN; 1614 1615 goto out; 1616 } else if (p->nowait) { 1617 return -EAGAIN; 1618 } 1619 1620 if (unlock_up) { 1621 btrfs_unlock_up_safe(p, level + 1); 1622 ret = -EAGAIN; 1623 } else { 1624 ret = 0; 1625 } 1626 1627 if (p->reada != READA_NONE) 1628 reada_for_search(fs_info, p, level, slot, key->objectid); 1629 1630 tmp = read_tree_block(fs_info, blocknr, &check); 1631 if (IS_ERR(tmp)) { 1632 btrfs_release_path(p); 1633 return PTR_ERR(tmp); 1634 } 1635 /* 1636 * If the read above didn't mark this buffer up to date, 1637 * it will never end up being up to date. Set ret to EIO now 1638 * and give up so that our caller doesn't loop forever 1639 * on our EAGAINs. 1640 */ 1641 if (!extent_buffer_uptodate(tmp)) 1642 ret = -EIO; 1643 1644 out: 1645 if (ret == 0) { 1646 *eb_ret = tmp; 1647 } else { 1648 free_extent_buffer(tmp); 1649 btrfs_release_path(p); 1650 } 1651 1652 return ret; 1653 } 1654 1655 /* 1656 * helper function for btrfs_search_slot. This does all of the checks 1657 * for node-level blocks and does any balancing required based on 1658 * the ins_len. 1659 * 1660 * If no extra work was required, zero is returned. If we had to 1661 * drop the path, -EAGAIN is returned and btrfs_search_slot must 1662 * start over 1663 */ 1664 static int 1665 setup_nodes_for_search(struct btrfs_trans_handle *trans, 1666 struct btrfs_root *root, struct btrfs_path *p, 1667 struct extent_buffer *b, int level, int ins_len, 1668 int *write_lock_level) 1669 { 1670 struct btrfs_fs_info *fs_info = root->fs_info; 1671 int ret = 0; 1672 1673 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >= 1674 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) { 1675 1676 if (*write_lock_level < level + 1) { 1677 *write_lock_level = level + 1; 1678 btrfs_release_path(p); 1679 return -EAGAIN; 1680 } 1681 1682 reada_for_balance(p, level); 1683 ret = split_node(trans, root, p, level); 1684 1685 b = p->nodes[level]; 1686 } else if (ins_len < 0 && btrfs_header_nritems(b) < 1687 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) { 1688 1689 if (*write_lock_level < level + 1) { 1690 *write_lock_level = level + 1; 1691 btrfs_release_path(p); 1692 return -EAGAIN; 1693 } 1694 1695 reada_for_balance(p, level); 1696 ret = balance_level(trans, root, p, level); 1697 if (ret) 1698 return ret; 1699 1700 b = p->nodes[level]; 1701 if (!b) { 1702 btrfs_release_path(p); 1703 return -EAGAIN; 1704 } 1705 BUG_ON(btrfs_header_nritems(b) == 1); 1706 } 1707 return ret; 1708 } 1709 1710 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path, 1711 u64 iobjectid, u64 ioff, u8 key_type, 1712 struct btrfs_key *found_key) 1713 { 1714 int ret; 1715 struct btrfs_key key; 1716 struct extent_buffer *eb; 1717 1718 ASSERT(path); 1719 ASSERT(found_key); 1720 1721 key.type = key_type; 1722 key.objectid = iobjectid; 1723 key.offset = ioff; 1724 1725 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0); 1726 if (ret < 0) 1727 return ret; 1728 1729 eb = path->nodes[0]; 1730 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) { 1731 ret = btrfs_next_leaf(fs_root, path); 1732 if (ret) 1733 return ret; 1734 eb = path->nodes[0]; 1735 } 1736 1737 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]); 1738 if (found_key->type != key.type || 1739 found_key->objectid != key.objectid) 1740 return 1; 1741 1742 return 0; 1743 } 1744 1745 static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root, 1746 struct btrfs_path *p, 1747 int write_lock_level) 1748 { 1749 struct extent_buffer *b; 1750 int root_lock = 0; 1751 int level = 0; 1752 1753 if (p->search_commit_root) { 1754 b = root->commit_root; 1755 atomic_inc(&b->refs); 1756 level = btrfs_header_level(b); 1757 /* 1758 * Ensure that all callers have set skip_locking when 1759 * p->search_commit_root = 1. 1760 */ 1761 ASSERT(p->skip_locking == 1); 1762 1763 goto out; 1764 } 1765 1766 if (p->skip_locking) { 1767 b = btrfs_root_node(root); 1768 level = btrfs_header_level(b); 1769 goto out; 1770 } 1771 1772 /* We try very hard to do read locks on the root */ 1773 root_lock = BTRFS_READ_LOCK; 1774 1775 /* 1776 * If the level is set to maximum, we can skip trying to get the read 1777 * lock. 1778 */ 1779 if (write_lock_level < BTRFS_MAX_LEVEL) { 1780 /* 1781 * We don't know the level of the root node until we actually 1782 * have it read locked 1783 */ 1784 if (p->nowait) { 1785 b = btrfs_try_read_lock_root_node(root); 1786 if (IS_ERR(b)) 1787 return b; 1788 } else { 1789 b = btrfs_read_lock_root_node(root); 1790 } 1791 level = btrfs_header_level(b); 1792 if (level > write_lock_level) 1793 goto out; 1794 1795 /* Whoops, must trade for write lock */ 1796 btrfs_tree_read_unlock(b); 1797 free_extent_buffer(b); 1798 } 1799 1800 b = btrfs_lock_root_node(root); 1801 root_lock = BTRFS_WRITE_LOCK; 1802 1803 /* The level might have changed, check again */ 1804 level = btrfs_header_level(b); 1805 1806 out: 1807 /* 1808 * The root may have failed to write out at some point, and thus is no 1809 * longer valid, return an error in this case. 1810 */ 1811 if (!extent_buffer_uptodate(b)) { 1812 if (root_lock) 1813 btrfs_tree_unlock_rw(b, root_lock); 1814 free_extent_buffer(b); 1815 return ERR_PTR(-EIO); 1816 } 1817 1818 p->nodes[level] = b; 1819 if (!p->skip_locking) 1820 p->locks[level] = root_lock; 1821 /* 1822 * Callers are responsible for dropping b's references. 1823 */ 1824 return b; 1825 } 1826 1827 /* 1828 * Replace the extent buffer at the lowest level of the path with a cloned 1829 * version. The purpose is to be able to use it safely, after releasing the 1830 * commit root semaphore, even if relocation is happening in parallel, the 1831 * transaction used for relocation is committed and the extent buffer is 1832 * reallocated in the next transaction. 1833 * 1834 * This is used in a context where the caller does not prevent transaction 1835 * commits from happening, either by holding a transaction handle or holding 1836 * some lock, while it's doing searches through a commit root. 1837 * At the moment it's only used for send operations. 1838 */ 1839 static int finish_need_commit_sem_search(struct btrfs_path *path) 1840 { 1841 const int i = path->lowest_level; 1842 const int slot = path->slots[i]; 1843 struct extent_buffer *lowest = path->nodes[i]; 1844 struct extent_buffer *clone; 1845 1846 ASSERT(path->need_commit_sem); 1847 1848 if (!lowest) 1849 return 0; 1850 1851 lockdep_assert_held_read(&lowest->fs_info->commit_root_sem); 1852 1853 clone = btrfs_clone_extent_buffer(lowest); 1854 if (!clone) 1855 return -ENOMEM; 1856 1857 btrfs_release_path(path); 1858 path->nodes[i] = clone; 1859 path->slots[i] = slot; 1860 1861 return 0; 1862 } 1863 1864 static inline int search_for_key_slot(struct extent_buffer *eb, 1865 int search_low_slot, 1866 const struct btrfs_key *key, 1867 int prev_cmp, 1868 int *slot) 1869 { 1870 /* 1871 * If a previous call to btrfs_bin_search() on a parent node returned an 1872 * exact match (prev_cmp == 0), we can safely assume the target key will 1873 * always be at slot 0 on lower levels, since each key pointer 1874 * (struct btrfs_key_ptr) refers to the lowest key accessible from the 1875 * subtree it points to. Thus we can skip searching lower levels. 1876 */ 1877 if (prev_cmp == 0) { 1878 *slot = 0; 1879 return 0; 1880 } 1881 1882 return btrfs_bin_search(eb, search_low_slot, key, slot); 1883 } 1884 1885 static int search_leaf(struct btrfs_trans_handle *trans, 1886 struct btrfs_root *root, 1887 const struct btrfs_key *key, 1888 struct btrfs_path *path, 1889 int ins_len, 1890 int prev_cmp) 1891 { 1892 struct extent_buffer *leaf = path->nodes[0]; 1893 int leaf_free_space = -1; 1894 int search_low_slot = 0; 1895 int ret; 1896 bool do_bin_search = true; 1897 1898 /* 1899 * If we are doing an insertion, the leaf has enough free space and the 1900 * destination slot for the key is not slot 0, then we can unlock our 1901 * write lock on the parent, and any other upper nodes, before doing the 1902 * binary search on the leaf (with search_for_key_slot()), allowing other 1903 * tasks to lock the parent and any other upper nodes. 1904 */ 1905 if (ins_len > 0) { 1906 /* 1907 * Cache the leaf free space, since we will need it later and it 1908 * will not change until then. 1909 */ 1910 leaf_free_space = btrfs_leaf_free_space(leaf); 1911 1912 /* 1913 * !path->locks[1] means we have a single node tree, the leaf is 1914 * the root of the tree. 1915 */ 1916 if (path->locks[1] && leaf_free_space >= ins_len) { 1917 struct btrfs_disk_key first_key; 1918 1919 ASSERT(btrfs_header_nritems(leaf) > 0); 1920 btrfs_item_key(leaf, &first_key, 0); 1921 1922 /* 1923 * Doing the extra comparison with the first key is cheap, 1924 * taking into account that the first key is very likely 1925 * already in a cache line because it immediately follows 1926 * the extent buffer's header and we have recently accessed 1927 * the header's level field. 1928 */ 1929 ret = comp_keys(&first_key, key); 1930 if (ret < 0) { 1931 /* 1932 * The first key is smaller than the key we want 1933 * to insert, so we are safe to unlock all upper 1934 * nodes and we have to do the binary search. 1935 * 1936 * We do use btrfs_unlock_up_safe() and not 1937 * unlock_up() because the later does not unlock 1938 * nodes with a slot of 0 - we can safely unlock 1939 * any node even if its slot is 0 since in this 1940 * case the key does not end up at slot 0 of the 1941 * leaf and there's no need to split the leaf. 1942 */ 1943 btrfs_unlock_up_safe(path, 1); 1944 search_low_slot = 1; 1945 } else { 1946 /* 1947 * The first key is >= then the key we want to 1948 * insert, so we can skip the binary search as 1949 * the target key will be at slot 0. 1950 * 1951 * We can not unlock upper nodes when the key is 1952 * less than the first key, because we will need 1953 * to update the key at slot 0 of the parent node 1954 * and possibly of other upper nodes too. 1955 * If the key matches the first key, then we can 1956 * unlock all the upper nodes, using 1957 * btrfs_unlock_up_safe() instead of unlock_up() 1958 * as stated above. 1959 */ 1960 if (ret == 0) 1961 btrfs_unlock_up_safe(path, 1); 1962 /* 1963 * ret is already 0 or 1, matching the result of 1964 * a btrfs_bin_search() call, so there is no need 1965 * to adjust it. 1966 */ 1967 do_bin_search = false; 1968 path->slots[0] = 0; 1969 } 1970 } 1971 } 1972 1973 if (do_bin_search) { 1974 ret = search_for_key_slot(leaf, search_low_slot, key, 1975 prev_cmp, &path->slots[0]); 1976 if (ret < 0) 1977 return ret; 1978 } 1979 1980 if (ins_len > 0) { 1981 /* 1982 * Item key already exists. In this case, if we are allowed to 1983 * insert the item (for example, in dir_item case, item key 1984 * collision is allowed), it will be merged with the original 1985 * item. Only the item size grows, no new btrfs item will be 1986 * added. If search_for_extension is not set, ins_len already 1987 * accounts the size btrfs_item, deduct it here so leaf space 1988 * check will be correct. 1989 */ 1990 if (ret == 0 && !path->search_for_extension) { 1991 ASSERT(ins_len >= sizeof(struct btrfs_item)); 1992 ins_len -= sizeof(struct btrfs_item); 1993 } 1994 1995 ASSERT(leaf_free_space >= 0); 1996 1997 if (leaf_free_space < ins_len) { 1998 int err; 1999 2000 err = split_leaf(trans, root, key, path, ins_len, 2001 (ret == 0)); 2002 ASSERT(err <= 0); 2003 if (WARN_ON(err > 0)) 2004 err = -EUCLEAN; 2005 if (err) 2006 ret = err; 2007 } 2008 } 2009 2010 return ret; 2011 } 2012 2013 /* 2014 * btrfs_search_slot - look for a key in a tree and perform necessary 2015 * modifications to preserve tree invariants. 2016 * 2017 * @trans: Handle of transaction, used when modifying the tree 2018 * @p: Holds all btree nodes along the search path 2019 * @root: The root node of the tree 2020 * @key: The key we are looking for 2021 * @ins_len: Indicates purpose of search: 2022 * >0 for inserts it's size of item inserted (*) 2023 * <0 for deletions 2024 * 0 for plain searches, not modifying the tree 2025 * 2026 * (*) If size of item inserted doesn't include 2027 * sizeof(struct btrfs_item), then p->search_for_extension must 2028 * be set. 2029 * @cow: boolean should CoW operations be performed. Must always be 1 2030 * when modifying the tree. 2031 * 2032 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree. 2033 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible) 2034 * 2035 * If @key is found, 0 is returned and you can find the item in the leaf level 2036 * of the path (level 0) 2037 * 2038 * If @key isn't found, 1 is returned and the leaf level of the path (level 0) 2039 * points to the slot where it should be inserted 2040 * 2041 * If an error is encountered while searching the tree a negative error number 2042 * is returned 2043 */ 2044 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root, 2045 const struct btrfs_key *key, struct btrfs_path *p, 2046 int ins_len, int cow) 2047 { 2048 struct btrfs_fs_info *fs_info = root->fs_info; 2049 struct extent_buffer *b; 2050 int slot; 2051 int ret; 2052 int err; 2053 int level; 2054 int lowest_unlock = 1; 2055 /* everything at write_lock_level or lower must be write locked */ 2056 int write_lock_level = 0; 2057 u8 lowest_level = 0; 2058 int min_write_lock_level; 2059 int prev_cmp; 2060 2061 might_sleep(); 2062 2063 lowest_level = p->lowest_level; 2064 WARN_ON(lowest_level && ins_len > 0); 2065 WARN_ON(p->nodes[0] != NULL); 2066 BUG_ON(!cow && ins_len); 2067 2068 /* 2069 * For now only allow nowait for read only operations. There's no 2070 * strict reason why we can't, we just only need it for reads so it's 2071 * only implemented for reads. 2072 */ 2073 ASSERT(!p->nowait || !cow); 2074 2075 if (ins_len < 0) { 2076 lowest_unlock = 2; 2077 2078 /* when we are removing items, we might have to go up to level 2079 * two as we update tree pointers Make sure we keep write 2080 * for those levels as well 2081 */ 2082 write_lock_level = 2; 2083 } else if (ins_len > 0) { 2084 /* 2085 * for inserting items, make sure we have a write lock on 2086 * level 1 so we can update keys 2087 */ 2088 write_lock_level = 1; 2089 } 2090 2091 if (!cow) 2092 write_lock_level = -1; 2093 2094 if (cow && (p->keep_locks || p->lowest_level)) 2095 write_lock_level = BTRFS_MAX_LEVEL; 2096 2097 min_write_lock_level = write_lock_level; 2098 2099 if (p->need_commit_sem) { 2100 ASSERT(p->search_commit_root); 2101 if (p->nowait) { 2102 if (!down_read_trylock(&fs_info->commit_root_sem)) 2103 return -EAGAIN; 2104 } else { 2105 down_read(&fs_info->commit_root_sem); 2106 } 2107 } 2108 2109 again: 2110 prev_cmp = -1; 2111 b = btrfs_search_slot_get_root(root, p, write_lock_level); 2112 if (IS_ERR(b)) { 2113 ret = PTR_ERR(b); 2114 goto done; 2115 } 2116 2117 while (b) { 2118 int dec = 0; 2119 2120 level = btrfs_header_level(b); 2121 2122 if (cow) { 2123 bool last_level = (level == (BTRFS_MAX_LEVEL - 1)); 2124 2125 /* 2126 * if we don't really need to cow this block 2127 * then we don't want to set the path blocking, 2128 * so we test it here 2129 */ 2130 if (!should_cow_block(trans, root, b)) 2131 goto cow_done; 2132 2133 /* 2134 * must have write locks on this node and the 2135 * parent 2136 */ 2137 if (level > write_lock_level || 2138 (level + 1 > write_lock_level && 2139 level + 1 < BTRFS_MAX_LEVEL && 2140 p->nodes[level + 1])) { 2141 write_lock_level = level + 1; 2142 btrfs_release_path(p); 2143 goto again; 2144 } 2145 2146 if (last_level) 2147 err = btrfs_cow_block(trans, root, b, NULL, 0, 2148 &b, 2149 BTRFS_NESTING_COW); 2150 else 2151 err = btrfs_cow_block(trans, root, b, 2152 p->nodes[level + 1], 2153 p->slots[level + 1], &b, 2154 BTRFS_NESTING_COW); 2155 if (err) { 2156 ret = err; 2157 goto done; 2158 } 2159 } 2160 cow_done: 2161 p->nodes[level] = b; 2162 2163 /* 2164 * we have a lock on b and as long as we aren't changing 2165 * the tree, there is no way to for the items in b to change. 2166 * It is safe to drop the lock on our parent before we 2167 * go through the expensive btree search on b. 2168 * 2169 * If we're inserting or deleting (ins_len != 0), then we might 2170 * be changing slot zero, which may require changing the parent. 2171 * So, we can't drop the lock until after we know which slot 2172 * we're operating on. 2173 */ 2174 if (!ins_len && !p->keep_locks) { 2175 int u = level + 1; 2176 2177 if (u < BTRFS_MAX_LEVEL && p->locks[u]) { 2178 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]); 2179 p->locks[u] = 0; 2180 } 2181 } 2182 2183 if (level == 0) { 2184 if (ins_len > 0) 2185 ASSERT(write_lock_level >= 1); 2186 2187 ret = search_leaf(trans, root, key, p, ins_len, prev_cmp); 2188 if (!p->search_for_split) 2189 unlock_up(p, level, lowest_unlock, 2190 min_write_lock_level, NULL); 2191 goto done; 2192 } 2193 2194 ret = search_for_key_slot(b, 0, key, prev_cmp, &slot); 2195 if (ret < 0) 2196 goto done; 2197 prev_cmp = ret; 2198 2199 if (ret && slot > 0) { 2200 dec = 1; 2201 slot--; 2202 } 2203 p->slots[level] = slot; 2204 err = setup_nodes_for_search(trans, root, p, b, level, ins_len, 2205 &write_lock_level); 2206 if (err == -EAGAIN) 2207 goto again; 2208 if (err) { 2209 ret = err; 2210 goto done; 2211 } 2212 b = p->nodes[level]; 2213 slot = p->slots[level]; 2214 2215 /* 2216 * Slot 0 is special, if we change the key we have to update 2217 * the parent pointer which means we must have a write lock on 2218 * the parent 2219 */ 2220 if (slot == 0 && ins_len && write_lock_level < level + 1) { 2221 write_lock_level = level + 1; 2222 btrfs_release_path(p); 2223 goto again; 2224 } 2225 2226 unlock_up(p, level, lowest_unlock, min_write_lock_level, 2227 &write_lock_level); 2228 2229 if (level == lowest_level) { 2230 if (dec) 2231 p->slots[level]++; 2232 goto done; 2233 } 2234 2235 err = read_block_for_search(root, p, &b, level, slot, key); 2236 if (err == -EAGAIN) 2237 goto again; 2238 if (err) { 2239 ret = err; 2240 goto done; 2241 } 2242 2243 if (!p->skip_locking) { 2244 level = btrfs_header_level(b); 2245 2246 btrfs_maybe_reset_lockdep_class(root, b); 2247 2248 if (level <= write_lock_level) { 2249 btrfs_tree_lock(b); 2250 p->locks[level] = BTRFS_WRITE_LOCK; 2251 } else { 2252 if (p->nowait) { 2253 if (!btrfs_try_tree_read_lock(b)) { 2254 free_extent_buffer(b); 2255 ret = -EAGAIN; 2256 goto done; 2257 } 2258 } else { 2259 btrfs_tree_read_lock(b); 2260 } 2261 p->locks[level] = BTRFS_READ_LOCK; 2262 } 2263 p->nodes[level] = b; 2264 } 2265 } 2266 ret = 1; 2267 done: 2268 if (ret < 0 && !p->skip_release_on_error) 2269 btrfs_release_path(p); 2270 2271 if (p->need_commit_sem) { 2272 int ret2; 2273 2274 ret2 = finish_need_commit_sem_search(p); 2275 up_read(&fs_info->commit_root_sem); 2276 if (ret2) 2277 ret = ret2; 2278 } 2279 2280 return ret; 2281 } 2282 ALLOW_ERROR_INJECTION(btrfs_search_slot, ERRNO); 2283 2284 /* 2285 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the 2286 * current state of the tree together with the operations recorded in the tree 2287 * modification log to search for the key in a previous version of this tree, as 2288 * denoted by the time_seq parameter. 2289 * 2290 * Naturally, there is no support for insert, delete or cow operations. 2291 * 2292 * The resulting path and return value will be set up as if we called 2293 * btrfs_search_slot at that point in time with ins_len and cow both set to 0. 2294 */ 2295 int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key, 2296 struct btrfs_path *p, u64 time_seq) 2297 { 2298 struct btrfs_fs_info *fs_info = root->fs_info; 2299 struct extent_buffer *b; 2300 int slot; 2301 int ret; 2302 int err; 2303 int level; 2304 int lowest_unlock = 1; 2305 u8 lowest_level = 0; 2306 2307 lowest_level = p->lowest_level; 2308 WARN_ON(p->nodes[0] != NULL); 2309 ASSERT(!p->nowait); 2310 2311 if (p->search_commit_root) { 2312 BUG_ON(time_seq); 2313 return btrfs_search_slot(NULL, root, key, p, 0, 0); 2314 } 2315 2316 again: 2317 b = btrfs_get_old_root(root, time_seq); 2318 if (!b) { 2319 ret = -EIO; 2320 goto done; 2321 } 2322 level = btrfs_header_level(b); 2323 p->locks[level] = BTRFS_READ_LOCK; 2324 2325 while (b) { 2326 int dec = 0; 2327 2328 level = btrfs_header_level(b); 2329 p->nodes[level] = b; 2330 2331 /* 2332 * we have a lock on b and as long as we aren't changing 2333 * the tree, there is no way to for the items in b to change. 2334 * It is safe to drop the lock on our parent before we 2335 * go through the expensive btree search on b. 2336 */ 2337 btrfs_unlock_up_safe(p, level + 1); 2338 2339 ret = btrfs_bin_search(b, 0, key, &slot); 2340 if (ret < 0) 2341 goto done; 2342 2343 if (level == 0) { 2344 p->slots[level] = slot; 2345 unlock_up(p, level, lowest_unlock, 0, NULL); 2346 goto done; 2347 } 2348 2349 if (ret && slot > 0) { 2350 dec = 1; 2351 slot--; 2352 } 2353 p->slots[level] = slot; 2354 unlock_up(p, level, lowest_unlock, 0, NULL); 2355 2356 if (level == lowest_level) { 2357 if (dec) 2358 p->slots[level]++; 2359 goto done; 2360 } 2361 2362 err = read_block_for_search(root, p, &b, level, slot, key); 2363 if (err == -EAGAIN) 2364 goto again; 2365 if (err) { 2366 ret = err; 2367 goto done; 2368 } 2369 2370 level = btrfs_header_level(b); 2371 btrfs_tree_read_lock(b); 2372 b = btrfs_tree_mod_log_rewind(fs_info, p, b, time_seq); 2373 if (!b) { 2374 ret = -ENOMEM; 2375 goto done; 2376 } 2377 p->locks[level] = BTRFS_READ_LOCK; 2378 p->nodes[level] = b; 2379 } 2380 ret = 1; 2381 done: 2382 if (ret < 0) 2383 btrfs_release_path(p); 2384 2385 return ret; 2386 } 2387 2388 /* 2389 * Search the tree again to find a leaf with smaller keys. 2390 * Returns 0 if it found something. 2391 * Returns 1 if there are no smaller keys. 2392 * Returns < 0 on error. 2393 * 2394 * This may release the path, and so you may lose any locks held at the 2395 * time you call it. 2396 */ 2397 static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path) 2398 { 2399 struct btrfs_key key; 2400 struct btrfs_key orig_key; 2401 struct btrfs_disk_key found_key; 2402 int ret; 2403 2404 btrfs_item_key_to_cpu(path->nodes[0], &key, 0); 2405 orig_key = key; 2406 2407 if (key.offset > 0) { 2408 key.offset--; 2409 } else if (key.type > 0) { 2410 key.type--; 2411 key.offset = (u64)-1; 2412 } else if (key.objectid > 0) { 2413 key.objectid--; 2414 key.type = (u8)-1; 2415 key.offset = (u64)-1; 2416 } else { 2417 return 1; 2418 } 2419 2420 btrfs_release_path(path); 2421 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2422 if (ret <= 0) 2423 return ret; 2424 2425 /* 2426 * Previous key not found. Even if we were at slot 0 of the leaf we had 2427 * before releasing the path and calling btrfs_search_slot(), we now may 2428 * be in a slot pointing to the same original key - this can happen if 2429 * after we released the path, one of more items were moved from a 2430 * sibling leaf into the front of the leaf we had due to an insertion 2431 * (see push_leaf_right()). 2432 * If we hit this case and our slot is > 0 and just decrement the slot 2433 * so that the caller does not process the same key again, which may or 2434 * may not break the caller, depending on its logic. 2435 */ 2436 if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) { 2437 btrfs_item_key(path->nodes[0], &found_key, path->slots[0]); 2438 ret = comp_keys(&found_key, &orig_key); 2439 if (ret == 0) { 2440 if (path->slots[0] > 0) { 2441 path->slots[0]--; 2442 return 0; 2443 } 2444 /* 2445 * At slot 0, same key as before, it means orig_key is 2446 * the lowest, leftmost, key in the tree. We're done. 2447 */ 2448 return 1; 2449 } 2450 } 2451 2452 btrfs_item_key(path->nodes[0], &found_key, 0); 2453 ret = comp_keys(&found_key, &key); 2454 /* 2455 * We might have had an item with the previous key in the tree right 2456 * before we released our path. And after we released our path, that 2457 * item might have been pushed to the first slot (0) of the leaf we 2458 * were holding due to a tree balance. Alternatively, an item with the 2459 * previous key can exist as the only element of a leaf (big fat item). 2460 * Therefore account for these 2 cases, so that our callers (like 2461 * btrfs_previous_item) don't miss an existing item with a key matching 2462 * the previous key we computed above. 2463 */ 2464 if (ret <= 0) 2465 return 0; 2466 return 1; 2467 } 2468 2469 /* 2470 * helper to use instead of search slot if no exact match is needed but 2471 * instead the next or previous item should be returned. 2472 * When find_higher is true, the next higher item is returned, the next lower 2473 * otherwise. 2474 * When return_any and find_higher are both true, and no higher item is found, 2475 * return the next lower instead. 2476 * When return_any is true and find_higher is false, and no lower item is found, 2477 * return the next higher instead. 2478 * It returns 0 if any item is found, 1 if none is found (tree empty), and 2479 * < 0 on error 2480 */ 2481 int btrfs_search_slot_for_read(struct btrfs_root *root, 2482 const struct btrfs_key *key, 2483 struct btrfs_path *p, int find_higher, 2484 int return_any) 2485 { 2486 int ret; 2487 struct extent_buffer *leaf; 2488 2489 again: 2490 ret = btrfs_search_slot(NULL, root, key, p, 0, 0); 2491 if (ret <= 0) 2492 return ret; 2493 /* 2494 * a return value of 1 means the path is at the position where the 2495 * item should be inserted. Normally this is the next bigger item, 2496 * but in case the previous item is the last in a leaf, path points 2497 * to the first free slot in the previous leaf, i.e. at an invalid 2498 * item. 2499 */ 2500 leaf = p->nodes[0]; 2501 2502 if (find_higher) { 2503 if (p->slots[0] >= btrfs_header_nritems(leaf)) { 2504 ret = btrfs_next_leaf(root, p); 2505 if (ret <= 0) 2506 return ret; 2507 if (!return_any) 2508 return 1; 2509 /* 2510 * no higher item found, return the next 2511 * lower instead 2512 */ 2513 return_any = 0; 2514 find_higher = 0; 2515 btrfs_release_path(p); 2516 goto again; 2517 } 2518 } else { 2519 if (p->slots[0] == 0) { 2520 ret = btrfs_prev_leaf(root, p); 2521 if (ret < 0) 2522 return ret; 2523 if (!ret) { 2524 leaf = p->nodes[0]; 2525 if (p->slots[0] == btrfs_header_nritems(leaf)) 2526 p->slots[0]--; 2527 return 0; 2528 } 2529 if (!return_any) 2530 return 1; 2531 /* 2532 * no lower item found, return the next 2533 * higher instead 2534 */ 2535 return_any = 0; 2536 find_higher = 1; 2537 btrfs_release_path(p); 2538 goto again; 2539 } else { 2540 --p->slots[0]; 2541 } 2542 } 2543 return 0; 2544 } 2545 2546 /* 2547 * Execute search and call btrfs_previous_item to traverse backwards if the item 2548 * was not found. 2549 * 2550 * Return 0 if found, 1 if not found and < 0 if error. 2551 */ 2552 int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key, 2553 struct btrfs_path *path) 2554 { 2555 int ret; 2556 2557 ret = btrfs_search_slot(NULL, root, key, path, 0, 0); 2558 if (ret > 0) 2559 ret = btrfs_previous_item(root, path, key->objectid, key->type); 2560 2561 if (ret == 0) 2562 btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]); 2563 2564 return ret; 2565 } 2566 2567 /* 2568 * Search for a valid slot for the given path. 2569 * 2570 * @root: The root node of the tree. 2571 * @key: Will contain a valid item if found. 2572 * @path: The starting point to validate the slot. 2573 * 2574 * Return: 0 if the item is valid 2575 * 1 if not found 2576 * <0 if error. 2577 */ 2578 int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key, 2579 struct btrfs_path *path) 2580 { 2581 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { 2582 int ret; 2583 2584 ret = btrfs_next_leaf(root, path); 2585 if (ret) 2586 return ret; 2587 } 2588 2589 btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]); 2590 return 0; 2591 } 2592 2593 /* 2594 * adjust the pointers going up the tree, starting at level 2595 * making sure the right key of each node is points to 'key'. 2596 * This is used after shifting pointers to the left, so it stops 2597 * fixing up pointers when a given leaf/node is not in slot 0 of the 2598 * higher levels 2599 * 2600 */ 2601 static void fixup_low_keys(struct btrfs_path *path, 2602 struct btrfs_disk_key *key, int level) 2603 { 2604 int i; 2605 struct extent_buffer *t; 2606 int ret; 2607 2608 for (i = level; i < BTRFS_MAX_LEVEL; i++) { 2609 int tslot = path->slots[i]; 2610 2611 if (!path->nodes[i]) 2612 break; 2613 t = path->nodes[i]; 2614 ret = btrfs_tree_mod_log_insert_key(t, tslot, 2615 BTRFS_MOD_LOG_KEY_REPLACE); 2616 BUG_ON(ret < 0); 2617 btrfs_set_node_key(t, key, tslot); 2618 btrfs_mark_buffer_dirty(path->nodes[i]); 2619 if (tslot != 0) 2620 break; 2621 } 2622 } 2623 2624 /* 2625 * update item key. 2626 * 2627 * This function isn't completely safe. It's the caller's responsibility 2628 * that the new key won't break the order 2629 */ 2630 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info, 2631 struct btrfs_path *path, 2632 const struct btrfs_key *new_key) 2633 { 2634 struct btrfs_disk_key disk_key; 2635 struct extent_buffer *eb; 2636 int slot; 2637 2638 eb = path->nodes[0]; 2639 slot = path->slots[0]; 2640 if (slot > 0) { 2641 btrfs_item_key(eb, &disk_key, slot - 1); 2642 if (unlikely(comp_keys(&disk_key, new_key) >= 0)) { 2643 btrfs_print_leaf(eb); 2644 btrfs_crit(fs_info, 2645 "slot %u key (%llu %u %llu) new key (%llu %u %llu)", 2646 slot, btrfs_disk_key_objectid(&disk_key), 2647 btrfs_disk_key_type(&disk_key), 2648 btrfs_disk_key_offset(&disk_key), 2649 new_key->objectid, new_key->type, 2650 new_key->offset); 2651 BUG(); 2652 } 2653 } 2654 if (slot < btrfs_header_nritems(eb) - 1) { 2655 btrfs_item_key(eb, &disk_key, slot + 1); 2656 if (unlikely(comp_keys(&disk_key, new_key) <= 0)) { 2657 btrfs_print_leaf(eb); 2658 btrfs_crit(fs_info, 2659 "slot %u key (%llu %u %llu) new key (%llu %u %llu)", 2660 slot, btrfs_disk_key_objectid(&disk_key), 2661 btrfs_disk_key_type(&disk_key), 2662 btrfs_disk_key_offset(&disk_key), 2663 new_key->objectid, new_key->type, 2664 new_key->offset); 2665 BUG(); 2666 } 2667 } 2668 2669 btrfs_cpu_key_to_disk(&disk_key, new_key); 2670 btrfs_set_item_key(eb, &disk_key, slot); 2671 btrfs_mark_buffer_dirty(eb); 2672 if (slot == 0) 2673 fixup_low_keys(path, &disk_key, 1); 2674 } 2675 2676 /* 2677 * Check key order of two sibling extent buffers. 2678 * 2679 * Return true if something is wrong. 2680 * Return false if everything is fine. 2681 * 2682 * Tree-checker only works inside one tree block, thus the following 2683 * corruption can not be detected by tree-checker: 2684 * 2685 * Leaf @left | Leaf @right 2686 * -------------------------------------------------------------- 2687 * | 1 | 2 | 3 | 4 | 5 | f6 | | 7 | 8 | 2688 * 2689 * Key f6 in leaf @left itself is valid, but not valid when the next 2690 * key in leaf @right is 7. 2691 * This can only be checked at tree block merge time. 2692 * And since tree checker has ensured all key order in each tree block 2693 * is correct, we only need to bother the last key of @left and the first 2694 * key of @right. 2695 */ 2696 static bool check_sibling_keys(struct extent_buffer *left, 2697 struct extent_buffer *right) 2698 { 2699 struct btrfs_key left_last; 2700 struct btrfs_key right_first; 2701 int level = btrfs_header_level(left); 2702 int nr_left = btrfs_header_nritems(left); 2703 int nr_right = btrfs_header_nritems(right); 2704 2705 /* No key to check in one of the tree blocks */ 2706 if (!nr_left || !nr_right) 2707 return false; 2708 2709 if (level) { 2710 btrfs_node_key_to_cpu(left, &left_last, nr_left - 1); 2711 btrfs_node_key_to_cpu(right, &right_first, 0); 2712 } else { 2713 btrfs_item_key_to_cpu(left, &left_last, nr_left - 1); 2714 btrfs_item_key_to_cpu(right, &right_first, 0); 2715 } 2716 2717 if (unlikely(btrfs_comp_cpu_keys(&left_last, &right_first) >= 0)) { 2718 btrfs_crit(left->fs_info, "left extent buffer:"); 2719 btrfs_print_tree(left, false); 2720 btrfs_crit(left->fs_info, "right extent buffer:"); 2721 btrfs_print_tree(right, false); 2722 btrfs_crit(left->fs_info, 2723 "bad key order, sibling blocks, left last (%llu %u %llu) right first (%llu %u %llu)", 2724 left_last.objectid, left_last.type, 2725 left_last.offset, right_first.objectid, 2726 right_first.type, right_first.offset); 2727 return true; 2728 } 2729 return false; 2730 } 2731 2732 /* 2733 * try to push data from one node into the next node left in the 2734 * tree. 2735 * 2736 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible 2737 * error, and > 0 if there was no room in the left hand block. 2738 */ 2739 static int push_node_left(struct btrfs_trans_handle *trans, 2740 struct extent_buffer *dst, 2741 struct extent_buffer *src, int empty) 2742 { 2743 struct btrfs_fs_info *fs_info = trans->fs_info; 2744 int push_items = 0; 2745 int src_nritems; 2746 int dst_nritems; 2747 int ret = 0; 2748 2749 src_nritems = btrfs_header_nritems(src); 2750 dst_nritems = btrfs_header_nritems(dst); 2751 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems; 2752 WARN_ON(btrfs_header_generation(src) != trans->transid); 2753 WARN_ON(btrfs_header_generation(dst) != trans->transid); 2754 2755 if (!empty && src_nritems <= 8) 2756 return 1; 2757 2758 if (push_items <= 0) 2759 return 1; 2760 2761 if (empty) { 2762 push_items = min(src_nritems, push_items); 2763 if (push_items < src_nritems) { 2764 /* leave at least 8 pointers in the node if 2765 * we aren't going to empty it 2766 */ 2767 if (src_nritems - push_items < 8) { 2768 if (push_items <= 8) 2769 return 1; 2770 push_items -= 8; 2771 } 2772 } 2773 } else 2774 push_items = min(src_nritems - 8, push_items); 2775 2776 /* dst is the left eb, src is the middle eb */ 2777 if (check_sibling_keys(dst, src)) { 2778 ret = -EUCLEAN; 2779 btrfs_abort_transaction(trans, ret); 2780 return ret; 2781 } 2782 ret = btrfs_tree_mod_log_eb_copy(dst, src, dst_nritems, 0, push_items); 2783 if (ret) { 2784 btrfs_abort_transaction(trans, ret); 2785 return ret; 2786 } 2787 copy_extent_buffer(dst, src, 2788 btrfs_node_key_ptr_offset(dst, dst_nritems), 2789 btrfs_node_key_ptr_offset(src, 0), 2790 push_items * sizeof(struct btrfs_key_ptr)); 2791 2792 if (push_items < src_nritems) { 2793 /* 2794 * btrfs_tree_mod_log_eb_copy handles logging the move, so we 2795 * don't need to do an explicit tree mod log operation for it. 2796 */ 2797 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(src, 0), 2798 btrfs_node_key_ptr_offset(src, push_items), 2799 (src_nritems - push_items) * 2800 sizeof(struct btrfs_key_ptr)); 2801 } 2802 btrfs_set_header_nritems(src, src_nritems - push_items); 2803 btrfs_set_header_nritems(dst, dst_nritems + push_items); 2804 btrfs_mark_buffer_dirty(src); 2805 btrfs_mark_buffer_dirty(dst); 2806 2807 return ret; 2808 } 2809 2810 /* 2811 * try to push data from one node into the next node right in the 2812 * tree. 2813 * 2814 * returns 0 if some ptrs were pushed, < 0 if there was some horrible 2815 * error, and > 0 if there was no room in the right hand block. 2816 * 2817 * this will only push up to 1/2 the contents of the left node over 2818 */ 2819 static int balance_node_right(struct btrfs_trans_handle *trans, 2820 struct extent_buffer *dst, 2821 struct extent_buffer *src) 2822 { 2823 struct btrfs_fs_info *fs_info = trans->fs_info; 2824 int push_items = 0; 2825 int max_push; 2826 int src_nritems; 2827 int dst_nritems; 2828 int ret = 0; 2829 2830 WARN_ON(btrfs_header_generation(src) != trans->transid); 2831 WARN_ON(btrfs_header_generation(dst) != trans->transid); 2832 2833 src_nritems = btrfs_header_nritems(src); 2834 dst_nritems = btrfs_header_nritems(dst); 2835 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems; 2836 if (push_items <= 0) 2837 return 1; 2838 2839 if (src_nritems < 4) 2840 return 1; 2841 2842 max_push = src_nritems / 2 + 1; 2843 /* don't try to empty the node */ 2844 if (max_push >= src_nritems) 2845 return 1; 2846 2847 if (max_push < push_items) 2848 push_items = max_push; 2849 2850 /* dst is the right eb, src is the middle eb */ 2851 if (check_sibling_keys(src, dst)) { 2852 ret = -EUCLEAN; 2853 btrfs_abort_transaction(trans, ret); 2854 return ret; 2855 } 2856 2857 /* 2858 * btrfs_tree_mod_log_eb_copy handles logging the move, so we don't 2859 * need to do an explicit tree mod log operation for it. 2860 */ 2861 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(dst, push_items), 2862 btrfs_node_key_ptr_offset(dst, 0), 2863 (dst_nritems) * 2864 sizeof(struct btrfs_key_ptr)); 2865 2866 ret = btrfs_tree_mod_log_eb_copy(dst, src, 0, src_nritems - push_items, 2867 push_items); 2868 if (ret) { 2869 btrfs_abort_transaction(trans, ret); 2870 return ret; 2871 } 2872 copy_extent_buffer(dst, src, 2873 btrfs_node_key_ptr_offset(dst, 0), 2874 btrfs_node_key_ptr_offset(src, src_nritems - push_items), 2875 push_items * sizeof(struct btrfs_key_ptr)); 2876 2877 btrfs_set_header_nritems(src, src_nritems - push_items); 2878 btrfs_set_header_nritems(dst, dst_nritems + push_items); 2879 2880 btrfs_mark_buffer_dirty(src); 2881 btrfs_mark_buffer_dirty(dst); 2882 2883 return ret; 2884 } 2885 2886 /* 2887 * helper function to insert a new root level in the tree. 2888 * A new node is allocated, and a single item is inserted to 2889 * point to the existing root 2890 * 2891 * returns zero on success or < 0 on failure. 2892 */ 2893 static noinline int insert_new_root(struct btrfs_trans_handle *trans, 2894 struct btrfs_root *root, 2895 struct btrfs_path *path, int level) 2896 { 2897 struct btrfs_fs_info *fs_info = root->fs_info; 2898 u64 lower_gen; 2899 struct extent_buffer *lower; 2900 struct extent_buffer *c; 2901 struct extent_buffer *old; 2902 struct btrfs_disk_key lower_key; 2903 int ret; 2904 2905 BUG_ON(path->nodes[level]); 2906 BUG_ON(path->nodes[level-1] != root->node); 2907 2908 lower = path->nodes[level-1]; 2909 if (level == 1) 2910 btrfs_item_key(lower, &lower_key, 0); 2911 else 2912 btrfs_node_key(lower, &lower_key, 0); 2913 2914 c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, 2915 &lower_key, level, root->node->start, 0, 2916 BTRFS_NESTING_NEW_ROOT); 2917 if (IS_ERR(c)) 2918 return PTR_ERR(c); 2919 2920 root_add_used(root, fs_info->nodesize); 2921 2922 btrfs_set_header_nritems(c, 1); 2923 btrfs_set_node_key(c, &lower_key, 0); 2924 btrfs_set_node_blockptr(c, 0, lower->start); 2925 lower_gen = btrfs_header_generation(lower); 2926 WARN_ON(lower_gen != trans->transid); 2927 2928 btrfs_set_node_ptr_generation(c, 0, lower_gen); 2929 2930 btrfs_mark_buffer_dirty(c); 2931 2932 old = root->node; 2933 ret = btrfs_tree_mod_log_insert_root(root->node, c, false); 2934 BUG_ON(ret < 0); 2935 rcu_assign_pointer(root->node, c); 2936 2937 /* the super has an extra ref to root->node */ 2938 free_extent_buffer(old); 2939 2940 add_root_to_dirty_list(root); 2941 atomic_inc(&c->refs); 2942 path->nodes[level] = c; 2943 path->locks[level] = BTRFS_WRITE_LOCK; 2944 path->slots[level] = 0; 2945 return 0; 2946 } 2947 2948 /* 2949 * worker function to insert a single pointer in a node. 2950 * the node should have enough room for the pointer already 2951 * 2952 * slot and level indicate where you want the key to go, and 2953 * blocknr is the block the key points to. 2954 */ 2955 static void insert_ptr(struct btrfs_trans_handle *trans, 2956 struct btrfs_path *path, 2957 struct btrfs_disk_key *key, u64 bytenr, 2958 int slot, int level) 2959 { 2960 struct extent_buffer *lower; 2961 int nritems; 2962 int ret; 2963 2964 BUG_ON(!path->nodes[level]); 2965 btrfs_assert_tree_write_locked(path->nodes[level]); 2966 lower = path->nodes[level]; 2967 nritems = btrfs_header_nritems(lower); 2968 BUG_ON(slot > nritems); 2969 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(trans->fs_info)); 2970 if (slot != nritems) { 2971 if (level) { 2972 ret = btrfs_tree_mod_log_insert_move(lower, slot + 1, 2973 slot, nritems - slot); 2974 BUG_ON(ret < 0); 2975 } 2976 memmove_extent_buffer(lower, 2977 btrfs_node_key_ptr_offset(lower, slot + 1), 2978 btrfs_node_key_ptr_offset(lower, slot), 2979 (nritems - slot) * sizeof(struct btrfs_key_ptr)); 2980 } 2981 if (level) { 2982 ret = btrfs_tree_mod_log_insert_key(lower, slot, 2983 BTRFS_MOD_LOG_KEY_ADD); 2984 BUG_ON(ret < 0); 2985 } 2986 btrfs_set_node_key(lower, key, slot); 2987 btrfs_set_node_blockptr(lower, slot, bytenr); 2988 WARN_ON(trans->transid == 0); 2989 btrfs_set_node_ptr_generation(lower, slot, trans->transid); 2990 btrfs_set_header_nritems(lower, nritems + 1); 2991 btrfs_mark_buffer_dirty(lower); 2992 } 2993 2994 /* 2995 * split the node at the specified level in path in two. 2996 * The path is corrected to point to the appropriate node after the split 2997 * 2998 * Before splitting this tries to make some room in the node by pushing 2999 * left and right, if either one works, it returns right away. 3000 * 3001 * returns 0 on success and < 0 on failure 3002 */ 3003 static noinline int split_node(struct btrfs_trans_handle *trans, 3004 struct btrfs_root *root, 3005 struct btrfs_path *path, int level) 3006 { 3007 struct btrfs_fs_info *fs_info = root->fs_info; 3008 struct extent_buffer *c; 3009 struct extent_buffer *split; 3010 struct btrfs_disk_key disk_key; 3011 int mid; 3012 int ret; 3013 u32 c_nritems; 3014 3015 c = path->nodes[level]; 3016 WARN_ON(btrfs_header_generation(c) != trans->transid); 3017 if (c == root->node) { 3018 /* 3019 * trying to split the root, lets make a new one 3020 * 3021 * tree mod log: We don't log_removal old root in 3022 * insert_new_root, because that root buffer will be kept as a 3023 * normal node. We are going to log removal of half of the 3024 * elements below with btrfs_tree_mod_log_eb_copy(). We're 3025 * holding a tree lock on the buffer, which is why we cannot 3026 * race with other tree_mod_log users. 3027 */ 3028 ret = insert_new_root(trans, root, path, level + 1); 3029 if (ret) 3030 return ret; 3031 } else { 3032 ret = push_nodes_for_insert(trans, root, path, level); 3033 c = path->nodes[level]; 3034 if (!ret && btrfs_header_nritems(c) < 3035 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) 3036 return 0; 3037 if (ret < 0) 3038 return ret; 3039 } 3040 3041 c_nritems = btrfs_header_nritems(c); 3042 mid = (c_nritems + 1) / 2; 3043 btrfs_node_key(c, &disk_key, mid); 3044 3045 split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, 3046 &disk_key, level, c->start, 0, 3047 BTRFS_NESTING_SPLIT); 3048 if (IS_ERR(split)) 3049 return PTR_ERR(split); 3050 3051 root_add_used(root, fs_info->nodesize); 3052 ASSERT(btrfs_header_level(c) == level); 3053 3054 ret = btrfs_tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid); 3055 if (ret) { 3056 btrfs_tree_unlock(split); 3057 free_extent_buffer(split); 3058 btrfs_abort_transaction(trans, ret); 3059 return ret; 3060 } 3061 copy_extent_buffer(split, c, 3062 btrfs_node_key_ptr_offset(split, 0), 3063 btrfs_node_key_ptr_offset(c, mid), 3064 (c_nritems - mid) * sizeof(struct btrfs_key_ptr)); 3065 btrfs_set_header_nritems(split, c_nritems - mid); 3066 btrfs_set_header_nritems(c, mid); 3067 3068 btrfs_mark_buffer_dirty(c); 3069 btrfs_mark_buffer_dirty(split); 3070 3071 insert_ptr(trans, path, &disk_key, split->start, 3072 path->slots[level + 1] + 1, level + 1); 3073 3074 if (path->slots[level] >= mid) { 3075 path->slots[level] -= mid; 3076 btrfs_tree_unlock(c); 3077 free_extent_buffer(c); 3078 path->nodes[level] = split; 3079 path->slots[level + 1] += 1; 3080 } else { 3081 btrfs_tree_unlock(split); 3082 free_extent_buffer(split); 3083 } 3084 return 0; 3085 } 3086 3087 /* 3088 * how many bytes are required to store the items in a leaf. start 3089 * and nr indicate which items in the leaf to check. This totals up the 3090 * space used both by the item structs and the item data 3091 */ 3092 static int leaf_space_used(const struct extent_buffer *l, int start, int nr) 3093 { 3094 int data_len; 3095 int nritems = btrfs_header_nritems(l); 3096 int end = min(nritems, start + nr) - 1; 3097 3098 if (!nr) 3099 return 0; 3100 data_len = btrfs_item_offset(l, start) + btrfs_item_size(l, start); 3101 data_len = data_len - btrfs_item_offset(l, end); 3102 data_len += sizeof(struct btrfs_item) * nr; 3103 WARN_ON(data_len < 0); 3104 return data_len; 3105 } 3106 3107 /* 3108 * The space between the end of the leaf items and 3109 * the start of the leaf data. IOW, how much room 3110 * the leaf has left for both items and data 3111 */ 3112 int btrfs_leaf_free_space(const struct extent_buffer *leaf) 3113 { 3114 struct btrfs_fs_info *fs_info = leaf->fs_info; 3115 int nritems = btrfs_header_nritems(leaf); 3116 int ret; 3117 3118 ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems); 3119 if (ret < 0) { 3120 btrfs_crit(fs_info, 3121 "leaf free space ret %d, leaf data size %lu, used %d nritems %d", 3122 ret, 3123 (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info), 3124 leaf_space_used(leaf, 0, nritems), nritems); 3125 } 3126 return ret; 3127 } 3128 3129 /* 3130 * min slot controls the lowest index we're willing to push to the 3131 * right. We'll push up to and including min_slot, but no lower 3132 */ 3133 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans, 3134 struct btrfs_path *path, 3135 int data_size, int empty, 3136 struct extent_buffer *right, 3137 int free_space, u32 left_nritems, 3138 u32 min_slot) 3139 { 3140 struct btrfs_fs_info *fs_info = right->fs_info; 3141 struct extent_buffer *left = path->nodes[0]; 3142 struct extent_buffer *upper = path->nodes[1]; 3143 struct btrfs_map_token token; 3144 struct btrfs_disk_key disk_key; 3145 int slot; 3146 u32 i; 3147 int push_space = 0; 3148 int push_items = 0; 3149 u32 nr; 3150 u32 right_nritems; 3151 u32 data_end; 3152 u32 this_item_size; 3153 3154 if (empty) 3155 nr = 0; 3156 else 3157 nr = max_t(u32, 1, min_slot); 3158 3159 if (path->slots[0] >= left_nritems) 3160 push_space += data_size; 3161 3162 slot = path->slots[1]; 3163 i = left_nritems - 1; 3164 while (i >= nr) { 3165 if (!empty && push_items > 0) { 3166 if (path->slots[0] > i) 3167 break; 3168 if (path->slots[0] == i) { 3169 int space = btrfs_leaf_free_space(left); 3170 3171 if (space + push_space * 2 > free_space) 3172 break; 3173 } 3174 } 3175 3176 if (path->slots[0] == i) 3177 push_space += data_size; 3178 3179 this_item_size = btrfs_item_size(left, i); 3180 if (this_item_size + sizeof(struct btrfs_item) + 3181 push_space > free_space) 3182 break; 3183 3184 push_items++; 3185 push_space += this_item_size + sizeof(struct btrfs_item); 3186 if (i == 0) 3187 break; 3188 i--; 3189 } 3190 3191 if (push_items == 0) 3192 goto out_unlock; 3193 3194 WARN_ON(!empty && push_items == left_nritems); 3195 3196 /* push left to right */ 3197 right_nritems = btrfs_header_nritems(right); 3198 3199 push_space = btrfs_item_data_end(left, left_nritems - push_items); 3200 push_space -= leaf_data_end(left); 3201 3202 /* make room in the right data area */ 3203 data_end = leaf_data_end(right); 3204 memmove_leaf_data(right, data_end - push_space, data_end, 3205 BTRFS_LEAF_DATA_SIZE(fs_info) - data_end); 3206 3207 /* copy from the left data area */ 3208 copy_leaf_data(right, left, BTRFS_LEAF_DATA_SIZE(fs_info) - push_space, 3209 leaf_data_end(left), push_space); 3210 3211 memmove_leaf_items(right, push_items, 0, right_nritems); 3212 3213 /* copy the items from left to right */ 3214 copy_leaf_items(right, left, 0, left_nritems - push_items, push_items); 3215 3216 /* update the item pointers */ 3217 btrfs_init_map_token(&token, right); 3218 right_nritems += push_items; 3219 btrfs_set_header_nritems(right, right_nritems); 3220 push_space = BTRFS_LEAF_DATA_SIZE(fs_info); 3221 for (i = 0; i < right_nritems; i++) { 3222 push_space -= btrfs_token_item_size(&token, i); 3223 btrfs_set_token_item_offset(&token, i, push_space); 3224 } 3225 3226 left_nritems -= push_items; 3227 btrfs_set_header_nritems(left, left_nritems); 3228 3229 if (left_nritems) 3230 btrfs_mark_buffer_dirty(left); 3231 else 3232 btrfs_clear_buffer_dirty(trans, left); 3233 3234 btrfs_mark_buffer_dirty(right); 3235 3236 btrfs_item_key(right, &disk_key, 0); 3237 btrfs_set_node_key(upper, &disk_key, slot + 1); 3238 btrfs_mark_buffer_dirty(upper); 3239 3240 /* then fixup the leaf pointer in the path */ 3241 if (path->slots[0] >= left_nritems) { 3242 path->slots[0] -= left_nritems; 3243 if (btrfs_header_nritems(path->nodes[0]) == 0) 3244 btrfs_clear_buffer_dirty(trans, path->nodes[0]); 3245 btrfs_tree_unlock(path->nodes[0]); 3246 free_extent_buffer(path->nodes[0]); 3247 path->nodes[0] = right; 3248 path->slots[1] += 1; 3249 } else { 3250 btrfs_tree_unlock(right); 3251 free_extent_buffer(right); 3252 } 3253 return 0; 3254 3255 out_unlock: 3256 btrfs_tree_unlock(right); 3257 free_extent_buffer(right); 3258 return 1; 3259 } 3260 3261 /* 3262 * push some data in the path leaf to the right, trying to free up at 3263 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3264 * 3265 * returns 1 if the push failed because the other node didn't have enough 3266 * room, 0 if everything worked out and < 0 if there were major errors. 3267 * 3268 * this will push starting from min_slot to the end of the leaf. It won't 3269 * push any slot lower than min_slot 3270 */ 3271 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root 3272 *root, struct btrfs_path *path, 3273 int min_data_size, int data_size, 3274 int empty, u32 min_slot) 3275 { 3276 struct extent_buffer *left = path->nodes[0]; 3277 struct extent_buffer *right; 3278 struct extent_buffer *upper; 3279 int slot; 3280 int free_space; 3281 u32 left_nritems; 3282 int ret; 3283 3284 if (!path->nodes[1]) 3285 return 1; 3286 3287 slot = path->slots[1]; 3288 upper = path->nodes[1]; 3289 if (slot >= btrfs_header_nritems(upper) - 1) 3290 return 1; 3291 3292 btrfs_assert_tree_write_locked(path->nodes[1]); 3293 3294 right = btrfs_read_node_slot(upper, slot + 1); 3295 if (IS_ERR(right)) 3296 return PTR_ERR(right); 3297 3298 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT); 3299 3300 free_space = btrfs_leaf_free_space(right); 3301 if (free_space < data_size) 3302 goto out_unlock; 3303 3304 ret = btrfs_cow_block(trans, root, right, upper, 3305 slot + 1, &right, BTRFS_NESTING_RIGHT_COW); 3306 if (ret) 3307 goto out_unlock; 3308 3309 left_nritems = btrfs_header_nritems(left); 3310 if (left_nritems == 0) 3311 goto out_unlock; 3312 3313 if (check_sibling_keys(left, right)) { 3314 ret = -EUCLEAN; 3315 btrfs_abort_transaction(trans, ret); 3316 btrfs_tree_unlock(right); 3317 free_extent_buffer(right); 3318 return ret; 3319 } 3320 if (path->slots[0] == left_nritems && !empty) { 3321 /* Key greater than all keys in the leaf, right neighbor has 3322 * enough room for it and we're not emptying our leaf to delete 3323 * it, therefore use right neighbor to insert the new item and 3324 * no need to touch/dirty our left leaf. */ 3325 btrfs_tree_unlock(left); 3326 free_extent_buffer(left); 3327 path->nodes[0] = right; 3328 path->slots[0] = 0; 3329 path->slots[1]++; 3330 return 0; 3331 } 3332 3333 return __push_leaf_right(trans, path, min_data_size, empty, right, 3334 free_space, left_nritems, min_slot); 3335 out_unlock: 3336 btrfs_tree_unlock(right); 3337 free_extent_buffer(right); 3338 return 1; 3339 } 3340 3341 /* 3342 * push some data in the path leaf to the left, trying to free up at 3343 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3344 * 3345 * max_slot can put a limit on how far into the leaf we'll push items. The 3346 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the 3347 * items 3348 */ 3349 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans, 3350 struct btrfs_path *path, int data_size, 3351 int empty, struct extent_buffer *left, 3352 int free_space, u32 right_nritems, 3353 u32 max_slot) 3354 { 3355 struct btrfs_fs_info *fs_info = left->fs_info; 3356 struct btrfs_disk_key disk_key; 3357 struct extent_buffer *right = path->nodes[0]; 3358 int i; 3359 int push_space = 0; 3360 int push_items = 0; 3361 u32 old_left_nritems; 3362 u32 nr; 3363 int ret = 0; 3364 u32 this_item_size; 3365 u32 old_left_item_size; 3366 struct btrfs_map_token token; 3367 3368 if (empty) 3369 nr = min(right_nritems, max_slot); 3370 else 3371 nr = min(right_nritems - 1, max_slot); 3372 3373 for (i = 0; i < nr; i++) { 3374 if (!empty && push_items > 0) { 3375 if (path->slots[0] < i) 3376 break; 3377 if (path->slots[0] == i) { 3378 int space = btrfs_leaf_free_space(right); 3379 3380 if (space + push_space * 2 > free_space) 3381 break; 3382 } 3383 } 3384 3385 if (path->slots[0] == i) 3386 push_space += data_size; 3387 3388 this_item_size = btrfs_item_size(right, i); 3389 if (this_item_size + sizeof(struct btrfs_item) + push_space > 3390 free_space) 3391 break; 3392 3393 push_items++; 3394 push_space += this_item_size + sizeof(struct btrfs_item); 3395 } 3396 3397 if (push_items == 0) { 3398 ret = 1; 3399 goto out; 3400 } 3401 WARN_ON(!empty && push_items == btrfs_header_nritems(right)); 3402 3403 /* push data from right to left */ 3404 copy_leaf_items(left, right, btrfs_header_nritems(left), 0, push_items); 3405 3406 push_space = BTRFS_LEAF_DATA_SIZE(fs_info) - 3407 btrfs_item_offset(right, push_items - 1); 3408 3409 copy_leaf_data(left, right, leaf_data_end(left) - push_space, 3410 btrfs_item_offset(right, push_items - 1), push_space); 3411 old_left_nritems = btrfs_header_nritems(left); 3412 BUG_ON(old_left_nritems <= 0); 3413 3414 btrfs_init_map_token(&token, left); 3415 old_left_item_size = btrfs_item_offset(left, old_left_nritems - 1); 3416 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) { 3417 u32 ioff; 3418 3419 ioff = btrfs_token_item_offset(&token, i); 3420 btrfs_set_token_item_offset(&token, i, 3421 ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size)); 3422 } 3423 btrfs_set_header_nritems(left, old_left_nritems + push_items); 3424 3425 /* fixup right node */ 3426 if (push_items > right_nritems) 3427 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items, 3428 right_nritems); 3429 3430 if (push_items < right_nritems) { 3431 push_space = btrfs_item_offset(right, push_items - 1) - 3432 leaf_data_end(right); 3433 memmove_leaf_data(right, 3434 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space, 3435 leaf_data_end(right), push_space); 3436 3437 memmove_leaf_items(right, 0, push_items, 3438 btrfs_header_nritems(right) - push_items); 3439 } 3440 3441 btrfs_init_map_token(&token, right); 3442 right_nritems -= push_items; 3443 btrfs_set_header_nritems(right, right_nritems); 3444 push_space = BTRFS_LEAF_DATA_SIZE(fs_info); 3445 for (i = 0; i < right_nritems; i++) { 3446 push_space = push_space - btrfs_token_item_size(&token, i); 3447 btrfs_set_token_item_offset(&token, i, push_space); 3448 } 3449 3450 btrfs_mark_buffer_dirty(left); 3451 if (right_nritems) 3452 btrfs_mark_buffer_dirty(right); 3453 else 3454 btrfs_clear_buffer_dirty(trans, right); 3455 3456 btrfs_item_key(right, &disk_key, 0); 3457 fixup_low_keys(path, &disk_key, 1); 3458 3459 /* then fixup the leaf pointer in the path */ 3460 if (path->slots[0] < push_items) { 3461 path->slots[0] += old_left_nritems; 3462 btrfs_tree_unlock(path->nodes[0]); 3463 free_extent_buffer(path->nodes[0]); 3464 path->nodes[0] = left; 3465 path->slots[1] -= 1; 3466 } else { 3467 btrfs_tree_unlock(left); 3468 free_extent_buffer(left); 3469 path->slots[0] -= push_items; 3470 } 3471 BUG_ON(path->slots[0] < 0); 3472 return ret; 3473 out: 3474 btrfs_tree_unlock(left); 3475 free_extent_buffer(left); 3476 return ret; 3477 } 3478 3479 /* 3480 * push some data in the path leaf to the left, trying to free up at 3481 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3482 * 3483 * max_slot can put a limit on how far into the leaf we'll push items. The 3484 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the 3485 * items 3486 */ 3487 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root 3488 *root, struct btrfs_path *path, int min_data_size, 3489 int data_size, int empty, u32 max_slot) 3490 { 3491 struct extent_buffer *right = path->nodes[0]; 3492 struct extent_buffer *left; 3493 int slot; 3494 int free_space; 3495 u32 right_nritems; 3496 int ret = 0; 3497 3498 slot = path->slots[1]; 3499 if (slot == 0) 3500 return 1; 3501 if (!path->nodes[1]) 3502 return 1; 3503 3504 right_nritems = btrfs_header_nritems(right); 3505 if (right_nritems == 0) 3506 return 1; 3507 3508 btrfs_assert_tree_write_locked(path->nodes[1]); 3509 3510 left = btrfs_read_node_slot(path->nodes[1], slot - 1); 3511 if (IS_ERR(left)) 3512 return PTR_ERR(left); 3513 3514 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT); 3515 3516 free_space = btrfs_leaf_free_space(left); 3517 if (free_space < data_size) { 3518 ret = 1; 3519 goto out; 3520 } 3521 3522 ret = btrfs_cow_block(trans, root, left, 3523 path->nodes[1], slot - 1, &left, 3524 BTRFS_NESTING_LEFT_COW); 3525 if (ret) { 3526 /* we hit -ENOSPC, but it isn't fatal here */ 3527 if (ret == -ENOSPC) 3528 ret = 1; 3529 goto out; 3530 } 3531 3532 if (check_sibling_keys(left, right)) { 3533 ret = -EUCLEAN; 3534 btrfs_abort_transaction(trans, ret); 3535 goto out; 3536 } 3537 return __push_leaf_left(trans, path, min_data_size, empty, left, 3538 free_space, right_nritems, max_slot); 3539 out: 3540 btrfs_tree_unlock(left); 3541 free_extent_buffer(left); 3542 return ret; 3543 } 3544 3545 /* 3546 * split the path's leaf in two, making sure there is at least data_size 3547 * available for the resulting leaf level of the path. 3548 */ 3549 static noinline void copy_for_split(struct btrfs_trans_handle *trans, 3550 struct btrfs_path *path, 3551 struct extent_buffer *l, 3552 struct extent_buffer *right, 3553 int slot, int mid, int nritems) 3554 { 3555 struct btrfs_fs_info *fs_info = trans->fs_info; 3556 int data_copy_size; 3557 int rt_data_off; 3558 int i; 3559 struct btrfs_disk_key disk_key; 3560 struct btrfs_map_token token; 3561 3562 nritems = nritems - mid; 3563 btrfs_set_header_nritems(right, nritems); 3564 data_copy_size = btrfs_item_data_end(l, mid) - leaf_data_end(l); 3565 3566 copy_leaf_items(right, l, 0, mid, nritems); 3567 3568 copy_leaf_data(right, l, BTRFS_LEAF_DATA_SIZE(fs_info) - data_copy_size, 3569 leaf_data_end(l), data_copy_size); 3570 3571 rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_data_end(l, mid); 3572 3573 btrfs_init_map_token(&token, right); 3574 for (i = 0; i < nritems; i++) { 3575 u32 ioff; 3576 3577 ioff = btrfs_token_item_offset(&token, i); 3578 btrfs_set_token_item_offset(&token, i, ioff + rt_data_off); 3579 } 3580 3581 btrfs_set_header_nritems(l, mid); 3582 btrfs_item_key(right, &disk_key, 0); 3583 insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1); 3584 3585 btrfs_mark_buffer_dirty(right); 3586 btrfs_mark_buffer_dirty(l); 3587 BUG_ON(path->slots[0] != slot); 3588 3589 if (mid <= slot) { 3590 btrfs_tree_unlock(path->nodes[0]); 3591 free_extent_buffer(path->nodes[0]); 3592 path->nodes[0] = right; 3593 path->slots[0] -= mid; 3594 path->slots[1] += 1; 3595 } else { 3596 btrfs_tree_unlock(right); 3597 free_extent_buffer(right); 3598 } 3599 3600 BUG_ON(path->slots[0] < 0); 3601 } 3602 3603 /* 3604 * double splits happen when we need to insert a big item in the middle 3605 * of a leaf. A double split can leave us with 3 mostly empty leaves: 3606 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ] 3607 * A B C 3608 * 3609 * We avoid this by trying to push the items on either side of our target 3610 * into the adjacent leaves. If all goes well we can avoid the double split 3611 * completely. 3612 */ 3613 static noinline int push_for_double_split(struct btrfs_trans_handle *trans, 3614 struct btrfs_root *root, 3615 struct btrfs_path *path, 3616 int data_size) 3617 { 3618 int ret; 3619 int progress = 0; 3620 int slot; 3621 u32 nritems; 3622 int space_needed = data_size; 3623 3624 slot = path->slots[0]; 3625 if (slot < btrfs_header_nritems(path->nodes[0])) 3626 space_needed -= btrfs_leaf_free_space(path->nodes[0]); 3627 3628 /* 3629 * try to push all the items after our slot into the 3630 * right leaf 3631 */ 3632 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot); 3633 if (ret < 0) 3634 return ret; 3635 3636 if (ret == 0) 3637 progress++; 3638 3639 nritems = btrfs_header_nritems(path->nodes[0]); 3640 /* 3641 * our goal is to get our slot at the start or end of a leaf. If 3642 * we've done so we're done 3643 */ 3644 if (path->slots[0] == 0 || path->slots[0] == nritems) 3645 return 0; 3646 3647 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size) 3648 return 0; 3649 3650 /* try to push all the items before our slot into the next leaf */ 3651 slot = path->slots[0]; 3652 space_needed = data_size; 3653 if (slot > 0) 3654 space_needed -= btrfs_leaf_free_space(path->nodes[0]); 3655 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot); 3656 if (ret < 0) 3657 return ret; 3658 3659 if (ret == 0) 3660 progress++; 3661 3662 if (progress) 3663 return 0; 3664 return 1; 3665 } 3666 3667 /* 3668 * split the path's leaf in two, making sure there is at least data_size 3669 * available for the resulting leaf level of the path. 3670 * 3671 * returns 0 if all went well and < 0 on failure. 3672 */ 3673 static noinline int split_leaf(struct btrfs_trans_handle *trans, 3674 struct btrfs_root *root, 3675 const struct btrfs_key *ins_key, 3676 struct btrfs_path *path, int data_size, 3677 int extend) 3678 { 3679 struct btrfs_disk_key disk_key; 3680 struct extent_buffer *l; 3681 u32 nritems; 3682 int mid; 3683 int slot; 3684 struct extent_buffer *right; 3685 struct btrfs_fs_info *fs_info = root->fs_info; 3686 int ret = 0; 3687 int wret; 3688 int split; 3689 int num_doubles = 0; 3690 int tried_avoid_double = 0; 3691 3692 l = path->nodes[0]; 3693 slot = path->slots[0]; 3694 if (extend && data_size + btrfs_item_size(l, slot) + 3695 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info)) 3696 return -EOVERFLOW; 3697 3698 /* first try to make some room by pushing left and right */ 3699 if (data_size && path->nodes[1]) { 3700 int space_needed = data_size; 3701 3702 if (slot < btrfs_header_nritems(l)) 3703 space_needed -= btrfs_leaf_free_space(l); 3704 3705 wret = push_leaf_right(trans, root, path, space_needed, 3706 space_needed, 0, 0); 3707 if (wret < 0) 3708 return wret; 3709 if (wret) { 3710 space_needed = data_size; 3711 if (slot > 0) 3712 space_needed -= btrfs_leaf_free_space(l); 3713 wret = push_leaf_left(trans, root, path, space_needed, 3714 space_needed, 0, (u32)-1); 3715 if (wret < 0) 3716 return wret; 3717 } 3718 l = path->nodes[0]; 3719 3720 /* did the pushes work? */ 3721 if (btrfs_leaf_free_space(l) >= data_size) 3722 return 0; 3723 } 3724 3725 if (!path->nodes[1]) { 3726 ret = insert_new_root(trans, root, path, 1); 3727 if (ret) 3728 return ret; 3729 } 3730 again: 3731 split = 1; 3732 l = path->nodes[0]; 3733 slot = path->slots[0]; 3734 nritems = btrfs_header_nritems(l); 3735 mid = (nritems + 1) / 2; 3736 3737 if (mid <= slot) { 3738 if (nritems == 1 || 3739 leaf_space_used(l, mid, nritems - mid) + data_size > 3740 BTRFS_LEAF_DATA_SIZE(fs_info)) { 3741 if (slot >= nritems) { 3742 split = 0; 3743 } else { 3744 mid = slot; 3745 if (mid != nritems && 3746 leaf_space_used(l, mid, nritems - mid) + 3747 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) { 3748 if (data_size && !tried_avoid_double) 3749 goto push_for_double; 3750 split = 2; 3751 } 3752 } 3753 } 3754 } else { 3755 if (leaf_space_used(l, 0, mid) + data_size > 3756 BTRFS_LEAF_DATA_SIZE(fs_info)) { 3757 if (!extend && data_size && slot == 0) { 3758 split = 0; 3759 } else if ((extend || !data_size) && slot == 0) { 3760 mid = 1; 3761 } else { 3762 mid = slot; 3763 if (mid != nritems && 3764 leaf_space_used(l, mid, nritems - mid) + 3765 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) { 3766 if (data_size && !tried_avoid_double) 3767 goto push_for_double; 3768 split = 2; 3769 } 3770 } 3771 } 3772 } 3773 3774 if (split == 0) 3775 btrfs_cpu_key_to_disk(&disk_key, ins_key); 3776 else 3777 btrfs_item_key(l, &disk_key, mid); 3778 3779 /* 3780 * We have to about BTRFS_NESTING_NEW_ROOT here if we've done a double 3781 * split, because we're only allowed to have MAX_LOCKDEP_SUBCLASSES 3782 * subclasses, which is 8 at the time of this patch, and we've maxed it 3783 * out. In the future we could add a 3784 * BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just 3785 * use BTRFS_NESTING_NEW_ROOT. 3786 */ 3787 right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, 3788 &disk_key, 0, l->start, 0, 3789 num_doubles ? BTRFS_NESTING_NEW_ROOT : 3790 BTRFS_NESTING_SPLIT); 3791 if (IS_ERR(right)) 3792 return PTR_ERR(right); 3793 3794 root_add_used(root, fs_info->nodesize); 3795 3796 if (split == 0) { 3797 if (mid <= slot) { 3798 btrfs_set_header_nritems(right, 0); 3799 insert_ptr(trans, path, &disk_key, 3800 right->start, path->slots[1] + 1, 1); 3801 btrfs_tree_unlock(path->nodes[0]); 3802 free_extent_buffer(path->nodes[0]); 3803 path->nodes[0] = right; 3804 path->slots[0] = 0; 3805 path->slots[1] += 1; 3806 } else { 3807 btrfs_set_header_nritems(right, 0); 3808 insert_ptr(trans, path, &disk_key, 3809 right->start, path->slots[1], 1); 3810 btrfs_tree_unlock(path->nodes[0]); 3811 free_extent_buffer(path->nodes[0]); 3812 path->nodes[0] = right; 3813 path->slots[0] = 0; 3814 if (path->slots[1] == 0) 3815 fixup_low_keys(path, &disk_key, 1); 3816 } 3817 /* 3818 * We create a new leaf 'right' for the required ins_len and 3819 * we'll do btrfs_mark_buffer_dirty() on this leaf after copying 3820 * the content of ins_len to 'right'. 3821 */ 3822 return ret; 3823 } 3824 3825 copy_for_split(trans, path, l, right, slot, mid, nritems); 3826 3827 if (split == 2) { 3828 BUG_ON(num_doubles != 0); 3829 num_doubles++; 3830 goto again; 3831 } 3832 3833 return 0; 3834 3835 push_for_double: 3836 push_for_double_split(trans, root, path, data_size); 3837 tried_avoid_double = 1; 3838 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size) 3839 return 0; 3840 goto again; 3841 } 3842 3843 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans, 3844 struct btrfs_root *root, 3845 struct btrfs_path *path, int ins_len) 3846 { 3847 struct btrfs_key key; 3848 struct extent_buffer *leaf; 3849 struct btrfs_file_extent_item *fi; 3850 u64 extent_len = 0; 3851 u32 item_size; 3852 int ret; 3853 3854 leaf = path->nodes[0]; 3855 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3856 3857 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY && 3858 key.type != BTRFS_EXTENT_CSUM_KEY); 3859 3860 if (btrfs_leaf_free_space(leaf) >= ins_len) 3861 return 0; 3862 3863 item_size = btrfs_item_size(leaf, path->slots[0]); 3864 if (key.type == BTRFS_EXTENT_DATA_KEY) { 3865 fi = btrfs_item_ptr(leaf, path->slots[0], 3866 struct btrfs_file_extent_item); 3867 extent_len = btrfs_file_extent_num_bytes(leaf, fi); 3868 } 3869 btrfs_release_path(path); 3870 3871 path->keep_locks = 1; 3872 path->search_for_split = 1; 3873 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 3874 path->search_for_split = 0; 3875 if (ret > 0) 3876 ret = -EAGAIN; 3877 if (ret < 0) 3878 goto err; 3879 3880 ret = -EAGAIN; 3881 leaf = path->nodes[0]; 3882 /* if our item isn't there, return now */ 3883 if (item_size != btrfs_item_size(leaf, path->slots[0])) 3884 goto err; 3885 3886 /* the leaf has changed, it now has room. return now */ 3887 if (btrfs_leaf_free_space(path->nodes[0]) >= ins_len) 3888 goto err; 3889 3890 if (key.type == BTRFS_EXTENT_DATA_KEY) { 3891 fi = btrfs_item_ptr(leaf, path->slots[0], 3892 struct btrfs_file_extent_item); 3893 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi)) 3894 goto err; 3895 } 3896 3897 ret = split_leaf(trans, root, &key, path, ins_len, 1); 3898 if (ret) 3899 goto err; 3900 3901 path->keep_locks = 0; 3902 btrfs_unlock_up_safe(path, 1); 3903 return 0; 3904 err: 3905 path->keep_locks = 0; 3906 return ret; 3907 } 3908 3909 static noinline int split_item(struct btrfs_path *path, 3910 const struct btrfs_key *new_key, 3911 unsigned long split_offset) 3912 { 3913 struct extent_buffer *leaf; 3914 int orig_slot, slot; 3915 char *buf; 3916 u32 nritems; 3917 u32 item_size; 3918 u32 orig_offset; 3919 struct btrfs_disk_key disk_key; 3920 3921 leaf = path->nodes[0]; 3922 BUG_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item)); 3923 3924 orig_slot = path->slots[0]; 3925 orig_offset = btrfs_item_offset(leaf, path->slots[0]); 3926 item_size = btrfs_item_size(leaf, path->slots[0]); 3927 3928 buf = kmalloc(item_size, GFP_NOFS); 3929 if (!buf) 3930 return -ENOMEM; 3931 3932 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, 3933 path->slots[0]), item_size); 3934 3935 slot = path->slots[0] + 1; 3936 nritems = btrfs_header_nritems(leaf); 3937 if (slot != nritems) { 3938 /* shift the items */ 3939 memmove_leaf_items(leaf, slot + 1, slot, nritems - slot); 3940 } 3941 3942 btrfs_cpu_key_to_disk(&disk_key, new_key); 3943 btrfs_set_item_key(leaf, &disk_key, slot); 3944 3945 btrfs_set_item_offset(leaf, slot, orig_offset); 3946 btrfs_set_item_size(leaf, slot, item_size - split_offset); 3947 3948 btrfs_set_item_offset(leaf, orig_slot, 3949 orig_offset + item_size - split_offset); 3950 btrfs_set_item_size(leaf, orig_slot, split_offset); 3951 3952 btrfs_set_header_nritems(leaf, nritems + 1); 3953 3954 /* write the data for the start of the original item */ 3955 write_extent_buffer(leaf, buf, 3956 btrfs_item_ptr_offset(leaf, path->slots[0]), 3957 split_offset); 3958 3959 /* write the data for the new item */ 3960 write_extent_buffer(leaf, buf + split_offset, 3961 btrfs_item_ptr_offset(leaf, slot), 3962 item_size - split_offset); 3963 btrfs_mark_buffer_dirty(leaf); 3964 3965 BUG_ON(btrfs_leaf_free_space(leaf) < 0); 3966 kfree(buf); 3967 return 0; 3968 } 3969 3970 /* 3971 * This function splits a single item into two items, 3972 * giving 'new_key' to the new item and splitting the 3973 * old one at split_offset (from the start of the item). 3974 * 3975 * The path may be released by this operation. After 3976 * the split, the path is pointing to the old item. The 3977 * new item is going to be in the same node as the old one. 3978 * 3979 * Note, the item being split must be smaller enough to live alone on 3980 * a tree block with room for one extra struct btrfs_item 3981 * 3982 * This allows us to split the item in place, keeping a lock on the 3983 * leaf the entire time. 3984 */ 3985 int btrfs_split_item(struct btrfs_trans_handle *trans, 3986 struct btrfs_root *root, 3987 struct btrfs_path *path, 3988 const struct btrfs_key *new_key, 3989 unsigned long split_offset) 3990 { 3991 int ret; 3992 ret = setup_leaf_for_split(trans, root, path, 3993 sizeof(struct btrfs_item)); 3994 if (ret) 3995 return ret; 3996 3997 ret = split_item(path, new_key, split_offset); 3998 return ret; 3999 } 4000 4001 /* 4002 * make the item pointed to by the path smaller. new_size indicates 4003 * how small to make it, and from_end tells us if we just chop bytes 4004 * off the end of the item or if we shift the item to chop bytes off 4005 * the front. 4006 */ 4007 void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end) 4008 { 4009 int slot; 4010 struct extent_buffer *leaf; 4011 u32 nritems; 4012 unsigned int data_end; 4013 unsigned int old_data_start; 4014 unsigned int old_size; 4015 unsigned int size_diff; 4016 int i; 4017 struct btrfs_map_token token; 4018 4019 leaf = path->nodes[0]; 4020 slot = path->slots[0]; 4021 4022 old_size = btrfs_item_size(leaf, slot); 4023 if (old_size == new_size) 4024 return; 4025 4026 nritems = btrfs_header_nritems(leaf); 4027 data_end = leaf_data_end(leaf); 4028 4029 old_data_start = btrfs_item_offset(leaf, slot); 4030 4031 size_diff = old_size - new_size; 4032 4033 BUG_ON(slot < 0); 4034 BUG_ON(slot >= nritems); 4035 4036 /* 4037 * item0..itemN ... dataN.offset..dataN.size .. data0.size 4038 */ 4039 /* first correct the data pointers */ 4040 btrfs_init_map_token(&token, leaf); 4041 for (i = slot; i < nritems; i++) { 4042 u32 ioff; 4043 4044 ioff = btrfs_token_item_offset(&token, i); 4045 btrfs_set_token_item_offset(&token, i, ioff + size_diff); 4046 } 4047 4048 /* shift the data */ 4049 if (from_end) { 4050 memmove_leaf_data(leaf, data_end + size_diff, data_end, 4051 old_data_start + new_size - data_end); 4052 } else { 4053 struct btrfs_disk_key disk_key; 4054 u64 offset; 4055 4056 btrfs_item_key(leaf, &disk_key, slot); 4057 4058 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) { 4059 unsigned long ptr; 4060 struct btrfs_file_extent_item *fi; 4061 4062 fi = btrfs_item_ptr(leaf, slot, 4063 struct btrfs_file_extent_item); 4064 fi = (struct btrfs_file_extent_item *)( 4065 (unsigned long)fi - size_diff); 4066 4067 if (btrfs_file_extent_type(leaf, fi) == 4068 BTRFS_FILE_EXTENT_INLINE) { 4069 ptr = btrfs_item_ptr_offset(leaf, slot); 4070 memmove_extent_buffer(leaf, ptr, 4071 (unsigned long)fi, 4072 BTRFS_FILE_EXTENT_INLINE_DATA_START); 4073 } 4074 } 4075 4076 memmove_leaf_data(leaf, data_end + size_diff, data_end, 4077 old_data_start - data_end); 4078 4079 offset = btrfs_disk_key_offset(&disk_key); 4080 btrfs_set_disk_key_offset(&disk_key, offset + size_diff); 4081 btrfs_set_item_key(leaf, &disk_key, slot); 4082 if (slot == 0) 4083 fixup_low_keys(path, &disk_key, 1); 4084 } 4085 4086 btrfs_set_item_size(leaf, slot, new_size); 4087 btrfs_mark_buffer_dirty(leaf); 4088 4089 if (btrfs_leaf_free_space(leaf) < 0) { 4090 btrfs_print_leaf(leaf); 4091 BUG(); 4092 } 4093 } 4094 4095 /* 4096 * make the item pointed to by the path bigger, data_size is the added size. 4097 */ 4098 void btrfs_extend_item(struct btrfs_path *path, u32 data_size) 4099 { 4100 int slot; 4101 struct extent_buffer *leaf; 4102 u32 nritems; 4103 unsigned int data_end; 4104 unsigned int old_data; 4105 unsigned int old_size; 4106 int i; 4107 struct btrfs_map_token token; 4108 4109 leaf = path->nodes[0]; 4110 4111 nritems = btrfs_header_nritems(leaf); 4112 data_end = leaf_data_end(leaf); 4113 4114 if (btrfs_leaf_free_space(leaf) < data_size) { 4115 btrfs_print_leaf(leaf); 4116 BUG(); 4117 } 4118 slot = path->slots[0]; 4119 old_data = btrfs_item_data_end(leaf, slot); 4120 4121 BUG_ON(slot < 0); 4122 if (slot >= nritems) { 4123 btrfs_print_leaf(leaf); 4124 btrfs_crit(leaf->fs_info, "slot %d too large, nritems %d", 4125 slot, nritems); 4126 BUG(); 4127 } 4128 4129 /* 4130 * item0..itemN ... dataN.offset..dataN.size .. data0.size 4131 */ 4132 /* first correct the data pointers */ 4133 btrfs_init_map_token(&token, leaf); 4134 for (i = slot; i < nritems; i++) { 4135 u32 ioff; 4136 4137 ioff = btrfs_token_item_offset(&token, i); 4138 btrfs_set_token_item_offset(&token, i, ioff - data_size); 4139 } 4140 4141 /* shift the data */ 4142 memmove_leaf_data(leaf, data_end - data_size, data_end, 4143 old_data - data_end); 4144 4145 data_end = old_data; 4146 old_size = btrfs_item_size(leaf, slot); 4147 btrfs_set_item_size(leaf, slot, old_size + data_size); 4148 btrfs_mark_buffer_dirty(leaf); 4149 4150 if (btrfs_leaf_free_space(leaf) < 0) { 4151 btrfs_print_leaf(leaf); 4152 BUG(); 4153 } 4154 } 4155 4156 /* 4157 * Make space in the node before inserting one or more items. 4158 * 4159 * @root: root we are inserting items to 4160 * @path: points to the leaf/slot where we are going to insert new items 4161 * @batch: information about the batch of items to insert 4162 * 4163 * Main purpose is to save stack depth by doing the bulk of the work in a 4164 * function that doesn't call btrfs_search_slot 4165 */ 4166 static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path, 4167 const struct btrfs_item_batch *batch) 4168 { 4169 struct btrfs_fs_info *fs_info = root->fs_info; 4170 int i; 4171 u32 nritems; 4172 unsigned int data_end; 4173 struct btrfs_disk_key disk_key; 4174 struct extent_buffer *leaf; 4175 int slot; 4176 struct btrfs_map_token token; 4177 u32 total_size; 4178 4179 /* 4180 * Before anything else, update keys in the parent and other ancestors 4181 * if needed, then release the write locks on them, so that other tasks 4182 * can use them while we modify the leaf. 4183 */ 4184 if (path->slots[0] == 0) { 4185 btrfs_cpu_key_to_disk(&disk_key, &batch->keys[0]); 4186 fixup_low_keys(path, &disk_key, 1); 4187 } 4188 btrfs_unlock_up_safe(path, 1); 4189 4190 leaf = path->nodes[0]; 4191 slot = path->slots[0]; 4192 4193 nritems = btrfs_header_nritems(leaf); 4194 data_end = leaf_data_end(leaf); 4195 total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item)); 4196 4197 if (btrfs_leaf_free_space(leaf) < total_size) { 4198 btrfs_print_leaf(leaf); 4199 btrfs_crit(fs_info, "not enough freespace need %u have %d", 4200 total_size, btrfs_leaf_free_space(leaf)); 4201 BUG(); 4202 } 4203 4204 btrfs_init_map_token(&token, leaf); 4205 if (slot != nritems) { 4206 unsigned int old_data = btrfs_item_data_end(leaf, slot); 4207 4208 if (old_data < data_end) { 4209 btrfs_print_leaf(leaf); 4210 btrfs_crit(fs_info, 4211 "item at slot %d with data offset %u beyond data end of leaf %u", 4212 slot, old_data, data_end); 4213 BUG(); 4214 } 4215 /* 4216 * item0..itemN ... dataN.offset..dataN.size .. data0.size 4217 */ 4218 /* first correct the data pointers */ 4219 for (i = slot; i < nritems; i++) { 4220 u32 ioff; 4221 4222 ioff = btrfs_token_item_offset(&token, i); 4223 btrfs_set_token_item_offset(&token, i, 4224 ioff - batch->total_data_size); 4225 } 4226 /* shift the items */ 4227 memmove_leaf_items(leaf, slot + batch->nr, slot, nritems - slot); 4228 4229 /* shift the data */ 4230 memmove_leaf_data(leaf, data_end - batch->total_data_size, 4231 data_end, old_data - data_end); 4232 data_end = old_data; 4233 } 4234 4235 /* setup the item for the new data */ 4236 for (i = 0; i < batch->nr; i++) { 4237 btrfs_cpu_key_to_disk(&disk_key, &batch->keys[i]); 4238 btrfs_set_item_key(leaf, &disk_key, slot + i); 4239 data_end -= batch->data_sizes[i]; 4240 btrfs_set_token_item_offset(&token, slot + i, data_end); 4241 btrfs_set_token_item_size(&token, slot + i, batch->data_sizes[i]); 4242 } 4243 4244 btrfs_set_header_nritems(leaf, nritems + batch->nr); 4245 btrfs_mark_buffer_dirty(leaf); 4246 4247 if (btrfs_leaf_free_space(leaf) < 0) { 4248 btrfs_print_leaf(leaf); 4249 BUG(); 4250 } 4251 } 4252 4253 /* 4254 * Insert a new item into a leaf. 4255 * 4256 * @root: The root of the btree. 4257 * @path: A path pointing to the target leaf and slot. 4258 * @key: The key of the new item. 4259 * @data_size: The size of the data associated with the new key. 4260 */ 4261 void btrfs_setup_item_for_insert(struct btrfs_root *root, 4262 struct btrfs_path *path, 4263 const struct btrfs_key *key, 4264 u32 data_size) 4265 { 4266 struct btrfs_item_batch batch; 4267 4268 batch.keys = key; 4269 batch.data_sizes = &data_size; 4270 batch.total_data_size = data_size; 4271 batch.nr = 1; 4272 4273 setup_items_for_insert(root, path, &batch); 4274 } 4275 4276 /* 4277 * Given a key and some data, insert items into the tree. 4278 * This does all the path init required, making room in the tree if needed. 4279 */ 4280 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, 4281 struct btrfs_root *root, 4282 struct btrfs_path *path, 4283 const struct btrfs_item_batch *batch) 4284 { 4285 int ret = 0; 4286 int slot; 4287 u32 total_size; 4288 4289 total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item)); 4290 ret = btrfs_search_slot(trans, root, &batch->keys[0], path, total_size, 1); 4291 if (ret == 0) 4292 return -EEXIST; 4293 if (ret < 0) 4294 return ret; 4295 4296 slot = path->slots[0]; 4297 BUG_ON(slot < 0); 4298 4299 setup_items_for_insert(root, path, batch); 4300 return 0; 4301 } 4302 4303 /* 4304 * Given a key and some data, insert an item into the tree. 4305 * This does all the path init required, making room in the tree if needed. 4306 */ 4307 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, 4308 const struct btrfs_key *cpu_key, void *data, 4309 u32 data_size) 4310 { 4311 int ret = 0; 4312 struct btrfs_path *path; 4313 struct extent_buffer *leaf; 4314 unsigned long ptr; 4315 4316 path = btrfs_alloc_path(); 4317 if (!path) 4318 return -ENOMEM; 4319 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size); 4320 if (!ret) { 4321 leaf = path->nodes[0]; 4322 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 4323 write_extent_buffer(leaf, data, ptr, data_size); 4324 btrfs_mark_buffer_dirty(leaf); 4325 } 4326 btrfs_free_path(path); 4327 return ret; 4328 } 4329 4330 /* 4331 * This function duplicates an item, giving 'new_key' to the new item. 4332 * It guarantees both items live in the same tree leaf and the new item is 4333 * contiguous with the original item. 4334 * 4335 * This allows us to split a file extent in place, keeping a lock on the leaf 4336 * the entire time. 4337 */ 4338 int btrfs_duplicate_item(struct btrfs_trans_handle *trans, 4339 struct btrfs_root *root, 4340 struct btrfs_path *path, 4341 const struct btrfs_key *new_key) 4342 { 4343 struct extent_buffer *leaf; 4344 int ret; 4345 u32 item_size; 4346 4347 leaf = path->nodes[0]; 4348 item_size = btrfs_item_size(leaf, path->slots[0]); 4349 ret = setup_leaf_for_split(trans, root, path, 4350 item_size + sizeof(struct btrfs_item)); 4351 if (ret) 4352 return ret; 4353 4354 path->slots[0]++; 4355 btrfs_setup_item_for_insert(root, path, new_key, item_size); 4356 leaf = path->nodes[0]; 4357 memcpy_extent_buffer(leaf, 4358 btrfs_item_ptr_offset(leaf, path->slots[0]), 4359 btrfs_item_ptr_offset(leaf, path->slots[0] - 1), 4360 item_size); 4361 return 0; 4362 } 4363 4364 /* 4365 * delete the pointer from a given node. 4366 * 4367 * the tree should have been previously balanced so the deletion does not 4368 * empty a node. 4369 * 4370 * This is exported for use inside btrfs-progs, don't un-export it. 4371 */ 4372 void btrfs_del_ptr(struct btrfs_root *root, struct btrfs_path *path, int level, 4373 int slot) 4374 { 4375 struct extent_buffer *parent = path->nodes[level]; 4376 u32 nritems; 4377 int ret; 4378 4379 nritems = btrfs_header_nritems(parent); 4380 if (slot != nritems - 1) { 4381 if (level) { 4382 ret = btrfs_tree_mod_log_insert_move(parent, slot, 4383 slot + 1, nritems - slot - 1); 4384 BUG_ON(ret < 0); 4385 } 4386 memmove_extent_buffer(parent, 4387 btrfs_node_key_ptr_offset(parent, slot), 4388 btrfs_node_key_ptr_offset(parent, slot + 1), 4389 sizeof(struct btrfs_key_ptr) * 4390 (nritems - slot - 1)); 4391 } else if (level) { 4392 ret = btrfs_tree_mod_log_insert_key(parent, slot, 4393 BTRFS_MOD_LOG_KEY_REMOVE); 4394 BUG_ON(ret < 0); 4395 } 4396 4397 nritems--; 4398 btrfs_set_header_nritems(parent, nritems); 4399 if (nritems == 0 && parent == root->node) { 4400 BUG_ON(btrfs_header_level(root->node) != 1); 4401 /* just turn the root into a leaf and break */ 4402 btrfs_set_header_level(root->node, 0); 4403 } else if (slot == 0) { 4404 struct btrfs_disk_key disk_key; 4405 4406 btrfs_node_key(parent, &disk_key, 0); 4407 fixup_low_keys(path, &disk_key, level + 1); 4408 } 4409 btrfs_mark_buffer_dirty(parent); 4410 } 4411 4412 /* 4413 * a helper function to delete the leaf pointed to by path->slots[1] and 4414 * path->nodes[1]. 4415 * 4416 * This deletes the pointer in path->nodes[1] and frees the leaf 4417 * block extent. zero is returned if it all worked out, < 0 otherwise. 4418 * 4419 * The path must have already been setup for deleting the leaf, including 4420 * all the proper balancing. path->nodes[1] must be locked. 4421 */ 4422 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans, 4423 struct btrfs_root *root, 4424 struct btrfs_path *path, 4425 struct extent_buffer *leaf) 4426 { 4427 WARN_ON(btrfs_header_generation(leaf) != trans->transid); 4428 btrfs_del_ptr(root, path, 1, path->slots[1]); 4429 4430 /* 4431 * btrfs_free_extent is expensive, we want to make sure we 4432 * aren't holding any locks when we call it 4433 */ 4434 btrfs_unlock_up_safe(path, 0); 4435 4436 root_sub_used(root, leaf->len); 4437 4438 atomic_inc(&leaf->refs); 4439 btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1); 4440 free_extent_buffer_stale(leaf); 4441 } 4442 /* 4443 * delete the item at the leaf level in path. If that empties 4444 * the leaf, remove it from the tree 4445 */ 4446 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, 4447 struct btrfs_path *path, int slot, int nr) 4448 { 4449 struct btrfs_fs_info *fs_info = root->fs_info; 4450 struct extent_buffer *leaf; 4451 int ret = 0; 4452 int wret; 4453 u32 nritems; 4454 4455 leaf = path->nodes[0]; 4456 nritems = btrfs_header_nritems(leaf); 4457 4458 if (slot + nr != nritems) { 4459 const u32 last_off = btrfs_item_offset(leaf, slot + nr - 1); 4460 const int data_end = leaf_data_end(leaf); 4461 struct btrfs_map_token token; 4462 u32 dsize = 0; 4463 int i; 4464 4465 for (i = 0; i < nr; i++) 4466 dsize += btrfs_item_size(leaf, slot + i); 4467 4468 memmove_leaf_data(leaf, data_end + dsize, data_end, 4469 last_off - data_end); 4470 4471 btrfs_init_map_token(&token, leaf); 4472 for (i = slot + nr; i < nritems; i++) { 4473 u32 ioff; 4474 4475 ioff = btrfs_token_item_offset(&token, i); 4476 btrfs_set_token_item_offset(&token, i, ioff + dsize); 4477 } 4478 4479 memmove_leaf_items(leaf, slot, slot + nr, nritems - slot - nr); 4480 } 4481 btrfs_set_header_nritems(leaf, nritems - nr); 4482 nritems -= nr; 4483 4484 /* delete the leaf if we've emptied it */ 4485 if (nritems == 0) { 4486 if (leaf == root->node) { 4487 btrfs_set_header_level(leaf, 0); 4488 } else { 4489 btrfs_clear_buffer_dirty(trans, leaf); 4490 btrfs_del_leaf(trans, root, path, leaf); 4491 } 4492 } else { 4493 int used = leaf_space_used(leaf, 0, nritems); 4494 if (slot == 0) { 4495 struct btrfs_disk_key disk_key; 4496 4497 btrfs_item_key(leaf, &disk_key, 0); 4498 fixup_low_keys(path, &disk_key, 1); 4499 } 4500 4501 /* 4502 * Try to delete the leaf if it is mostly empty. We do this by 4503 * trying to move all its items into its left and right neighbours. 4504 * If we can't move all the items, then we don't delete it - it's 4505 * not ideal, but future insertions might fill the leaf with more 4506 * items, or items from other leaves might be moved later into our 4507 * leaf due to deletions on those leaves. 4508 */ 4509 if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) { 4510 u32 min_push_space; 4511 4512 /* push_leaf_left fixes the path. 4513 * make sure the path still points to our leaf 4514 * for possible call to btrfs_del_ptr below 4515 */ 4516 slot = path->slots[1]; 4517 atomic_inc(&leaf->refs); 4518 /* 4519 * We want to be able to at least push one item to the 4520 * left neighbour leaf, and that's the first item. 4521 */ 4522 min_push_space = sizeof(struct btrfs_item) + 4523 btrfs_item_size(leaf, 0); 4524 wret = push_leaf_left(trans, root, path, 0, 4525 min_push_space, 1, (u32)-1); 4526 if (wret < 0 && wret != -ENOSPC) 4527 ret = wret; 4528 4529 if (path->nodes[0] == leaf && 4530 btrfs_header_nritems(leaf)) { 4531 /* 4532 * If we were not able to push all items from our 4533 * leaf to its left neighbour, then attempt to 4534 * either push all the remaining items to the 4535 * right neighbour or none. There's no advantage 4536 * in pushing only some items, instead of all, as 4537 * it's pointless to end up with a leaf having 4538 * too few items while the neighbours can be full 4539 * or nearly full. 4540 */ 4541 nritems = btrfs_header_nritems(leaf); 4542 min_push_space = leaf_space_used(leaf, 0, nritems); 4543 wret = push_leaf_right(trans, root, path, 0, 4544 min_push_space, 1, 0); 4545 if (wret < 0 && wret != -ENOSPC) 4546 ret = wret; 4547 } 4548 4549 if (btrfs_header_nritems(leaf) == 0) { 4550 path->slots[1] = slot; 4551 btrfs_del_leaf(trans, root, path, leaf); 4552 free_extent_buffer(leaf); 4553 ret = 0; 4554 } else { 4555 /* if we're still in the path, make sure 4556 * we're dirty. Otherwise, one of the 4557 * push_leaf functions must have already 4558 * dirtied this buffer 4559 */ 4560 if (path->nodes[0] == leaf) 4561 btrfs_mark_buffer_dirty(leaf); 4562 free_extent_buffer(leaf); 4563 } 4564 } else { 4565 btrfs_mark_buffer_dirty(leaf); 4566 } 4567 } 4568 return ret; 4569 } 4570 4571 /* 4572 * A helper function to walk down the tree starting at min_key, and looking 4573 * for nodes or leaves that are have a minimum transaction id. 4574 * This is used by the btree defrag code, and tree logging 4575 * 4576 * This does not cow, but it does stuff the starting key it finds back 4577 * into min_key, so you can call btrfs_search_slot with cow=1 on the 4578 * key and get a writable path. 4579 * 4580 * This honors path->lowest_level to prevent descent past a given level 4581 * of the tree. 4582 * 4583 * min_trans indicates the oldest transaction that you are interested 4584 * in walking through. Any nodes or leaves older than min_trans are 4585 * skipped over (without reading them). 4586 * 4587 * returns zero if something useful was found, < 0 on error and 1 if there 4588 * was nothing in the tree that matched the search criteria. 4589 */ 4590 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key, 4591 struct btrfs_path *path, 4592 u64 min_trans) 4593 { 4594 struct extent_buffer *cur; 4595 struct btrfs_key found_key; 4596 int slot; 4597 int sret; 4598 u32 nritems; 4599 int level; 4600 int ret = 1; 4601 int keep_locks = path->keep_locks; 4602 4603 ASSERT(!path->nowait); 4604 path->keep_locks = 1; 4605 again: 4606 cur = btrfs_read_lock_root_node(root); 4607 level = btrfs_header_level(cur); 4608 WARN_ON(path->nodes[level]); 4609 path->nodes[level] = cur; 4610 path->locks[level] = BTRFS_READ_LOCK; 4611 4612 if (btrfs_header_generation(cur) < min_trans) { 4613 ret = 1; 4614 goto out; 4615 } 4616 while (1) { 4617 nritems = btrfs_header_nritems(cur); 4618 level = btrfs_header_level(cur); 4619 sret = btrfs_bin_search(cur, 0, min_key, &slot); 4620 if (sret < 0) { 4621 ret = sret; 4622 goto out; 4623 } 4624 4625 /* at the lowest level, we're done, setup the path and exit */ 4626 if (level == path->lowest_level) { 4627 if (slot >= nritems) 4628 goto find_next_key; 4629 ret = 0; 4630 path->slots[level] = slot; 4631 btrfs_item_key_to_cpu(cur, &found_key, slot); 4632 goto out; 4633 } 4634 if (sret && slot > 0) 4635 slot--; 4636 /* 4637 * check this node pointer against the min_trans parameters. 4638 * If it is too old, skip to the next one. 4639 */ 4640 while (slot < nritems) { 4641 u64 gen; 4642 4643 gen = btrfs_node_ptr_generation(cur, slot); 4644 if (gen < min_trans) { 4645 slot++; 4646 continue; 4647 } 4648 break; 4649 } 4650 find_next_key: 4651 /* 4652 * we didn't find a candidate key in this node, walk forward 4653 * and find another one 4654 */ 4655 if (slot >= nritems) { 4656 path->slots[level] = slot; 4657 sret = btrfs_find_next_key(root, path, min_key, level, 4658 min_trans); 4659 if (sret == 0) { 4660 btrfs_release_path(path); 4661 goto again; 4662 } else { 4663 goto out; 4664 } 4665 } 4666 /* save our key for returning back */ 4667 btrfs_node_key_to_cpu(cur, &found_key, slot); 4668 path->slots[level] = slot; 4669 if (level == path->lowest_level) { 4670 ret = 0; 4671 goto out; 4672 } 4673 cur = btrfs_read_node_slot(cur, slot); 4674 if (IS_ERR(cur)) { 4675 ret = PTR_ERR(cur); 4676 goto out; 4677 } 4678 4679 btrfs_tree_read_lock(cur); 4680 4681 path->locks[level - 1] = BTRFS_READ_LOCK; 4682 path->nodes[level - 1] = cur; 4683 unlock_up(path, level, 1, 0, NULL); 4684 } 4685 out: 4686 path->keep_locks = keep_locks; 4687 if (ret == 0) { 4688 btrfs_unlock_up_safe(path, path->lowest_level + 1); 4689 memcpy(min_key, &found_key, sizeof(found_key)); 4690 } 4691 return ret; 4692 } 4693 4694 /* 4695 * this is similar to btrfs_next_leaf, but does not try to preserve 4696 * and fixup the path. It looks for and returns the next key in the 4697 * tree based on the current path and the min_trans parameters. 4698 * 4699 * 0 is returned if another key is found, < 0 if there are any errors 4700 * and 1 is returned if there are no higher keys in the tree 4701 * 4702 * path->keep_locks should be set to 1 on the search made before 4703 * calling this function. 4704 */ 4705 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, 4706 struct btrfs_key *key, int level, u64 min_trans) 4707 { 4708 int slot; 4709 struct extent_buffer *c; 4710 4711 WARN_ON(!path->keep_locks && !path->skip_locking); 4712 while (level < BTRFS_MAX_LEVEL) { 4713 if (!path->nodes[level]) 4714 return 1; 4715 4716 slot = path->slots[level] + 1; 4717 c = path->nodes[level]; 4718 next: 4719 if (slot >= btrfs_header_nritems(c)) { 4720 int ret; 4721 int orig_lowest; 4722 struct btrfs_key cur_key; 4723 if (level + 1 >= BTRFS_MAX_LEVEL || 4724 !path->nodes[level + 1]) 4725 return 1; 4726 4727 if (path->locks[level + 1] || path->skip_locking) { 4728 level++; 4729 continue; 4730 } 4731 4732 slot = btrfs_header_nritems(c) - 1; 4733 if (level == 0) 4734 btrfs_item_key_to_cpu(c, &cur_key, slot); 4735 else 4736 btrfs_node_key_to_cpu(c, &cur_key, slot); 4737 4738 orig_lowest = path->lowest_level; 4739 btrfs_release_path(path); 4740 path->lowest_level = level; 4741 ret = btrfs_search_slot(NULL, root, &cur_key, path, 4742 0, 0); 4743 path->lowest_level = orig_lowest; 4744 if (ret < 0) 4745 return ret; 4746 4747 c = path->nodes[level]; 4748 slot = path->slots[level]; 4749 if (ret == 0) 4750 slot++; 4751 goto next; 4752 } 4753 4754 if (level == 0) 4755 btrfs_item_key_to_cpu(c, key, slot); 4756 else { 4757 u64 gen = btrfs_node_ptr_generation(c, slot); 4758 4759 if (gen < min_trans) { 4760 slot++; 4761 goto next; 4762 } 4763 btrfs_node_key_to_cpu(c, key, slot); 4764 } 4765 return 0; 4766 } 4767 return 1; 4768 } 4769 4770 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, 4771 u64 time_seq) 4772 { 4773 int slot; 4774 int level; 4775 struct extent_buffer *c; 4776 struct extent_buffer *next; 4777 struct btrfs_fs_info *fs_info = root->fs_info; 4778 struct btrfs_key key; 4779 bool need_commit_sem = false; 4780 u32 nritems; 4781 int ret; 4782 int i; 4783 4784 /* 4785 * The nowait semantics are used only for write paths, where we don't 4786 * use the tree mod log and sequence numbers. 4787 */ 4788 if (time_seq) 4789 ASSERT(!path->nowait); 4790 4791 nritems = btrfs_header_nritems(path->nodes[0]); 4792 if (nritems == 0) 4793 return 1; 4794 4795 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1); 4796 again: 4797 level = 1; 4798 next = NULL; 4799 btrfs_release_path(path); 4800 4801 path->keep_locks = 1; 4802 4803 if (time_seq) { 4804 ret = btrfs_search_old_slot(root, &key, path, time_seq); 4805 } else { 4806 if (path->need_commit_sem) { 4807 path->need_commit_sem = 0; 4808 need_commit_sem = true; 4809 if (path->nowait) { 4810 if (!down_read_trylock(&fs_info->commit_root_sem)) { 4811 ret = -EAGAIN; 4812 goto done; 4813 } 4814 } else { 4815 down_read(&fs_info->commit_root_sem); 4816 } 4817 } 4818 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4819 } 4820 path->keep_locks = 0; 4821 4822 if (ret < 0) 4823 goto done; 4824 4825 nritems = btrfs_header_nritems(path->nodes[0]); 4826 /* 4827 * by releasing the path above we dropped all our locks. A balance 4828 * could have added more items next to the key that used to be 4829 * at the very end of the block. So, check again here and 4830 * advance the path if there are now more items available. 4831 */ 4832 if (nritems > 0 && path->slots[0] < nritems - 1) { 4833 if (ret == 0) 4834 path->slots[0]++; 4835 ret = 0; 4836 goto done; 4837 } 4838 /* 4839 * So the above check misses one case: 4840 * - after releasing the path above, someone has removed the item that 4841 * used to be at the very end of the block, and balance between leafs 4842 * gets another one with bigger key.offset to replace it. 4843 * 4844 * This one should be returned as well, or we can get leaf corruption 4845 * later(esp. in __btrfs_drop_extents()). 4846 * 4847 * And a bit more explanation about this check, 4848 * with ret > 0, the key isn't found, the path points to the slot 4849 * where it should be inserted, so the path->slots[0] item must be the 4850 * bigger one. 4851 */ 4852 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) { 4853 ret = 0; 4854 goto done; 4855 } 4856 4857 while (level < BTRFS_MAX_LEVEL) { 4858 if (!path->nodes[level]) { 4859 ret = 1; 4860 goto done; 4861 } 4862 4863 slot = path->slots[level] + 1; 4864 c = path->nodes[level]; 4865 if (slot >= btrfs_header_nritems(c)) { 4866 level++; 4867 if (level == BTRFS_MAX_LEVEL) { 4868 ret = 1; 4869 goto done; 4870 } 4871 continue; 4872 } 4873 4874 4875 /* 4876 * Our current level is where we're going to start from, and to 4877 * make sure lockdep doesn't complain we need to drop our locks 4878 * and nodes from 0 to our current level. 4879 */ 4880 for (i = 0; i < level; i++) { 4881 if (path->locks[level]) { 4882 btrfs_tree_read_unlock(path->nodes[i]); 4883 path->locks[i] = 0; 4884 } 4885 free_extent_buffer(path->nodes[i]); 4886 path->nodes[i] = NULL; 4887 } 4888 4889 next = c; 4890 ret = read_block_for_search(root, path, &next, level, 4891 slot, &key); 4892 if (ret == -EAGAIN && !path->nowait) 4893 goto again; 4894 4895 if (ret < 0) { 4896 btrfs_release_path(path); 4897 goto done; 4898 } 4899 4900 if (!path->skip_locking) { 4901 ret = btrfs_try_tree_read_lock(next); 4902 if (!ret && path->nowait) { 4903 ret = -EAGAIN; 4904 goto done; 4905 } 4906 if (!ret && time_seq) { 4907 /* 4908 * If we don't get the lock, we may be racing 4909 * with push_leaf_left, holding that lock while 4910 * itself waiting for the leaf we've currently 4911 * locked. To solve this situation, we give up 4912 * on our lock and cycle. 4913 */ 4914 free_extent_buffer(next); 4915 btrfs_release_path(path); 4916 cond_resched(); 4917 goto again; 4918 } 4919 if (!ret) 4920 btrfs_tree_read_lock(next); 4921 } 4922 break; 4923 } 4924 path->slots[level] = slot; 4925 while (1) { 4926 level--; 4927 path->nodes[level] = next; 4928 path->slots[level] = 0; 4929 if (!path->skip_locking) 4930 path->locks[level] = BTRFS_READ_LOCK; 4931 if (!level) 4932 break; 4933 4934 ret = read_block_for_search(root, path, &next, level, 4935 0, &key); 4936 if (ret == -EAGAIN && !path->nowait) 4937 goto again; 4938 4939 if (ret < 0) { 4940 btrfs_release_path(path); 4941 goto done; 4942 } 4943 4944 if (!path->skip_locking) { 4945 if (path->nowait) { 4946 if (!btrfs_try_tree_read_lock(next)) { 4947 ret = -EAGAIN; 4948 goto done; 4949 } 4950 } else { 4951 btrfs_tree_read_lock(next); 4952 } 4953 } 4954 } 4955 ret = 0; 4956 done: 4957 unlock_up(path, 0, 1, 0, NULL); 4958 if (need_commit_sem) { 4959 int ret2; 4960 4961 path->need_commit_sem = 1; 4962 ret2 = finish_need_commit_sem_search(path); 4963 up_read(&fs_info->commit_root_sem); 4964 if (ret2) 4965 ret = ret2; 4966 } 4967 4968 return ret; 4969 } 4970 4971 int btrfs_next_old_item(struct btrfs_root *root, struct btrfs_path *path, u64 time_seq) 4972 { 4973 path->slots[0]++; 4974 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) 4975 return btrfs_next_old_leaf(root, path, time_seq); 4976 return 0; 4977 } 4978 4979 /* 4980 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps 4981 * searching until it gets past min_objectid or finds an item of 'type' 4982 * 4983 * returns 0 if something is found, 1 if nothing was found and < 0 on error 4984 */ 4985 int btrfs_previous_item(struct btrfs_root *root, 4986 struct btrfs_path *path, u64 min_objectid, 4987 int type) 4988 { 4989 struct btrfs_key found_key; 4990 struct extent_buffer *leaf; 4991 u32 nritems; 4992 int ret; 4993 4994 while (1) { 4995 if (path->slots[0] == 0) { 4996 ret = btrfs_prev_leaf(root, path); 4997 if (ret != 0) 4998 return ret; 4999 } else { 5000 path->slots[0]--; 5001 } 5002 leaf = path->nodes[0]; 5003 nritems = btrfs_header_nritems(leaf); 5004 if (nritems == 0) 5005 return 1; 5006 if (path->slots[0] == nritems) 5007 path->slots[0]--; 5008 5009 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 5010 if (found_key.objectid < min_objectid) 5011 break; 5012 if (found_key.type == type) 5013 return 0; 5014 if (found_key.objectid == min_objectid && 5015 found_key.type < type) 5016 break; 5017 } 5018 return 1; 5019 } 5020 5021 /* 5022 * search in extent tree to find a previous Metadata/Data extent item with 5023 * min objecitd. 5024 * 5025 * returns 0 if something is found, 1 if nothing was found and < 0 on error 5026 */ 5027 int btrfs_previous_extent_item(struct btrfs_root *root, 5028 struct btrfs_path *path, u64 min_objectid) 5029 { 5030 struct btrfs_key found_key; 5031 struct extent_buffer *leaf; 5032 u32 nritems; 5033 int ret; 5034 5035 while (1) { 5036 if (path->slots[0] == 0) { 5037 ret = btrfs_prev_leaf(root, path); 5038 if (ret != 0) 5039 return ret; 5040 } else { 5041 path->slots[0]--; 5042 } 5043 leaf = path->nodes[0]; 5044 nritems = btrfs_header_nritems(leaf); 5045 if (nritems == 0) 5046 return 1; 5047 if (path->slots[0] == nritems) 5048 path->slots[0]--; 5049 5050 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 5051 if (found_key.objectid < min_objectid) 5052 break; 5053 if (found_key.type == BTRFS_EXTENT_ITEM_KEY || 5054 found_key.type == BTRFS_METADATA_ITEM_KEY) 5055 return 0; 5056 if (found_key.objectid == min_objectid && 5057 found_key.type < BTRFS_EXTENT_ITEM_KEY) 5058 break; 5059 } 5060 return 1; 5061 } 5062 5063 int __init btrfs_ctree_init(void) 5064 { 5065 btrfs_path_cachep = kmem_cache_create("btrfs_path", 5066 sizeof(struct btrfs_path), 0, 5067 SLAB_MEM_SPREAD, NULL); 5068 if (!btrfs_path_cachep) 5069 return -ENOMEM; 5070 return 0; 5071 } 5072 5073 void __cold btrfs_ctree_exit(void) 5074 { 5075 kmem_cache_destroy(btrfs_path_cachep); 5076 } 5077