// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mm.h>
#include <linux/error-injection.h>
#include "messages.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"
#include "tree-mod-log.h"
#include "tree-checker.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "relocation.h"
#include "file-item.h"

static struct kmem_cache *btrfs_path_cachep;

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *ins_key, struct btrfs_path *path,
		      int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot);

static const struct btrfs_csums {
	u16		size;
	const char	name[10];
	const char	driver[12];
} btrfs_csums[] = {
	[BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
	[BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" },
	[BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
	[BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
				     .driver = "blake2b-256" },
};

/*
 * The leaf data grows from end-to-front in the node.  This returns the address
 * of the start of the last item, which is the stop of the leaf data stack.
 */
static unsigned int leaf_data_end(const struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);

	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(leaf->fs_info);
	return btrfs_item_offset(leaf, nr - 1);
}

/*
 * Move data in a @leaf (using memmove, safe for overlapping ranges).
 *
 * @leaf:	leaf that we're doing a memmove on
 * @dst_offset:	item data offset we're moving to
 * @src_offset:	item data offset we're moving from
 * @len:	length of the data we're moving
 *
 * Wrapper around memmove_extent_buffer() that takes into account the header on
 * the leaf.  The btrfs_item offsets start directly after the header, so we
 * have to adjust any offsets to account for the header in the leaf.  This
 * handles that math to simplify the callers.
 */
static inline void memmove_leaf_data(const struct extent_buffer *leaf,
				     unsigned long dst_offset,
				     unsigned long src_offset,
				     unsigned long len)
{
	memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, 0) + dst_offset,
			      btrfs_item_nr_offset(leaf, 0) + src_offset, len);
}
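
/*
 * Illustrative sketch (not part of btrfs; the helper name is hypothetical):
 * how a caller such as item deletion could combine leaf_data_end() and
 * memmove_leaf_data() to close a @dsize-byte hole left by the data of a
 * removed item at @slot.  Real callers also rewrite the item offsets, which
 * is omitted here.
 */
static void __maybe_unused example_close_data_hole(struct extent_buffer *leaf,
						   int slot, u32 dsize)
{
	const unsigned int data_end = leaf_data_end(leaf);
	const unsigned int item_offset = btrfs_item_offset(leaf, slot);

	/* Slide everything below the removed item's data down by @dsize. */
	memmove_leaf_data(leaf, data_end + dsize, data_end,
			  item_offset - data_end);
}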

/*
 * Copy item data from @src into @dst at the given @offset.
 *
 * @dst:	destination leaf that we're copying into
 * @src:	source leaf that we're copying from
 * @dst_offset:	item data offset we're copying to
 * @src_offset:	item data offset we're copying from
 * @len:	length of the data we're copying
 *
 * Wrapper around copy_extent_buffer() that takes into account the header on
 * the leaf.  The btrfs_item offsets start directly after the header, so we
 * have to adjust any offsets to account for the header in the leaf.  This
 * handles that math to simplify the callers.
 */
static inline void copy_leaf_data(const struct extent_buffer *dst,
				  const struct extent_buffer *src,
				  unsigned long dst_offset,
				  unsigned long src_offset, unsigned long len)
{
	copy_extent_buffer(dst, src, btrfs_item_nr_offset(dst, 0) + dst_offset,
			   btrfs_item_nr_offset(src, 0) + src_offset, len);
}

/*
 * Move items in a @leaf (using memmove).
 *
 * @dst:	destination leaf for the items
 * @dst_item:	the item nr we're copying into
 * @src_item:	the item nr we're copying from
 * @nr_items:	the number of items to copy
 *
 * Wrapper around memmove_extent_buffer() that does the math to get the
 * appropriate offsets into the leaf from the item numbers.
 */
static inline void memmove_leaf_items(const struct extent_buffer *leaf,
				      int dst_item, int src_item, int nr_items)
{
	memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, dst_item),
			      btrfs_item_nr_offset(leaf, src_item),
			      nr_items * sizeof(struct btrfs_item));
}

/*
 * Copy items from @src into @dst at the given @offset.
 *
 * @dst:	destination leaf for the items
 * @src:	source leaf for the items
 * @dst_item:	the item nr we're copying into
 * @src_item:	the item nr we're copying from
 * @nr_items:	the number of items to copy
 *
 * Wrapper around copy_extent_buffer() that does the math to get the
 * appropriate offsets into the leaf from the item numbers.
 */
static inline void copy_leaf_items(const struct extent_buffer *dst,
				   const struct extent_buffer *src,
				   int dst_item, int src_item, int nr_items)
{
	copy_extent_buffer(dst, src, btrfs_item_nr_offset(dst, dst_item),
			   btrfs_item_nr_offset(src, src_item),
			   nr_items * sizeof(struct btrfs_item));
}

int btrfs_super_csum_size(const struct btrfs_super_block *s)
{
	u16 t = btrfs_super_csum_type(s);

	/* csum type is validated at mount time. */
	return btrfs_csums[t].size;
}

const char *btrfs_super_csum_name(u16 csum_type)
{
	/* csum type is validated at mount time. */
	return btrfs_csums[csum_type].name;
}

/*
 * Return the driver name if defined, otherwise the name, which is also a
 * valid driver name.
 */
const char *btrfs_super_csum_driver(u16 csum_type)
{
	/* csum type is validated at mount time. */
	return btrfs_csums[csum_type].driver[0] ?
		btrfs_csums[csum_type].driver :
		btrfs_csums[csum_type].name;
}

size_t __attribute_const__ btrfs_get_num_csums(void)
{
	return ARRAY_SIZE(btrfs_csums);
}

struct btrfs_path *btrfs_alloc_path(void)
{
	might_sleep();

	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}

/* This also releases the path. */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}
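
/*
 * Illustrative sketch (not part of btrfs; the key values are arbitrary): the
 * usual lifetime of a path.  Freeing the path also drops any locks and extent
 * buffer references it still holds.
 */
static int __maybe_unused example_path_lifetime(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	/* A read-only search: no transaction handle and no COW. */
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	/* ret == 0: the item is at path->nodes[0], slot path->slots[0]. */
	btrfs_free_path(path);
	return ret;
}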

/*
 * Path release drops references on the extent buffers in the path and it
 * drops any locks held by this path.
 *
 * It is safe to call this on paths that hold no locks or extent buffers.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}

/*
 * We want the transaction abort to print a stack trace only for errors where
 * the cause could be a bug, e.g. due to ENOSPC, and not for common errors that
 * are caused by external factors.
 */
bool __cold abort_should_print_stack(int errno)
{
	switch (errno) {
	case -EIO:
	case -EROFS:
	case -ENOMEM:
		return false;
	}
	return true;
}

/*
 * Safely gets a reference on the root node of a tree.  A lock is not taken,
 * so a concurrent writer may put a different node at the root of the tree.
 * See btrfs_lock_root_node() for the looping required.
 *
 * The extent buffer returned by this has a reference taken, so it won't
 * disappear.  It may stop being the root of the tree at any time because
 * there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was COWed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}
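
/*
 * Illustrative sketch (not part of btrfs): btrfs_root_node() hands back a
 * referenced but unlocked buffer, so the caller's only obligation is to drop
 * the reference when done.
 */
static void __maybe_unused example_peek_root_level(struct btrfs_root *root)
{
	struct extent_buffer *eb = btrfs_root_node(root);

	/* @eb may stop being the root at any moment, but it cannot vanish. */
	btrfs_debug(root->fs_info, "root level %d", btrfs_header_level(eb));
	free_extent_buffer(eb);
}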

/*
 * Cow-only roots (not-shareable trees: everything that is not a subvolume or
 * reloc root) just get put onto a simple dirty list.  Transaction walks this
 * list to make sure they get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
		return;

	spin_lock(&fs_info->trans_lock);
	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
		/* Want the extent tree to be the last on the list */
		if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID)
			list_move_tail(&root->dirty_list,
				       &fs_info->dirty_cowonly_roots);
		else
			list_move(&root->dirty_list,
				  &fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&fs_info->trans_lock);
}

/*
 * Used by snapshot creation to make a copy of a root for a tree with a given
 * objectid.  The buffer with the new root node is returned in cow_ret, and
 * this function returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
				     &disk_key, level, buf->start, 0,
				     BTRFS_NESTING_NEW_ROOT);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);
	if (ret) {
		btrfs_tree_unlock(cow);
		free_extent_buffer(cow);
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

/*
 * Check if the tree block can be shared by multiple trees.
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in shareable trees and tree roots are never shared.
	 * If a block was allocated after the last snapshot and the block was
	 * not allocated by tree relocation, we know the block is not shared.
	 */
	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;

	return 0;
}

static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree blocks
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in the tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in the tree block.
	 * The reason for this is that some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			ret = -EROFS;
			btrfs_handle_fs_error(fs_info, ret, NULL);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1);
			if (ret)
				return ret;

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0);
				if (ret)
					return ret;
				ret = btrfs_inc_ref(trans, root, cow, 1);
				if (ret)
					return ret;
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
		}
		if (new_flags != 0) {
			int level = btrfs_header_level(buf);

			ret = btrfs_set_disk_extent_flags(trans, buf,
							  new_flags, level);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
			ret = btrfs_dec_ref(trans, root, buf, 1);
			if (ret)
				return ret;
		}
		btrfs_clear_buffer_dirty(trans, buf);
		*last_ref = 1;
	}
	return 0;
}

/*
 * Does the dirty work in COW of a single block.  The parent block (if
 * supplied) is updated to point to the new COW copy.  The new buffer is
 * marked dirty and returned locked.  If you modify the block it needs to be
 * marked dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more COW.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size,
			     enum btrfs_lock_nesting nest)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start = 0;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_write_locked(buf);

	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
		parent_start = parent->start;

	cow = btrfs_alloc_tree_block(trans, root, parent_start,
				     root->root_key.objectid, &disk_key, level,
				     search_start, empty_size, nest);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_tree_unlock(cow);
		free_extent_buffer(cow);
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
		if (ret) {
			btrfs_tree_unlock(cow);
			free_extent_buffer(cow);
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
	}

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;

		atomic_inc(&cow->refs);
		ret = btrfs_tree_mod_log_insert_root(root->node, cow, true);
		BUG_ON(ret < 0);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
				      parent_start, last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		WARN_ON(trans->transid != btrfs_header_generation(parent));
		btrfs_tree_mod_log_insert_key(parent, parent_slot,
					      BTRFS_MOD_LOG_KEY_REPLACE);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		if (last_ref) {
			ret = btrfs_tree_mod_log_free_eb(buf);
			if (ret) {
				btrfs_tree_unlock(cow);
				free_extent_buffer(cow);
				btrfs_abort_transaction(trans, ret);
				return ret;
			}
		}
		btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
				      parent_start, last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	if (btrfs_is_testing(root->fs_info))
		return 0;

	/* Ensure we can see the FORCE_COW bit */
	smp_mb__before_atomic();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create a snapshot during committing the transaction,
	 *    after we've finished copying the src root, we must COW the shared
	 *    block to ensure the metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
		return 0;
	return 1;
}

/*
 * COWs a single block, see __btrfs_cow_block() for the real work.
 * This version of it has extra checks so that a block isn't COWed more than
 * once per transaction, as long as it hasn't been written yet.
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret,
		    enum btrfs_lock_nesting nest)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 search_start;
	int ret;

	if (test_bit(BTRFS_ROOT_DELETING, &root->state))
		btrfs_err(fs_info,
			"COW'ing blocks on a fs root that's being dropped");

	if (trans->transaction != fs_info->running_transaction)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid,
		       fs_info->running_transaction->transid);

	if (trans->transid != fs_info->generation)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid, fs_info->generation);

	if (!should_cow_block(trans, root, buf)) {
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)SZ_1G - 1);

	/*
	 * Before CoWing this block for later modification, check if it's
	 * the subtree root and do the delayed subtree trace if needed.
	 *
	 * Also we don't care about the error, as it's handled internally.
	 */
	btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0, nest);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}
ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO);

/*
 * Helper function for defrag to decide if two blocks pointed to by a node are
 * actually close by.
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}
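
/*
 * Illustrative numbers (not part of btrfs): with a 16K nodesize,
 * close_blocks() treats a gap of up to 32K - 1 between the end of one block
 * and the start of the other as "close".
 */
static void __maybe_unused example_close_blocks(void)
{
	const u32 nodesize = SZ_16K;

	/* Block at 40960 starts 24K past the end of block 0: close. */
	ASSERT(close_blocks(0, 40960, nodesize) == 1);
	/* Block at 64K starts 48K past the end of block 0: not close. */
	ASSERT(close_blocks(0, SZ_64K, nodesize) == 0);
}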

#ifdef __LITTLE_ENDIAN

/*
 * Compare two keys; on little-endian the disk order is the same as the CPU
 * order and we can avoid the conversion.
 */
static int comp_keys(const struct btrfs_disk_key *disk_key,
		     const struct btrfs_key *k2)
{
	const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key;

	return btrfs_comp_cpu_keys(k1, k2);
}

#else

/*
 * Compare two keys in a memcmp fashion.
 */
static int comp_keys(const struct btrfs_disk_key *disk,
		     const struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}
#endif

/*
 * Same as comp_keys(), only with two btrfs_keys.
 */
int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
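
/*
 * Illustrative sketch (not part of btrfs): keys sort by objectid first, then
 * by type, then by offset, exactly as implemented above.
 */
static void __maybe_unused example_key_order(void)
{
	const struct btrfs_key a = {
		.objectid = 256, .type = BTRFS_EXTENT_DATA_KEY, .offset = 0,
	};
	const struct btrfs_key b = {
		.objectid = 256, .type = BTRFS_EXTENT_DATA_KEY, .offset = SZ_4K,
	};

	/* Same objectid and type, so the smaller offset sorts first. */
	ASSERT(btrfs_comp_cpu_keys(&a, &b) < 0);
}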

/*
 * This is used by the defrag code to go through all the leaves pointed to by
 * a node and reallocate them so that disk order is close to key order.
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cur;
	u64 blocknr;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	WARN_ON(trans->transaction != fs_info->running_transaction);
	WARN_ON(trans->transid != fs_info->generation);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = fs_info->nodesize;
	end_slot = parent_nritems - 1;

	if (parent_nritems <= 1)
		return 0;

	for (i = start_slot; i <= end_slot; i++) {
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = btrfs_read_node_slot(parent, i);
		if (IS_ERR(cur))
			return PTR_ERR(cur);
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize),
					BTRFS_NESTING_COW);
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}

/*
 * Search for a key in the given extent_buffer.
 *
 * The lower boundary for the search is specified by the slot number
 * @first_slot.  Use a value of 0 to search over the whole extent buffer.
 * Works for both leaves and nodes.
 *
 * The slot in the extent buffer is returned via @slot.  If the key exists in
 * the extent buffer, then @slot will point to the slot where the key is,
 * otherwise it points to the slot where you would insert the key.
 *
 * Slot may point to the total number of items (i.e. one position beyond the
 * last key) if the key is bigger than the last key in the extent buffer.
 */
int btrfs_bin_search(struct extent_buffer *eb, int first_slot,
		     const struct btrfs_key *key, int *slot)
{
	unsigned long p;
	int item_size;
	/*
	 * Use unsigned types for the low and high slots, so that we get a more
	 * efficient division in the search loop below.
	 */
	u32 low = first_slot;
	u32 high = btrfs_header_nritems(eb);
	int ret;
	const int key_size = sizeof(struct btrfs_disk_key);

	if (unlikely(low > high)) {
		btrfs_err(eb->fs_info,
		 "%s: low (%u) > high (%u) eb %llu owner %llu level %d",
			  __func__, low, high, eb->start,
			  btrfs_header_owner(eb), btrfs_header_level(eb));
		return -EINVAL;
	}

	if (btrfs_header_level(eb) == 0) {
		p = offsetof(struct btrfs_leaf, items);
		item_size = sizeof(struct btrfs_item);
	} else {
		p = offsetof(struct btrfs_node, ptrs);
		item_size = sizeof(struct btrfs_key_ptr);
	}

	while (low < high) {
		unsigned long oip;
		unsigned long offset;
		struct btrfs_disk_key *tmp;
		struct btrfs_disk_key unaligned;
		int mid;

		mid = (low + high) / 2;
		offset = p + mid * item_size;
		oip = offset_in_page(offset);

		if (oip + key_size <= PAGE_SIZE) {
			const unsigned long idx = get_eb_page_index(offset);
			char *kaddr = page_address(eb->pages[idx]);

			oip = get_eb_offset_in_page(eb, offset);
			tmp = (struct btrfs_disk_key *)(kaddr + oip);
		} else {
			read_extent_buffer(eb, &unaligned, offset, key_size);
			tmp = &unaligned;
		}

		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}
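
/*
 * Illustrative sketch (not part of btrfs): interpreting what
 * btrfs_bin_search() returns, per the comment above it.
 */
static int __maybe_unused example_bin_search_usage(struct extent_buffer *eb,
						   const struct btrfs_key *key)
{
	int slot;
	int ret;

	ret = btrfs_bin_search(eb, 0, key, &slot);
	if (ret < 0)
		return ret;	/* invalid slot range, likely corruption */
	/*
	 * ret == 0: @key exists at @slot.
	 * ret == 1: @key is absent and @slot is the insertion position, which
	 *           may equal btrfs_header_nritems(eb).
	 */
	return slot;
}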

static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}

/*
 * Given a node and slot number, this reads the block it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 */
struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
					   int slot)
{
	int level = btrfs_header_level(parent);
	struct btrfs_tree_parent_check check = { 0 };
	struct extent_buffer *eb;

	if (slot < 0 || slot >= btrfs_header_nritems(parent))
		return ERR_PTR(-ENOENT);

	ASSERT(level);

	check.level = level - 1;
	check.transid = btrfs_node_ptr_generation(parent, slot);
	check.owner_root = btrfs_header_owner(parent);
	check.has_first_key = true;
	btrfs_node_key_to_cpu(parent, &check.first_key, slot);

	eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot),
			     &check);
	if (IS_ERR(eb))
		return eb;
	if (!extent_buffer_uptodate(eb)) {
		free_extent_buffer(eb);
		return ERR_PTR(-EIO);
	}

	return eb;
}
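
/*
 * Illustrative sketch (not part of btrfs): visiting a child node.  The buffer
 * returned by btrfs_read_node_slot() is referenced but unlocked, so lock it
 * first if it may be modified, and always drop the reference.
 */
static int __maybe_unused example_visit_child(struct extent_buffer *parent,
					      int slot)
{
	struct extent_buffer *child = btrfs_read_node_slot(parent, slot);

	if (IS_ERR(child))
		return PTR_ERR(child);
	/* ... read-only use of @child goes here ... */
	free_extent_buffer(child);
	return 0;
}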

/*
 * Node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	ASSERT(level > 0);

	mid = path->nodes[level];

	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = btrfs_read_node_slot(mid, 0);
		if (IS_ERR(child)) {
			ret = PTR_ERR(child);
			btrfs_handle_fs_error(fs_info, ret, NULL);
			goto enospc;
		}

		btrfs_tree_lock(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child,
				      BTRFS_NESTING_COW);
		if (ret) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			goto enospc;
		}

		ret = btrfs_tree_mod_log_insert_root(root->node, child, true);
		BUG_ON(ret < 0);
		rcu_assign_pointer(root->node, child);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		btrfs_clear_buffer_dirty(trans, mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);

		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
		/* once for the root ptr */
		free_extent_buffer_stale(mid);
		return 0;
	}
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
		return 0;

	if (pslot) {
		left = btrfs_read_node_slot(parent, pslot - 1);
		if (IS_ERR(left)) {
			ret = PTR_ERR(left);
			left = NULL;
			goto enospc;
		}

		__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left,
				       BTRFS_NESTING_LEFT_COW);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	if (pslot + 1 < btrfs_header_nritems(parent)) {
		right = btrfs_read_node_slot(parent, pslot + 1);
		if (IS_ERR(right)) {
			ret = PTR_ERR(right);
			right = NULL;
			goto enospc;
		}

		__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right,
				       BTRFS_NESTING_RIGHT_COW);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, left, mid, 1);
		if (wret < 0)
			ret = wret;
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			btrfs_clear_buffer_dirty(trans, right);
			btrfs_tree_unlock(right);
			del_ptr(root, path, level + 1, pslot + 1);
			root_sub_used(root, right->len);
			btrfs_free_tree_block(trans, btrfs_root_id(root), right,
					      0, 1);
			free_extent_buffer_stale(right);
			right = NULL;
		} else {
			struct btrfs_disk_key right_key;

			btrfs_node_key(right, &right_key, 0);
			ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
					BTRFS_MOD_LOG_KEY_REPLACE);
			BUG_ON(ret < 0);
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * We're not allowed to leave a node with one item in the
		 * tree during a delete.  A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right.
		 */
		if (!left) {
			ret = -EROFS;
			btrfs_handle_fs_error(fs_info, ret, NULL);
			goto enospc;
		}
		wret = balance_node_right(trans, mid, left);
		if (wret < 0) {
			ret = wret;
			goto enospc;
		}
		if (wret == 1) {
			wret = push_node_left(trans, left, mid, 1);
			if (wret < 0)
				ret = wret;
		}
		BUG_ON(wret == 1);
	}
	if (btrfs_header_nritems(mid) == 0) {
		btrfs_clear_buffer_dirty(trans, mid);
		btrfs_tree_unlock(mid);
		del_ptr(root, path, level + 1, pslot);
		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
		free_extent_buffer_stale(mid);
		mid = NULL;
	} else {
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;

		btrfs_node_key(mid, &mid_key, 0);
		ret = btrfs_tree_mod_log_insert_key(parent, pslot,
						    BTRFS_MOD_LOG_KEY_REPLACE);
		BUG_ON(ret < 0);
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(parent);
	}

	/* update the path */
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			atomic_inc(&left->refs);
			/* left was locked after cow */
			path->nodes[level] = left;
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
			if (mid) {
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			}
		} else {
			orig_slot -= btrfs_header_nritems(left);
			path->slots[level] = orig_slot;
		}
	}
	/* double check we haven't messed things up */
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
enospc:
	if (right) {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	if (left) {
		if (path->nodes[level] != left)
			btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	return ret;
}

/*
 * Node balancing for insertion.  Here we only split or push nodes around
 * when they are completely full.  This is also done top down, so we
 * have to be pessimistic.
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];

	if (level == 0)
		return 1;

	mid = path->nodes[level];
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	if (!parent)
		return 1;

	/* first, try to make some room in the middle buffer */
	if (pslot) {
		u32 left_nr;

		left = btrfs_read_node_slot(parent, pslot - 1);
		if (IS_ERR(left))
			return PTR_ERR(left);

		__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);

		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, left, parent,
					      pslot - 1, &left,
					      BTRFS_NESTING_LEFT_COW);
			if (ret)
				wret = 1;
			else {
				wret = push_node_left(trans, left, mid, 0);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			ret = btrfs_tree_mod_log_insert_key(parent, pslot,
					BTRFS_MOD_LOG_KEY_REPLACE);
			BUG_ON(ret < 0);
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(parent);
			if (btrfs_header_nritems(left) > orig_slot) {
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				orig_slot -=
					btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
			}
			return 0;
		}
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (pslot + 1 < btrfs_header_nritems(parent)) {
		u32 right_nr;

		right = btrfs_read_node_slot(parent, pslot + 1);
		if (IS_ERR(right))
			return PTR_ERR(right);

		__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);

		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, right,
					      parent, pslot + 1,
					      &right, BTRFS_NESTING_RIGHT_COW);
			if (ret)
				wret = 1;
			else {
				wret = balance_node_right(trans, right, mid);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_node_key(right, &disk_key, 0);
			ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
					BTRFS_MOD_LOG_KEY_REPLACE);
			BUG_ON(ret < 0);
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);

			if (btrfs_header_nritems(mid) <= orig_slot) {
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
			}
			return 0;
		}
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 1;
}

/*
 * Readahead one full node of leaves, finding things that are close to the
 * block in 'slot', and triggering readahead on them.
 */
static void reada_for_search(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path,
			     int level, int slot, u64 objectid)
{
	struct extent_buffer *node;
	struct btrfs_disk_key disk_key;
	u32 nritems;
	u64 search;
	u64 target;
	u64 nread = 0;
	u64 nread_max;
	u32 nr;
	u32 blocksize;
	u32 nscan = 0;

	if (level != 1 && path->reada != READA_FORWARD_ALWAYS)
		return;

	if (!path->nodes[level])
		return;

	node = path->nodes[level];

	/*
	 * Since the time between visiting leaves is much shorter than the time
	 * between visiting nodes, limit read ahead of nodes to 1, to avoid too
	 * much IO at once (possibly random).
	 */
	if (path->reada == READA_FORWARD_ALWAYS) {
		if (level > 1)
			nread_max = node->fs_info->nodesize;
		else
			nread_max = SZ_128K;
	} else {
		nread_max = SZ_64K;
	}

	search = btrfs_node_blockptr(node, slot);
	blocksize = fs_info->nodesize;
	if (path->reada != READA_FORWARD_ALWAYS) {
		struct extent_buffer *eb;

		eb = find_extent_buffer(fs_info, search);
		if (eb) {
			free_extent_buffer(eb);
			return;
		}
	}

	target = search;

	nritems = btrfs_header_nritems(node);
	nr = slot;

	while (1) {
		if (path->reada == READA_BACK) {
			if (nr == 0)
				break;
			nr--;
		} else if (path->reada == READA_FORWARD ||
			   path->reada == READA_FORWARD_ALWAYS) {
			nr++;
			if (nr >= nritems)
				break;
		}
		if (path->reada == READA_BACK && objectid) {
			btrfs_node_key(node, &disk_key, nr);
			if (btrfs_disk_key_objectid(&disk_key) != objectid)
				break;
		}
		search = btrfs_node_blockptr(node, nr);
		if (path->reada == READA_FORWARD_ALWAYS ||
		    (search <= target && target - search <= 65536) ||
		    (search > target && search - target <= 65536)) {
			btrfs_readahead_node_child(node, nr);
			nread += blocksize;
		}
		nscan++;
		if (nread > nread_max || nscan > 32)
			break;
	}
}

static noinline void reada_for_balance(struct btrfs_path *path, int level)
{
	struct extent_buffer *parent;
	int slot;
	int nritems;

	parent = path->nodes[level + 1];
	if (!parent)
		return;

	nritems = btrfs_header_nritems(parent);
	slot = path->slots[level + 1];

	if (slot > 0)
		btrfs_readahead_node_child(parent, slot - 1);
	if (slot + 1 < nritems)
		btrfs_readahead_node_child(parent, slot + 1);
}
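
/*
 * Illustrative sketch (not part of btrfs): a caller opting in to readahead.
 * Setting path->reada before searching lets reada_for_search() above trigger
 * readahead as the tree is walked.
 */
static struct btrfs_path *__maybe_unused example_alloc_reada_path(void)
{
	struct btrfs_path *path = btrfs_alloc_path();

	if (path)
		path->reada = READA_FORWARD;
	return path;
}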

/*
 * When we walk down the tree, it is usually safe to unlock the higher layers
 * in the tree.  The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * tree.
 *
 * Callers might also have set path->keep_locks, which tells this code to keep
 * the lock if the path points to the last slot in the block.  This is part of
 * walking through the tree, and selecting the next slot in the higher block.
 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock.
 * So if lowest_unlock is 1, level 0 won't be unlocked.
 */
static noinline void unlock_up(struct btrfs_path *path, int level,
			       int lowest_unlock, int min_write_lock_level,
			       int *write_lock_level)
{
	int i;
	int skip_level = level;
	bool check_skip = true;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			break;
		if (!path->locks[i])
			break;

		if (check_skip) {
			if (path->slots[i] == 0) {
				skip_level = i + 1;
				continue;
			}

			if (path->keep_locks) {
				u32 nritems;

				nritems = btrfs_header_nritems(path->nodes[i]);
				if (nritems < 1 || path->slots[i] >= nritems - 1) {
					skip_level = i + 1;
					continue;
				}
			}
		}

		if (i >= lowest_unlock && i > skip_level) {
			check_skip = false;
			btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
			path->locks[i] = 0;
			if (write_lock_level &&
			    i > min_write_lock_level &&
			    i <= *write_lock_level) {
				*write_lock_level = i - 1;
			}
		}
	}
}

/*
 * Helper function for btrfs_search_slot() and other functions that do a search
 * on a btree.  The goal is to find a tree block in the cache (the radix tree at
 * fs_info->buffer_radix), but if we can't find it, or it's not up to date, read
 * its pages from disk.
 *
 * Returns -EAGAIN, with the path unlocked, if the caller needs to repeat the
 * whole btree search, starting again from the current root node.
 */
static int
read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
		      struct extent_buffer **eb_ret, int level, int slot,
		      const struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_tree_parent_check check = { 0 };
	u64 blocknr;
	u64 gen;
	struct extent_buffer *tmp;
	int ret;
	int parent_level;
	bool unlock_up;

	unlock_up = ((level + 1 < BTRFS_MAX_LEVEL) && p->locks[level + 1]);
	blocknr = btrfs_node_blockptr(*eb_ret, slot);
	gen = btrfs_node_ptr_generation(*eb_ret, slot);
	parent_level = btrfs_header_level(*eb_ret);
	btrfs_node_key_to_cpu(*eb_ret, &check.first_key, slot);
	check.has_first_key = true;
	check.level = parent_level - 1;
	check.transid = gen;
	check.owner_root = root->root_key.objectid;

	/*
	 * If we need to read an extent buffer from disk and we are holding locks
	 * on upper level nodes, we unlock all the upper nodes before reading the
	 * extent buffer, and then return -EAGAIN to the caller as it needs to
	 * restart the search.  We don't release the lock on the current level
	 * because we need to walk this node to figure out which blocks to read.
	 */
	tmp = find_extent_buffer(fs_info, blocknr);
	if (tmp) {
		if (p->reada == READA_FORWARD_ALWAYS)
			reada_for_search(fs_info, p, level, slot, key->objectid);

		/* first we do an atomic uptodate check */
		if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
			/*
			 * Do extra check for first_key, eb can be stale due to
			 * being cached, read from scrub, or have multiple
			 * parents (shared tree blocks).
			 */
			if (btrfs_verify_level_key(tmp,
					parent_level - 1, &check.first_key, gen)) {
				free_extent_buffer(tmp);
				return -EUCLEAN;
			}
			*eb_ret = tmp;
			return 0;
		}

		if (p->nowait) {
			free_extent_buffer(tmp);
			return -EAGAIN;
		}

		if (unlock_up)
			btrfs_unlock_up_safe(p, level + 1);

		/* now we're allowed to do a blocking uptodate check */
		ret = btrfs_read_extent_buffer(tmp, &check);
		if (ret) {
			free_extent_buffer(tmp);
			btrfs_release_path(p);
			return -EIO;
		}
		if (btrfs_check_eb_owner(tmp, root->root_key.objectid)) {
			free_extent_buffer(tmp);
			btrfs_release_path(p);
			return -EUCLEAN;
		}

		if (unlock_up)
			ret = -EAGAIN;

		goto out;
	} else if (p->nowait) {
		return -EAGAIN;
	}

	if (unlock_up) {
		btrfs_unlock_up_safe(p, level + 1);
		ret = -EAGAIN;
	} else {
		ret = 0;
	}

	if (p->reada != READA_NONE)
		reada_for_search(fs_info, p, level, slot, key->objectid);

	tmp = read_tree_block(fs_info, blocknr, &check);
	if (IS_ERR(tmp)) {
		btrfs_release_path(p);
		return PTR_ERR(tmp);
	}
	/*
	 * If the read above didn't mark this buffer up to date,
	 * it will never end up being up to date.  Set ret to EIO now
	 * and give up so that our caller doesn't loop forever
	 * on our EAGAINs.
	 */
	if (!extent_buffer_uptodate(tmp))
		ret = -EIO;

out:
	if (ret == 0) {
		*eb_ret = tmp;
	} else {
		free_extent_buffer(tmp);
		btrfs_release_path(p);
	}

	return ret;
}

/*
 * Helper function for btrfs_search_slot.  This does all of the checks
 * for node-level blocks and does any balancing required based on
 * the ins_len.
 *
 * If no extra work was required, zero is returned.  If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must
 * start over.
 */
static int
setup_nodes_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer *b, int level, int ins_len,
		       int *write_lock_level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			return -EAGAIN;
		}

		reada_for_balance(p, level);
		ret = split_node(trans, root, p, level);

		b = p->nodes[level];
	} else if (ins_len < 0 && btrfs_header_nritems(b) <
		   BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			return -EAGAIN;
		}

		reada_for_balance(p, level);
		ret = balance_level(trans, root, p, level);
		if (ret)
			return ret;

		b = p->nodes[level];
		if (!b) {
			btrfs_release_path(p);
			return -EAGAIN;
		}
		BUG_ON(btrfs_header_nritems(b) == 1);
	}
	return ret;
}

int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
		u64 iobjectid, u64 ioff, u8 key_type,
		struct btrfs_key *found_key)
{
	int ret;
	struct btrfs_key key;
	struct extent_buffer *eb;

	ASSERT(path);
	ASSERT(found_key);

	key.type = key_type;
	key.objectid = iobjectid;
	key.offset = ioff;

	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	eb = path->nodes[0];
	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
		ret = btrfs_next_leaf(fs_root, path);
		if (ret)
			return ret;
		eb = path->nodes[0];
	}

	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
	if (found_key->type != key.type ||
			found_key->objectid != key.objectid)
		return 1;

	return 0;
}
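
/*
 * Illustrative sketch (not part of btrfs; the objectid is arbitrary): using
 * btrfs_find_item(), which wraps the search above plus an optional hop to the
 * next leaf when the slot falls past the last item.
 */
static int __maybe_unused example_find_item_usage(struct btrfs_root *fs_root,
						  struct btrfs_path *path)
{
	struct btrfs_key found_key;
	int ret;

	ret = btrfs_find_item(fs_root, path, BTRFS_FIRST_FREE_OBJECTID, 0,
			      BTRFS_INODE_ITEM_KEY, &found_key);
	/* ret == 0: found; ret == 1: no such item; ret < 0: error. */
	btrfs_release_path(path);
	return ret;
}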

static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
							struct btrfs_path *p,
							int write_lock_level)
{
	struct extent_buffer *b;
	int root_lock = 0;
	int level = 0;

	if (p->search_commit_root) {
		b = root->commit_root;
		atomic_inc(&b->refs);
		level = btrfs_header_level(b);
		/*
		 * Ensure that all callers have set skip_locking when
		 * p->search_commit_root = 1.
		 */
		ASSERT(p->skip_locking == 1);

		goto out;
	}

	if (p->skip_locking) {
		b = btrfs_root_node(root);
		level = btrfs_header_level(b);
		goto out;
	}

	/* We try very hard to do read locks on the root */
	root_lock = BTRFS_READ_LOCK;

	/*
	 * If the level is set to maximum, we can skip trying to get the read
	 * lock.
	 */
	if (write_lock_level < BTRFS_MAX_LEVEL) {
		/*
		 * We don't know the level of the root node until we actually
		 * have it read locked
		 */
		if (p->nowait) {
			b = btrfs_try_read_lock_root_node(root);
			if (IS_ERR(b))
				return b;
		} else {
			b = btrfs_read_lock_root_node(root);
		}
		level = btrfs_header_level(b);
		if (level > write_lock_level)
			goto out;

		/* Whoops, must trade for write lock */
		btrfs_tree_read_unlock(b);
		free_extent_buffer(b);
	}

	b = btrfs_lock_root_node(root);
	root_lock = BTRFS_WRITE_LOCK;

	/* The level might have changed, check again */
	level = btrfs_header_level(b);

out:
	/*
	 * The root may have failed to write out at some point, and thus is no
	 * longer valid, return an error in this case.
	 */
	if (!extent_buffer_uptodate(b)) {
		if (root_lock)
			btrfs_tree_unlock_rw(b, root_lock);
		free_extent_buffer(b);
		return ERR_PTR(-EIO);
	}

	p->nodes[level] = b;
	if (!p->skip_locking)
		p->locks[level] = root_lock;
	/*
	 * Callers are responsible for dropping b's references.
	 */
	return b;
}

/*
 * Replace the extent buffer at the lowest level of the path with a cloned
 * version.  The purpose is to be able to use it safely, after releasing the
 * commit root semaphore, even if relocation is happening in parallel, the
 * transaction used for relocation is committed and the extent buffer is
 * reallocated in the next transaction.
 *
 * This is used in a context where the caller does not prevent transaction
 * commits from happening, either by holding a transaction handle or holding
 * some lock, while it's doing searches through a commit root.
 * At the moment it's only used for send operations.
 */
static int finish_need_commit_sem_search(struct btrfs_path *path)
{
	const int i = path->lowest_level;
	const int slot = path->slots[i];
	struct extent_buffer *lowest = path->nodes[i];
	struct extent_buffer *clone;

	ASSERT(path->need_commit_sem);

	if (!lowest)
		return 0;

	lockdep_assert_held_read(&lowest->fs_info->commit_root_sem);

	clone = btrfs_clone_extent_buffer(lowest);
	if (!clone)
		return -ENOMEM;

	btrfs_release_path(path);
	path->nodes[i] = clone;
	path->slots[i] = slot;

	return 0;
}

static inline int search_for_key_slot(struct extent_buffer *eb,
				      int search_low_slot,
				      const struct btrfs_key *key,
				      int prev_cmp,
				      int *slot)
{
	/*
	 * If a previous call to btrfs_bin_search() on a parent node returned an
	 * exact match (prev_cmp == 0), we can safely assume the target key will
	 * always be at slot 0 on lower levels, since each key pointer
	 * (struct btrfs_key_ptr) refers to the lowest key accessible from the
	 * subtree it points to.  Thus we can skip searching lower levels.
	 */
	if (prev_cmp == 0) {
		*slot = 0;
		return 0;
	}

	return btrfs_bin_search(eb, search_low_slot, key, slot);
}

static int search_leaf(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       const struct btrfs_key *key,
		       struct btrfs_path *path,
		       int ins_len,
		       int prev_cmp)
{
	struct extent_buffer *leaf = path->nodes[0];
	int leaf_free_space = -1;
	int search_low_slot = 0;
	int ret;
	bool do_bin_search = true;

	/*
	 * If we are doing an insertion, the leaf has enough free space and the
	 * destination slot for the key is not slot 0, then we can unlock our
	 * write lock on the parent, and any other upper nodes, before doing the
	 * binary search on the leaf (with search_for_key_slot()), allowing other
	 * tasks to lock the parent and any other upper nodes.
	 */
	if (ins_len > 0) {
		/*
		 * Cache the leaf free space, since we will need it later and it
		 * will not change until then.
		 */
		leaf_free_space = btrfs_leaf_free_space(leaf);

		/*
		 * !path->locks[1] means we have a single node tree, the leaf is
		 * the root of the tree.
		 */
		if (path->locks[1] && leaf_free_space >= ins_len) {
			struct btrfs_disk_key first_key;

			ASSERT(btrfs_header_nritems(leaf) > 0);
			btrfs_item_key(leaf, &first_key, 0);

			/*
			 * Doing the extra comparison with the first key is cheap,
			 * taking into account that the first key is very likely
			 * already in a cache line because it immediately follows
			 * the extent buffer's header and we have recently accessed
			 * the header's level field.
			 */
			ret = comp_keys(&first_key, key);
			if (ret < 0) {
				/*
				 * The first key is smaller than the key we want
				 * to insert, so we are safe to unlock all upper
				 * nodes and we have to do the binary search.
				 *
				 * We do use btrfs_unlock_up_safe() and not
				 * unlock_up() because the latter does not unlock
				 * nodes with a slot of 0 - we can safely unlock
				 * any node even if its slot is 0 since in this
				 * case the key does not end up at slot 0 of the
				 * leaf and there's no need to split the leaf.
				 */
				btrfs_unlock_up_safe(path, 1);
				search_low_slot = 1;
			} else {
				/*
				 * The first key is greater than or equal to the
				 * key we want to insert, so we can skip the
				 * binary search as the target key will be at
				 * slot 0.
				 *
				 * We cannot unlock upper nodes when the key is
				 * less than the first key, because we will need
				 * to update the key at slot 0 of the parent node
				 * and possibly of other upper nodes too.
				 * If the key matches the first key, then we can
				 * unlock all the upper nodes, using
				 * btrfs_unlock_up_safe() instead of unlock_up()
				 * as stated above.
				 */
				if (ret == 0)
					btrfs_unlock_up_safe(path, 1);
				/*
				 * ret is already 0 or 1, matching the result of
				 * a btrfs_bin_search() call, so there is no need
				 * to adjust it.
				 */
				do_bin_search = false;
				path->slots[0] = 0;
			}
		}
	}

	if (do_bin_search) {
		ret = search_for_key_slot(leaf, search_low_slot, key,
					  prev_cmp, &path->slots[0]);
		if (ret < 0)
			return ret;
	}

	if (ins_len > 0) {
		/*
		 * The item key already exists.  In this case, if we are allowed
		 * to insert the item (for example, in the dir_item case, item
		 * key collision is allowed), it will be merged with the original
		 * item.  Only the item size grows, no new btrfs item will be
		 * added.  If search_for_extension is not set, ins_len already
		 * accounts for the size of a struct btrfs_item, so deduct it
		 * here so the leaf space check will be correct.
		 */
		if (ret == 0 && !path->search_for_extension) {
			ASSERT(ins_len >= sizeof(struct btrfs_item));
			ins_len -= sizeof(struct btrfs_item);
		}

		ASSERT(leaf_free_space >= 0);

		if (leaf_free_space < ins_len) {
			int err;

			err = split_leaf(trans, root, key, path, ins_len,
					 (ret == 0));
			ASSERT(err <= 0);
			if (WARN_ON(err > 0))
				err = -EUCLEAN;
			if (err)
				ret = err;
		}
	}

	return ret;
}
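
/*
 * Illustrative sketch (not part of btrfs): the ins_len convention of
 * btrfs_search_slot(), documented below.  An insertion search reserves room
 * for the item header plus its payload, and must pass cow == 1.
 */
static int __maybe_unused example_search_for_insert(struct btrfs_trans_handle *trans,
						    struct btrfs_root *root,
						    const struct btrfs_key *key,
						    struct btrfs_path *path,
						    u32 data_size)
{
	return btrfs_search_slot(trans, root, key, path,
				 data_size + sizeof(struct btrfs_item), 1);
}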
		 * item. Only the item size grows, no new btrfs item will be
		 * added. If search_for_extension is not set, ins_len already
		 * accounts for the size of struct btrfs_item; deduct it here
		 * so the leaf space check will be correct.
		 */
		if (ret == 0 && !path->search_for_extension) {
			ASSERT(ins_len >= sizeof(struct btrfs_item));
			ins_len -= sizeof(struct btrfs_item);
		}

		ASSERT(leaf_free_space >= 0);

		if (leaf_free_space < ins_len) {
			int err;

			err = split_leaf(trans, root, key, path, ins_len,
					 (ret == 0));
			ASSERT(err <= 0);
			if (WARN_ON(err > 0))
				err = -EUCLEAN;
			if (err)
				ret = err;
		}
	}

	return ret;
}

/*
 * btrfs_search_slot - look for a key in a tree and perform necessary
 * modifications to preserve tree invariants.
 *
 * @trans:	Handle of transaction, used when modifying the tree
 * @p:		Holds all btree nodes along the search path
 * @root:	The root node of the tree
 * @key:	The key we are looking for
 * @ins_len:	Indicates purpose of search:
 *              >0 for inserts, it's the size of the item inserted (*)
 *              <0 for deletions
 *               0 for plain searches, not modifying the tree
 *
 *              (*) If the size of the item inserted doesn't include
 *              sizeof(struct btrfs_item), then p->search_for_extension must
 *              be set.
 * @cow:	boolean indicating whether CoW operations should be performed.
 *              Must always be 1 when modifying the tree.
 *
 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree.
 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible)
 *
 * If @key is found, 0 is returned and you can find the item in the leaf level
 * of the path (level 0)
 *
 * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
 * points to the slot where it should be inserted
 *
 * If an error is encountered while searching the tree a negative error number
 * is returned
 */
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *key, struct btrfs_path *p,
		      int ins_len, int cow)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *b;
	int slot;
	int ret;
	int err;
	int level;
	int lowest_unlock = 1;
	/* everything at write_lock_level or lower must be write locked */
	int write_lock_level = 0;
	u8 lowest_level = 0;
	int min_write_lock_level;
	int prev_cmp;

	might_sleep();

	lowest_level = p->lowest_level;
	WARN_ON(lowest_level && ins_len > 0);
	WARN_ON(p->nodes[0] != NULL);
	BUG_ON(!cow && ins_len);

	/*
	 * For now only allow nowait for read-only operations. There's no
	 * strict reason why we can't, we just only need it for reads so it's
	 * only implemented for reads.
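	 *
	 * A minimal read-only usage sketch (hypothetical caller, error
	 * handling trimmed; trans is NULL and cow is 0 since nothing is
	 * modified):
	 *
	 *	struct btrfs_path *path;
	 *	int ret;
	 *
	 *	path = btrfs_alloc_path();
	 *	if (!path)
	 *		return -ENOMEM;
	 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	 *	if (ret < 0)
	 *		goto out;
	 *	(ret == 0: exact match at path->slots[0] of path->nodes[0],
	 *	 ret == 1: the slot where the key would be inserted)
	 * out:
	 *	btrfs_free_path(path);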
	 */
	ASSERT(!p->nowait || !cow);

	if (ins_len < 0) {
		lowest_unlock = 2;

		/*
		 * When we are removing items, we might have to go up to level
		 * two as we update tree pointers. Make sure we keep write
		 * locks for those levels as well.
		 */
		write_lock_level = 2;
	} else if (ins_len > 0) {
		/*
		 * for inserting items, make sure we have a write lock on
		 * level 1 so we can update keys
		 */
		write_lock_level = 1;
	}

	if (!cow)
		write_lock_level = -1;

	if (cow && (p->keep_locks || p->lowest_level))
		write_lock_level = BTRFS_MAX_LEVEL;

	min_write_lock_level = write_lock_level;

	if (p->need_commit_sem) {
		ASSERT(p->search_commit_root);
		if (p->nowait) {
			if (!down_read_trylock(&fs_info->commit_root_sem))
				return -EAGAIN;
		} else {
			down_read(&fs_info->commit_root_sem);
		}
	}

again:
	prev_cmp = -1;
	b = btrfs_search_slot_get_root(root, p, write_lock_level);
	if (IS_ERR(b)) {
		ret = PTR_ERR(b);
		goto done;
	}

	while (b) {
		int dec = 0;

		level = btrfs_header_level(b);

		if (cow) {
			bool last_level = (level == (BTRFS_MAX_LEVEL - 1));

			/*
			 * if we don't really need to cow this block
			 * then we don't want to set the path blocking,
			 * so we test it here
			 */
			if (!should_cow_block(trans, root, b))
				goto cow_done;

			/*
			 * must have write locks on this node and the
			 * parent
			 */
			if (level > write_lock_level ||
			    (level + 1 > write_lock_level &&
			     level + 1 < BTRFS_MAX_LEVEL &&
			     p->nodes[level + 1])) {
				write_lock_level = level + 1;
				btrfs_release_path(p);
				goto again;
			}

			if (last_level)
				err = btrfs_cow_block(trans, root, b, NULL, 0,
						      &b,
						      BTRFS_NESTING_COW);
			else
				err = btrfs_cow_block(trans, root, b,
						      p->nodes[level + 1],
						      p->slots[level + 1], &b,
						      BTRFS_NESTING_COW);
			if (err) {
				ret = err;
				goto done;
			}
		}
cow_done:
		p->nodes[level] = b;

		/*
		 * we have a lock on b and as long as we aren't changing
		 * the tree, there is no way for the items in b to change.
		 * It is safe to drop the lock on our parent before we
		 * go through the expensive btree search on b.
		 *
		 * If we're inserting or deleting (ins_len != 0), then we might
		 * be changing slot zero, which may require changing the parent.
		 * So, we can't drop the lock until after we know which slot
		 * we're operating on.
2166 */ 2167 if (!ins_len && !p->keep_locks) { 2168 int u = level + 1; 2169 2170 if (u < BTRFS_MAX_LEVEL && p->locks[u]) { 2171 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]); 2172 p->locks[u] = 0; 2173 } 2174 } 2175 2176 if (level == 0) { 2177 if (ins_len > 0) 2178 ASSERT(write_lock_level >= 1); 2179 2180 ret = search_leaf(trans, root, key, p, ins_len, prev_cmp); 2181 if (!p->search_for_split) 2182 unlock_up(p, level, lowest_unlock, 2183 min_write_lock_level, NULL); 2184 goto done; 2185 } 2186 2187 ret = search_for_key_slot(b, 0, key, prev_cmp, &slot); 2188 if (ret < 0) 2189 goto done; 2190 prev_cmp = ret; 2191 2192 if (ret && slot > 0) { 2193 dec = 1; 2194 slot--; 2195 } 2196 p->slots[level] = slot; 2197 err = setup_nodes_for_search(trans, root, p, b, level, ins_len, 2198 &write_lock_level); 2199 if (err == -EAGAIN) 2200 goto again; 2201 if (err) { 2202 ret = err; 2203 goto done; 2204 } 2205 b = p->nodes[level]; 2206 slot = p->slots[level]; 2207 2208 /* 2209 * Slot 0 is special, if we change the key we have to update 2210 * the parent pointer which means we must have a write lock on 2211 * the parent 2212 */ 2213 if (slot == 0 && ins_len && write_lock_level < level + 1) { 2214 write_lock_level = level + 1; 2215 btrfs_release_path(p); 2216 goto again; 2217 } 2218 2219 unlock_up(p, level, lowest_unlock, min_write_lock_level, 2220 &write_lock_level); 2221 2222 if (level == lowest_level) { 2223 if (dec) 2224 p->slots[level]++; 2225 goto done; 2226 } 2227 2228 err = read_block_for_search(root, p, &b, level, slot, key); 2229 if (err == -EAGAIN) 2230 goto again; 2231 if (err) { 2232 ret = err; 2233 goto done; 2234 } 2235 2236 if (!p->skip_locking) { 2237 level = btrfs_header_level(b); 2238 2239 btrfs_maybe_reset_lockdep_class(root, b); 2240 2241 if (level <= write_lock_level) { 2242 btrfs_tree_lock(b); 2243 p->locks[level] = BTRFS_WRITE_LOCK; 2244 } else { 2245 if (p->nowait) { 2246 if (!btrfs_try_tree_read_lock(b)) { 2247 free_extent_buffer(b); 2248 ret = -EAGAIN; 2249 goto done; 2250 } 2251 } else { 2252 btrfs_tree_read_lock(b); 2253 } 2254 p->locks[level] = BTRFS_READ_LOCK; 2255 } 2256 p->nodes[level] = b; 2257 } 2258 } 2259 ret = 1; 2260 done: 2261 if (ret < 0 && !p->skip_release_on_error) 2262 btrfs_release_path(p); 2263 2264 if (p->need_commit_sem) { 2265 int ret2; 2266 2267 ret2 = finish_need_commit_sem_search(p); 2268 up_read(&fs_info->commit_root_sem); 2269 if (ret2) 2270 ret = ret2; 2271 } 2272 2273 return ret; 2274 } 2275 ALLOW_ERROR_INJECTION(btrfs_search_slot, ERRNO); 2276 2277 /* 2278 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the 2279 * current state of the tree together with the operations recorded in the tree 2280 * modification log to search for the key in a previous version of this tree, as 2281 * denoted by the time_seq parameter. 2282 * 2283 * Naturally, there is no support for insert, delete or cow operations. 2284 * 2285 * The resulting path and return value will be set up as if we called 2286 * btrfs_search_slot at that point in time with ins_len and cow both set to 0. 
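 *
 * Usage sketch (hypothetical caller; time_seq would come from the tree
 * modification log, e.g. a sequence number obtained with
 * btrfs_get_tree_mod_seq()):
 *
 *	ret = btrfs_search_old_slot(root, &key, path, time_seq);
 *	if (ret < 0)
 *		return ret;
 *	(on ret == 0 the old version of the item is at path->slots[0])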
2287 */ 2288 int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key, 2289 struct btrfs_path *p, u64 time_seq) 2290 { 2291 struct btrfs_fs_info *fs_info = root->fs_info; 2292 struct extent_buffer *b; 2293 int slot; 2294 int ret; 2295 int err; 2296 int level; 2297 int lowest_unlock = 1; 2298 u8 lowest_level = 0; 2299 2300 lowest_level = p->lowest_level; 2301 WARN_ON(p->nodes[0] != NULL); 2302 ASSERT(!p->nowait); 2303 2304 if (p->search_commit_root) { 2305 BUG_ON(time_seq); 2306 return btrfs_search_slot(NULL, root, key, p, 0, 0); 2307 } 2308 2309 again: 2310 b = btrfs_get_old_root(root, time_seq); 2311 if (!b) { 2312 ret = -EIO; 2313 goto done; 2314 } 2315 level = btrfs_header_level(b); 2316 p->locks[level] = BTRFS_READ_LOCK; 2317 2318 while (b) { 2319 int dec = 0; 2320 2321 level = btrfs_header_level(b); 2322 p->nodes[level] = b; 2323 2324 /* 2325 * we have a lock on b and as long as we aren't changing 2326 * the tree, there is no way to for the items in b to change. 2327 * It is safe to drop the lock on our parent before we 2328 * go through the expensive btree search on b. 2329 */ 2330 btrfs_unlock_up_safe(p, level + 1); 2331 2332 ret = btrfs_bin_search(b, 0, key, &slot); 2333 if (ret < 0) 2334 goto done; 2335 2336 if (level == 0) { 2337 p->slots[level] = slot; 2338 unlock_up(p, level, lowest_unlock, 0, NULL); 2339 goto done; 2340 } 2341 2342 if (ret && slot > 0) { 2343 dec = 1; 2344 slot--; 2345 } 2346 p->slots[level] = slot; 2347 unlock_up(p, level, lowest_unlock, 0, NULL); 2348 2349 if (level == lowest_level) { 2350 if (dec) 2351 p->slots[level]++; 2352 goto done; 2353 } 2354 2355 err = read_block_for_search(root, p, &b, level, slot, key); 2356 if (err == -EAGAIN) 2357 goto again; 2358 if (err) { 2359 ret = err; 2360 goto done; 2361 } 2362 2363 level = btrfs_header_level(b); 2364 btrfs_tree_read_lock(b); 2365 b = btrfs_tree_mod_log_rewind(fs_info, p, b, time_seq); 2366 if (!b) { 2367 ret = -ENOMEM; 2368 goto done; 2369 } 2370 p->locks[level] = BTRFS_READ_LOCK; 2371 p->nodes[level] = b; 2372 } 2373 ret = 1; 2374 done: 2375 if (ret < 0) 2376 btrfs_release_path(p); 2377 2378 return ret; 2379 } 2380 2381 /* 2382 * Search the tree again to find a leaf with smaller keys. 2383 * Returns 0 if it found something. 2384 * Returns 1 if there are no smaller keys. 2385 * Returns < 0 on error. 2386 * 2387 * This may release the path, and so you may lose any locks held at the 2388 * time you call it. 2389 */ 2390 static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path) 2391 { 2392 struct btrfs_key key; 2393 struct btrfs_key orig_key; 2394 struct btrfs_disk_key found_key; 2395 int ret; 2396 2397 btrfs_item_key_to_cpu(path->nodes[0], &key, 0); 2398 orig_key = key; 2399 2400 if (key.offset > 0) { 2401 key.offset--; 2402 } else if (key.type > 0) { 2403 key.type--; 2404 key.offset = (u64)-1; 2405 } else if (key.objectid > 0) { 2406 key.objectid--; 2407 key.type = (u8)-1; 2408 key.offset = (u64)-1; 2409 } else { 2410 return 1; 2411 } 2412 2413 btrfs_release_path(path); 2414 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2415 if (ret <= 0) 2416 return ret; 2417 2418 /* 2419 * Previous key not found. 
Even if we were at slot 0 of the leaf we had
	 * before releasing the path and calling btrfs_search_slot(), we now may
	 * be in a slot pointing to the same original key - this can happen if
	 * after we released the path, one or more items were moved from a
	 * sibling leaf into the front of the leaf we had due to an insertion
	 * (see push_leaf_right()).
	 * If we hit this case and our slot is > 0, just decrement the slot
	 * so that the caller does not process the same key again, which may or
	 * may not break the caller, depending on its logic.
	 */
	if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
		btrfs_item_key(path->nodes[0], &found_key, path->slots[0]);
		ret = comp_keys(&found_key, &orig_key);
		if (ret == 0) {
			if (path->slots[0] > 0) {
				path->slots[0]--;
				return 0;
			}
			/*
			 * At slot 0, same key as before, it means orig_key is
			 * the lowest, leftmost, key in the tree. We're done.
			 */
			return 1;
		}
	}

	btrfs_item_key(path->nodes[0], &found_key, 0);
	ret = comp_keys(&found_key, &key);
	/*
	 * We might have had an item with the previous key in the tree right
	 * before we released our path. And after we released our path, that
	 * item might have been pushed to the first slot (0) of the leaf we
	 * were holding due to a tree balance. Alternatively, an item with the
	 * previous key can exist as the only element of a leaf (big fat item).
	 * Therefore account for these 2 cases, so that our callers (like
	 * btrfs_previous_item) don't miss an existing item with a key matching
	 * the previous key we computed above.
	 */
	if (ret <= 0)
		return 0;
	return 1;
}

/*
 * helper to use instead of btrfs_search_slot() if no exact match is needed
 * but instead the next or previous item should be returned.
 * When find_higher is true, the next higher item is returned, the next lower
 * otherwise.
 * When return_any and find_higher are both true, and no higher item is found,
 * return the next lower instead.
 * When return_any is true and find_higher is false, and no lower item is
 * found, return the next higher instead.
 * It returns 0 if any item is found, 1 if none is found (tree empty), and
 * < 0 on error
 */
int btrfs_search_slot_for_read(struct btrfs_root *root,
			       const struct btrfs_key *key,
			       struct btrfs_path *p, int find_higher,
			       int return_any)
{
	int ret;
	struct extent_buffer *leaf;

again:
	ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
	if (ret <= 0)
		return ret;
	/*
	 * a return value of 1 means the path is at the position where the
	 * item should be inserted. Normally this is the next bigger item,
	 * but in case the previous item is the last in a leaf, path points
	 * to the first free slot in the previous leaf, i.e. at an invalid
	 * item.
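	 *
	 * For example, in a leaf with 25 items the path may end up with
	 * p->slots[0] == 25, one past the last valid slot (24); the code
	 * below must cope with that: the find_higher branch advances to the
	 * next leaf, while the lower-item branch steps back to the last
	 * valid slot.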
2492 */ 2493 leaf = p->nodes[0]; 2494 2495 if (find_higher) { 2496 if (p->slots[0] >= btrfs_header_nritems(leaf)) { 2497 ret = btrfs_next_leaf(root, p); 2498 if (ret <= 0) 2499 return ret; 2500 if (!return_any) 2501 return 1; 2502 /* 2503 * no higher item found, return the next 2504 * lower instead 2505 */ 2506 return_any = 0; 2507 find_higher = 0; 2508 btrfs_release_path(p); 2509 goto again; 2510 } 2511 } else { 2512 if (p->slots[0] == 0) { 2513 ret = btrfs_prev_leaf(root, p); 2514 if (ret < 0) 2515 return ret; 2516 if (!ret) { 2517 leaf = p->nodes[0]; 2518 if (p->slots[0] == btrfs_header_nritems(leaf)) 2519 p->slots[0]--; 2520 return 0; 2521 } 2522 if (!return_any) 2523 return 1; 2524 /* 2525 * no lower item found, return the next 2526 * higher instead 2527 */ 2528 return_any = 0; 2529 find_higher = 1; 2530 btrfs_release_path(p); 2531 goto again; 2532 } else { 2533 --p->slots[0]; 2534 } 2535 } 2536 return 0; 2537 } 2538 2539 /* 2540 * Execute search and call btrfs_previous_item to traverse backwards if the item 2541 * was not found. 2542 * 2543 * Return 0 if found, 1 if not found and < 0 if error. 2544 */ 2545 int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key, 2546 struct btrfs_path *path) 2547 { 2548 int ret; 2549 2550 ret = btrfs_search_slot(NULL, root, key, path, 0, 0); 2551 if (ret > 0) 2552 ret = btrfs_previous_item(root, path, key->objectid, key->type); 2553 2554 if (ret == 0) 2555 btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]); 2556 2557 return ret; 2558 } 2559 2560 /* 2561 * Search for a valid slot for the given path. 2562 * 2563 * @root: The root node of the tree. 2564 * @key: Will contain a valid item if found. 2565 * @path: The starting point to validate the slot. 2566 * 2567 * Return: 0 if the item is valid 2568 * 1 if not found 2569 * <0 if error. 2570 */ 2571 int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key, 2572 struct btrfs_path *path) 2573 { 2574 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { 2575 int ret; 2576 2577 ret = btrfs_next_leaf(root, path); 2578 if (ret) 2579 return ret; 2580 } 2581 2582 btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]); 2583 return 0; 2584 } 2585 2586 /* 2587 * adjust the pointers going up the tree, starting at level 2588 * making sure the right key of each node is points to 'key'. 2589 * This is used after shifting pointers to the left, so it stops 2590 * fixing up pointers when a given leaf/node is not in slot 0 of the 2591 * higher levels 2592 * 2593 */ 2594 static void fixup_low_keys(struct btrfs_path *path, 2595 struct btrfs_disk_key *key, int level) 2596 { 2597 int i; 2598 struct extent_buffer *t; 2599 int ret; 2600 2601 for (i = level; i < BTRFS_MAX_LEVEL; i++) { 2602 int tslot = path->slots[i]; 2603 2604 if (!path->nodes[i]) 2605 break; 2606 t = path->nodes[i]; 2607 ret = btrfs_tree_mod_log_insert_key(t, tslot, 2608 BTRFS_MOD_LOG_KEY_REPLACE); 2609 BUG_ON(ret < 0); 2610 btrfs_set_node_key(t, key, tslot); 2611 btrfs_mark_buffer_dirty(path->nodes[i]); 2612 if (tslot != 0) 2613 break; 2614 } 2615 } 2616 2617 /* 2618 * update item key. 2619 * 2620 * This function isn't completely safe. 
It's the caller's responsibility
 * to ensure that the new key won't break the key order.
 */
void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path,
			     const struct btrfs_key *new_key)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *eb;
	int slot;

	eb = path->nodes[0];
	slot = path->slots[0];
	if (slot > 0) {
		btrfs_item_key(eb, &disk_key, slot - 1);
		if (unlikely(comp_keys(&disk_key, new_key) >= 0)) {
			btrfs_print_leaf(eb);
			btrfs_crit(fs_info,
		"slot %u key (%llu %u %llu) new key (%llu %u %llu)",
				   slot, btrfs_disk_key_objectid(&disk_key),
				   btrfs_disk_key_type(&disk_key),
				   btrfs_disk_key_offset(&disk_key),
				   new_key->objectid, new_key->type,
				   new_key->offset);
			BUG();
		}
	}
	if (slot < btrfs_header_nritems(eb) - 1) {
		btrfs_item_key(eb, &disk_key, slot + 1);
		if (unlikely(comp_keys(&disk_key, new_key) <= 0)) {
			btrfs_print_leaf(eb);
			btrfs_crit(fs_info,
		"slot %u key (%llu %u %llu) new key (%llu %u %llu)",
				   slot, btrfs_disk_key_objectid(&disk_key),
				   btrfs_disk_key_type(&disk_key),
				   btrfs_disk_key_offset(&disk_key),
				   new_key->objectid, new_key->type,
				   new_key->offset);
			BUG();
		}
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(eb, &disk_key, slot);
	btrfs_mark_buffer_dirty(eb);
	if (slot == 0)
		fixup_low_keys(path, &disk_key, 1);
}

/*
 * Check key order of two sibling extent buffers.
 *
 * Return true if something is wrong.
 * Return false if everything is fine.
 *
 * Tree-checker only works inside one tree block, thus the following
 * corruption cannot be detected by tree-checker:
 *
 * Leaf @left			| Leaf @right
 * --------------------------------------------------------------
 * | 1 | 2 | 3 | 4 | 5 | f6 |   | 7 | 8 |
 *
 * Key f6 in leaf @left itself is valid, but not valid when the next
 * key in leaf @right is 7.
 * This can only be checked at tree block merge time.
 * And since tree checker has ensured all key order in each tree block
 * is correct, we only need to bother with the last key of @left and the
 * first key of @right.
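 *
 * Concretely (hypothetical keys in (objectid type offset) form): if the
 * last key of @left is (100 1 0) while the first key of @right is
 * (90 1 0), each leaf is internally ordered and passes the tree checker,
 * yet the pair is corrupt because @left's last key does not sort before
 * @right's first key.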
2688 */ 2689 static bool check_sibling_keys(struct extent_buffer *left, 2690 struct extent_buffer *right) 2691 { 2692 struct btrfs_key left_last; 2693 struct btrfs_key right_first; 2694 int level = btrfs_header_level(left); 2695 int nr_left = btrfs_header_nritems(left); 2696 int nr_right = btrfs_header_nritems(right); 2697 2698 /* No key to check in one of the tree blocks */ 2699 if (!nr_left || !nr_right) 2700 return false; 2701 2702 if (level) { 2703 btrfs_node_key_to_cpu(left, &left_last, nr_left - 1); 2704 btrfs_node_key_to_cpu(right, &right_first, 0); 2705 } else { 2706 btrfs_item_key_to_cpu(left, &left_last, nr_left - 1); 2707 btrfs_item_key_to_cpu(right, &right_first, 0); 2708 } 2709 2710 if (unlikely(btrfs_comp_cpu_keys(&left_last, &right_first) >= 0)) { 2711 btrfs_crit(left->fs_info, "left extent buffer:"); 2712 btrfs_print_tree(left, false); 2713 btrfs_crit(left->fs_info, "right extent buffer:"); 2714 btrfs_print_tree(right, false); 2715 btrfs_crit(left->fs_info, 2716 "bad key order, sibling blocks, left last (%llu %u %llu) right first (%llu %u %llu)", 2717 left_last.objectid, left_last.type, 2718 left_last.offset, right_first.objectid, 2719 right_first.type, right_first.offset); 2720 return true; 2721 } 2722 return false; 2723 } 2724 2725 /* 2726 * try to push data from one node into the next node left in the 2727 * tree. 2728 * 2729 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible 2730 * error, and > 0 if there was no room in the left hand block. 2731 */ 2732 static int push_node_left(struct btrfs_trans_handle *trans, 2733 struct extent_buffer *dst, 2734 struct extent_buffer *src, int empty) 2735 { 2736 struct btrfs_fs_info *fs_info = trans->fs_info; 2737 int push_items = 0; 2738 int src_nritems; 2739 int dst_nritems; 2740 int ret = 0; 2741 2742 src_nritems = btrfs_header_nritems(src); 2743 dst_nritems = btrfs_header_nritems(dst); 2744 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems; 2745 WARN_ON(btrfs_header_generation(src) != trans->transid); 2746 WARN_ON(btrfs_header_generation(dst) != trans->transid); 2747 2748 if (!empty && src_nritems <= 8) 2749 return 1; 2750 2751 if (push_items <= 0) 2752 return 1; 2753 2754 if (empty) { 2755 push_items = min(src_nritems, push_items); 2756 if (push_items < src_nritems) { 2757 /* leave at least 8 pointers in the node if 2758 * we aren't going to empty it 2759 */ 2760 if (src_nritems - push_items < 8) { 2761 if (push_items <= 8) 2762 return 1; 2763 push_items -= 8; 2764 } 2765 } 2766 } else 2767 push_items = min(src_nritems - 8, push_items); 2768 2769 /* dst is the left eb, src is the middle eb */ 2770 if (check_sibling_keys(dst, src)) { 2771 ret = -EUCLEAN; 2772 btrfs_abort_transaction(trans, ret); 2773 return ret; 2774 } 2775 ret = btrfs_tree_mod_log_eb_copy(dst, src, dst_nritems, 0, push_items); 2776 if (ret) { 2777 btrfs_abort_transaction(trans, ret); 2778 return ret; 2779 } 2780 copy_extent_buffer(dst, src, 2781 btrfs_node_key_ptr_offset(dst, dst_nritems), 2782 btrfs_node_key_ptr_offset(src, 0), 2783 push_items * sizeof(struct btrfs_key_ptr)); 2784 2785 if (push_items < src_nritems) { 2786 /* 2787 * Don't call btrfs_tree_mod_log_insert_move() here, key removal 2788 * was already fully logged by btrfs_tree_mod_log_eb_copy() above. 
2789 */ 2790 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(src, 0), 2791 btrfs_node_key_ptr_offset(src, push_items), 2792 (src_nritems - push_items) * 2793 sizeof(struct btrfs_key_ptr)); 2794 } 2795 btrfs_set_header_nritems(src, src_nritems - push_items); 2796 btrfs_set_header_nritems(dst, dst_nritems + push_items); 2797 btrfs_mark_buffer_dirty(src); 2798 btrfs_mark_buffer_dirty(dst); 2799 2800 return ret; 2801 } 2802 2803 /* 2804 * try to push data from one node into the next node right in the 2805 * tree. 2806 * 2807 * returns 0 if some ptrs were pushed, < 0 if there was some horrible 2808 * error, and > 0 if there was no room in the right hand block. 2809 * 2810 * this will only push up to 1/2 the contents of the left node over 2811 */ 2812 static int balance_node_right(struct btrfs_trans_handle *trans, 2813 struct extent_buffer *dst, 2814 struct extent_buffer *src) 2815 { 2816 struct btrfs_fs_info *fs_info = trans->fs_info; 2817 int push_items = 0; 2818 int max_push; 2819 int src_nritems; 2820 int dst_nritems; 2821 int ret = 0; 2822 2823 WARN_ON(btrfs_header_generation(src) != trans->transid); 2824 WARN_ON(btrfs_header_generation(dst) != trans->transid); 2825 2826 src_nritems = btrfs_header_nritems(src); 2827 dst_nritems = btrfs_header_nritems(dst); 2828 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems; 2829 if (push_items <= 0) 2830 return 1; 2831 2832 if (src_nritems < 4) 2833 return 1; 2834 2835 max_push = src_nritems / 2 + 1; 2836 /* don't try to empty the node */ 2837 if (max_push >= src_nritems) 2838 return 1; 2839 2840 if (max_push < push_items) 2841 push_items = max_push; 2842 2843 /* dst is the right eb, src is the middle eb */ 2844 if (check_sibling_keys(src, dst)) { 2845 ret = -EUCLEAN; 2846 btrfs_abort_transaction(trans, ret); 2847 return ret; 2848 } 2849 ret = btrfs_tree_mod_log_insert_move(dst, push_items, 0, dst_nritems); 2850 BUG_ON(ret < 0); 2851 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(dst, push_items), 2852 btrfs_node_key_ptr_offset(dst, 0), 2853 (dst_nritems) * 2854 sizeof(struct btrfs_key_ptr)); 2855 2856 ret = btrfs_tree_mod_log_eb_copy(dst, src, 0, src_nritems - push_items, 2857 push_items); 2858 if (ret) { 2859 btrfs_abort_transaction(trans, ret); 2860 return ret; 2861 } 2862 copy_extent_buffer(dst, src, 2863 btrfs_node_key_ptr_offset(dst, 0), 2864 btrfs_node_key_ptr_offset(src, src_nritems - push_items), 2865 push_items * sizeof(struct btrfs_key_ptr)); 2866 2867 btrfs_set_header_nritems(src, src_nritems - push_items); 2868 btrfs_set_header_nritems(dst, dst_nritems + push_items); 2869 2870 btrfs_mark_buffer_dirty(src); 2871 btrfs_mark_buffer_dirty(dst); 2872 2873 return ret; 2874 } 2875 2876 /* 2877 * helper function to insert a new root level in the tree. 2878 * A new node is allocated, and a single item is inserted to 2879 * point to the existing root 2880 * 2881 * returns zero on success or < 0 on failure. 
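 *
 * Schematically, for a tree whose root was a single block A:
 *
 *	before:	root -> [ A ]
 *	after:	root -> [ C ] -> [ A ]
 *
 * where the new root C holds exactly one key pointer, carrying A's lowest
 * key and pointing to A.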
2882 */ 2883 static noinline int insert_new_root(struct btrfs_trans_handle *trans, 2884 struct btrfs_root *root, 2885 struct btrfs_path *path, int level) 2886 { 2887 struct btrfs_fs_info *fs_info = root->fs_info; 2888 u64 lower_gen; 2889 struct extent_buffer *lower; 2890 struct extent_buffer *c; 2891 struct extent_buffer *old; 2892 struct btrfs_disk_key lower_key; 2893 int ret; 2894 2895 BUG_ON(path->nodes[level]); 2896 BUG_ON(path->nodes[level-1] != root->node); 2897 2898 lower = path->nodes[level-1]; 2899 if (level == 1) 2900 btrfs_item_key(lower, &lower_key, 0); 2901 else 2902 btrfs_node_key(lower, &lower_key, 0); 2903 2904 c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, 2905 &lower_key, level, root->node->start, 0, 2906 BTRFS_NESTING_NEW_ROOT); 2907 if (IS_ERR(c)) 2908 return PTR_ERR(c); 2909 2910 root_add_used(root, fs_info->nodesize); 2911 2912 btrfs_set_header_nritems(c, 1); 2913 btrfs_set_node_key(c, &lower_key, 0); 2914 btrfs_set_node_blockptr(c, 0, lower->start); 2915 lower_gen = btrfs_header_generation(lower); 2916 WARN_ON(lower_gen != trans->transid); 2917 2918 btrfs_set_node_ptr_generation(c, 0, lower_gen); 2919 2920 btrfs_mark_buffer_dirty(c); 2921 2922 old = root->node; 2923 ret = btrfs_tree_mod_log_insert_root(root->node, c, false); 2924 BUG_ON(ret < 0); 2925 rcu_assign_pointer(root->node, c); 2926 2927 /* the super has an extra ref to root->node */ 2928 free_extent_buffer(old); 2929 2930 add_root_to_dirty_list(root); 2931 atomic_inc(&c->refs); 2932 path->nodes[level] = c; 2933 path->locks[level] = BTRFS_WRITE_LOCK; 2934 path->slots[level] = 0; 2935 return 0; 2936 } 2937 2938 /* 2939 * worker function to insert a single pointer in a node. 2940 * the node should have enough room for the pointer already 2941 * 2942 * slot and level indicate where you want the key to go, and 2943 * blocknr is the block the key points to. 2944 */ 2945 static void insert_ptr(struct btrfs_trans_handle *trans, 2946 struct btrfs_path *path, 2947 struct btrfs_disk_key *key, u64 bytenr, 2948 int slot, int level) 2949 { 2950 struct extent_buffer *lower; 2951 int nritems; 2952 int ret; 2953 2954 BUG_ON(!path->nodes[level]); 2955 btrfs_assert_tree_write_locked(path->nodes[level]); 2956 lower = path->nodes[level]; 2957 nritems = btrfs_header_nritems(lower); 2958 BUG_ON(slot > nritems); 2959 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(trans->fs_info)); 2960 if (slot != nritems) { 2961 if (level) { 2962 ret = btrfs_tree_mod_log_insert_move(lower, slot + 1, 2963 slot, nritems - slot); 2964 BUG_ON(ret < 0); 2965 } 2966 memmove_extent_buffer(lower, 2967 btrfs_node_key_ptr_offset(lower, slot + 1), 2968 btrfs_node_key_ptr_offset(lower, slot), 2969 (nritems - slot) * sizeof(struct btrfs_key_ptr)); 2970 } 2971 if (level) { 2972 ret = btrfs_tree_mod_log_insert_key(lower, slot, 2973 BTRFS_MOD_LOG_KEY_ADD); 2974 BUG_ON(ret < 0); 2975 } 2976 btrfs_set_node_key(lower, key, slot); 2977 btrfs_set_node_blockptr(lower, slot, bytenr); 2978 WARN_ON(trans->transid == 0); 2979 btrfs_set_node_ptr_generation(lower, slot, trans->transid); 2980 btrfs_set_header_nritems(lower, nritems + 1); 2981 btrfs_mark_buffer_dirty(lower); 2982 } 2983 2984 /* 2985 * split the node at the specified level in path in two. 2986 * The path is corrected to point to the appropriate node after the split 2987 * 2988 * Before splitting this tries to make some room in the node by pushing 2989 * left and right, if either one works, it returns right away. 
2990 * 2991 * returns 0 on success and < 0 on failure 2992 */ 2993 static noinline int split_node(struct btrfs_trans_handle *trans, 2994 struct btrfs_root *root, 2995 struct btrfs_path *path, int level) 2996 { 2997 struct btrfs_fs_info *fs_info = root->fs_info; 2998 struct extent_buffer *c; 2999 struct extent_buffer *split; 3000 struct btrfs_disk_key disk_key; 3001 int mid; 3002 int ret; 3003 u32 c_nritems; 3004 3005 c = path->nodes[level]; 3006 WARN_ON(btrfs_header_generation(c) != trans->transid); 3007 if (c == root->node) { 3008 /* 3009 * trying to split the root, lets make a new one 3010 * 3011 * tree mod log: We don't log_removal old root in 3012 * insert_new_root, because that root buffer will be kept as a 3013 * normal node. We are going to log removal of half of the 3014 * elements below with btrfs_tree_mod_log_eb_copy(). We're 3015 * holding a tree lock on the buffer, which is why we cannot 3016 * race with other tree_mod_log users. 3017 */ 3018 ret = insert_new_root(trans, root, path, level + 1); 3019 if (ret) 3020 return ret; 3021 } else { 3022 ret = push_nodes_for_insert(trans, root, path, level); 3023 c = path->nodes[level]; 3024 if (!ret && btrfs_header_nritems(c) < 3025 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) 3026 return 0; 3027 if (ret < 0) 3028 return ret; 3029 } 3030 3031 c_nritems = btrfs_header_nritems(c); 3032 mid = (c_nritems + 1) / 2; 3033 btrfs_node_key(c, &disk_key, mid); 3034 3035 split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, 3036 &disk_key, level, c->start, 0, 3037 BTRFS_NESTING_SPLIT); 3038 if (IS_ERR(split)) 3039 return PTR_ERR(split); 3040 3041 root_add_used(root, fs_info->nodesize); 3042 ASSERT(btrfs_header_level(c) == level); 3043 3044 ret = btrfs_tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid); 3045 if (ret) { 3046 btrfs_abort_transaction(trans, ret); 3047 return ret; 3048 } 3049 copy_extent_buffer(split, c, 3050 btrfs_node_key_ptr_offset(split, 0), 3051 btrfs_node_key_ptr_offset(c, mid), 3052 (c_nritems - mid) * sizeof(struct btrfs_key_ptr)); 3053 btrfs_set_header_nritems(split, c_nritems - mid); 3054 btrfs_set_header_nritems(c, mid); 3055 3056 btrfs_mark_buffer_dirty(c); 3057 btrfs_mark_buffer_dirty(split); 3058 3059 insert_ptr(trans, path, &disk_key, split->start, 3060 path->slots[level + 1] + 1, level + 1); 3061 3062 if (path->slots[level] >= mid) { 3063 path->slots[level] -= mid; 3064 btrfs_tree_unlock(c); 3065 free_extent_buffer(c); 3066 path->nodes[level] = split; 3067 path->slots[level + 1] += 1; 3068 } else { 3069 btrfs_tree_unlock(split); 3070 free_extent_buffer(split); 3071 } 3072 return 0; 3073 } 3074 3075 /* 3076 * how many bytes are required to store the items in a leaf. start 3077 * and nr indicate which items in the leaf to check. This totals up the 3078 * space used both by the item structs and the item data 3079 */ 3080 static int leaf_space_used(const struct extent_buffer *l, int start, int nr) 3081 { 3082 int data_len; 3083 int nritems = btrfs_header_nritems(l); 3084 int end = min(nritems, start + nr) - 1; 3085 3086 if (!nr) 3087 return 0; 3088 data_len = btrfs_item_offset(l, start) + btrfs_item_size(l, start); 3089 data_len = data_len - btrfs_item_offset(l, end); 3090 data_len += sizeof(struct btrfs_item) * nr; 3091 WARN_ON(data_len < 0); 3092 return data_len; 3093 } 3094 3095 /* 3096 * The space between the end of the leaf items and 3097 * the start of the leaf data. 
IOW, how much room 3098 * the leaf has left for both items and data 3099 */ 3100 int btrfs_leaf_free_space(const struct extent_buffer *leaf) 3101 { 3102 struct btrfs_fs_info *fs_info = leaf->fs_info; 3103 int nritems = btrfs_header_nritems(leaf); 3104 int ret; 3105 3106 ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems); 3107 if (ret < 0) { 3108 btrfs_crit(fs_info, 3109 "leaf free space ret %d, leaf data size %lu, used %d nritems %d", 3110 ret, 3111 (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info), 3112 leaf_space_used(leaf, 0, nritems), nritems); 3113 } 3114 return ret; 3115 } 3116 3117 /* 3118 * min slot controls the lowest index we're willing to push to the 3119 * right. We'll push up to and including min_slot, but no lower 3120 */ 3121 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans, 3122 struct btrfs_path *path, 3123 int data_size, int empty, 3124 struct extent_buffer *right, 3125 int free_space, u32 left_nritems, 3126 u32 min_slot) 3127 { 3128 struct btrfs_fs_info *fs_info = right->fs_info; 3129 struct extent_buffer *left = path->nodes[0]; 3130 struct extent_buffer *upper = path->nodes[1]; 3131 struct btrfs_map_token token; 3132 struct btrfs_disk_key disk_key; 3133 int slot; 3134 u32 i; 3135 int push_space = 0; 3136 int push_items = 0; 3137 u32 nr; 3138 u32 right_nritems; 3139 u32 data_end; 3140 u32 this_item_size; 3141 3142 if (empty) 3143 nr = 0; 3144 else 3145 nr = max_t(u32, 1, min_slot); 3146 3147 if (path->slots[0] >= left_nritems) 3148 push_space += data_size; 3149 3150 slot = path->slots[1]; 3151 i = left_nritems - 1; 3152 while (i >= nr) { 3153 if (!empty && push_items > 0) { 3154 if (path->slots[0] > i) 3155 break; 3156 if (path->slots[0] == i) { 3157 int space = btrfs_leaf_free_space(left); 3158 3159 if (space + push_space * 2 > free_space) 3160 break; 3161 } 3162 } 3163 3164 if (path->slots[0] == i) 3165 push_space += data_size; 3166 3167 this_item_size = btrfs_item_size(left, i); 3168 if (this_item_size + sizeof(struct btrfs_item) + 3169 push_space > free_space) 3170 break; 3171 3172 push_items++; 3173 push_space += this_item_size + sizeof(struct btrfs_item); 3174 if (i == 0) 3175 break; 3176 i--; 3177 } 3178 3179 if (push_items == 0) 3180 goto out_unlock; 3181 3182 WARN_ON(!empty && push_items == left_nritems); 3183 3184 /* push left to right */ 3185 right_nritems = btrfs_header_nritems(right); 3186 3187 push_space = btrfs_item_data_end(left, left_nritems - push_items); 3188 push_space -= leaf_data_end(left); 3189 3190 /* make room in the right data area */ 3191 data_end = leaf_data_end(right); 3192 memmove_leaf_data(right, data_end - push_space, data_end, 3193 BTRFS_LEAF_DATA_SIZE(fs_info) - data_end); 3194 3195 /* copy from the left data area */ 3196 copy_leaf_data(right, left, BTRFS_LEAF_DATA_SIZE(fs_info) - push_space, 3197 leaf_data_end(left), push_space); 3198 3199 memmove_leaf_items(right, push_items, 0, right_nritems); 3200 3201 /* copy the items from left to right */ 3202 copy_leaf_items(right, left, 0, left_nritems - push_items, push_items); 3203 3204 /* update the item pointers */ 3205 btrfs_init_map_token(&token, right); 3206 right_nritems += push_items; 3207 btrfs_set_header_nritems(right, right_nritems); 3208 push_space = BTRFS_LEAF_DATA_SIZE(fs_info); 3209 for (i = 0; i < right_nritems; i++) { 3210 push_space -= btrfs_token_item_size(&token, i); 3211 btrfs_set_token_item_offset(&token, i, push_space); 3212 } 3213 3214 left_nritems -= push_items; 3215 btrfs_set_header_nritems(left, left_nritems); 3216 3217 if 
(left_nritems) 3218 btrfs_mark_buffer_dirty(left); 3219 else 3220 btrfs_clear_buffer_dirty(trans, left); 3221 3222 btrfs_mark_buffer_dirty(right); 3223 3224 btrfs_item_key(right, &disk_key, 0); 3225 btrfs_set_node_key(upper, &disk_key, slot + 1); 3226 btrfs_mark_buffer_dirty(upper); 3227 3228 /* then fixup the leaf pointer in the path */ 3229 if (path->slots[0] >= left_nritems) { 3230 path->slots[0] -= left_nritems; 3231 if (btrfs_header_nritems(path->nodes[0]) == 0) 3232 btrfs_clear_buffer_dirty(trans, path->nodes[0]); 3233 btrfs_tree_unlock(path->nodes[0]); 3234 free_extent_buffer(path->nodes[0]); 3235 path->nodes[0] = right; 3236 path->slots[1] += 1; 3237 } else { 3238 btrfs_tree_unlock(right); 3239 free_extent_buffer(right); 3240 } 3241 return 0; 3242 3243 out_unlock: 3244 btrfs_tree_unlock(right); 3245 free_extent_buffer(right); 3246 return 1; 3247 } 3248 3249 /* 3250 * push some data in the path leaf to the right, trying to free up at 3251 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3252 * 3253 * returns 1 if the push failed because the other node didn't have enough 3254 * room, 0 if everything worked out and < 0 if there were major errors. 3255 * 3256 * this will push starting from min_slot to the end of the leaf. It won't 3257 * push any slot lower than min_slot 3258 */ 3259 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root 3260 *root, struct btrfs_path *path, 3261 int min_data_size, int data_size, 3262 int empty, u32 min_slot) 3263 { 3264 struct extent_buffer *left = path->nodes[0]; 3265 struct extent_buffer *right; 3266 struct extent_buffer *upper; 3267 int slot; 3268 int free_space; 3269 u32 left_nritems; 3270 int ret; 3271 3272 if (!path->nodes[1]) 3273 return 1; 3274 3275 slot = path->slots[1]; 3276 upper = path->nodes[1]; 3277 if (slot >= btrfs_header_nritems(upper) - 1) 3278 return 1; 3279 3280 btrfs_assert_tree_write_locked(path->nodes[1]); 3281 3282 right = btrfs_read_node_slot(upper, slot + 1); 3283 if (IS_ERR(right)) 3284 return PTR_ERR(right); 3285 3286 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT); 3287 3288 free_space = btrfs_leaf_free_space(right); 3289 if (free_space < data_size) 3290 goto out_unlock; 3291 3292 ret = btrfs_cow_block(trans, root, right, upper, 3293 slot + 1, &right, BTRFS_NESTING_RIGHT_COW); 3294 if (ret) 3295 goto out_unlock; 3296 3297 left_nritems = btrfs_header_nritems(left); 3298 if (left_nritems == 0) 3299 goto out_unlock; 3300 3301 if (check_sibling_keys(left, right)) { 3302 ret = -EUCLEAN; 3303 btrfs_abort_transaction(trans, ret); 3304 btrfs_tree_unlock(right); 3305 free_extent_buffer(right); 3306 return ret; 3307 } 3308 if (path->slots[0] == left_nritems && !empty) { 3309 /* Key greater than all keys in the leaf, right neighbor has 3310 * enough room for it and we're not emptying our leaf to delete 3311 * it, therefore use right neighbor to insert the new item and 3312 * no need to touch/dirty our left leaf. */ 3313 btrfs_tree_unlock(left); 3314 free_extent_buffer(left); 3315 path->nodes[0] = right; 3316 path->slots[0] = 0; 3317 path->slots[1]++; 3318 return 0; 3319 } 3320 3321 return __push_leaf_right(trans, path, min_data_size, empty, right, 3322 free_space, left_nritems, min_slot); 3323 out_unlock: 3324 btrfs_tree_unlock(right); 3325 free_extent_buffer(right); 3326 return 1; 3327 } 3328 3329 /* 3330 * push some data in the path leaf to the left, trying to free up at 3331 * least data_size bytes. 
returns zero if the push worked, nonzero otherwise 3332 * 3333 * max_slot can put a limit on how far into the leaf we'll push items. The 3334 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the 3335 * items 3336 */ 3337 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans, 3338 struct btrfs_path *path, int data_size, 3339 int empty, struct extent_buffer *left, 3340 int free_space, u32 right_nritems, 3341 u32 max_slot) 3342 { 3343 struct btrfs_fs_info *fs_info = left->fs_info; 3344 struct btrfs_disk_key disk_key; 3345 struct extent_buffer *right = path->nodes[0]; 3346 int i; 3347 int push_space = 0; 3348 int push_items = 0; 3349 u32 old_left_nritems; 3350 u32 nr; 3351 int ret = 0; 3352 u32 this_item_size; 3353 u32 old_left_item_size; 3354 struct btrfs_map_token token; 3355 3356 if (empty) 3357 nr = min(right_nritems, max_slot); 3358 else 3359 nr = min(right_nritems - 1, max_slot); 3360 3361 for (i = 0; i < nr; i++) { 3362 if (!empty && push_items > 0) { 3363 if (path->slots[0] < i) 3364 break; 3365 if (path->slots[0] == i) { 3366 int space = btrfs_leaf_free_space(right); 3367 3368 if (space + push_space * 2 > free_space) 3369 break; 3370 } 3371 } 3372 3373 if (path->slots[0] == i) 3374 push_space += data_size; 3375 3376 this_item_size = btrfs_item_size(right, i); 3377 if (this_item_size + sizeof(struct btrfs_item) + push_space > 3378 free_space) 3379 break; 3380 3381 push_items++; 3382 push_space += this_item_size + sizeof(struct btrfs_item); 3383 } 3384 3385 if (push_items == 0) { 3386 ret = 1; 3387 goto out; 3388 } 3389 WARN_ON(!empty && push_items == btrfs_header_nritems(right)); 3390 3391 /* push data from right to left */ 3392 copy_leaf_items(left, right, btrfs_header_nritems(left), 0, push_items); 3393 3394 push_space = BTRFS_LEAF_DATA_SIZE(fs_info) - 3395 btrfs_item_offset(right, push_items - 1); 3396 3397 copy_leaf_data(left, right, leaf_data_end(left) - push_space, 3398 btrfs_item_offset(right, push_items - 1), push_space); 3399 old_left_nritems = btrfs_header_nritems(left); 3400 BUG_ON(old_left_nritems <= 0); 3401 3402 btrfs_init_map_token(&token, left); 3403 old_left_item_size = btrfs_item_offset(left, old_left_nritems - 1); 3404 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) { 3405 u32 ioff; 3406 3407 ioff = btrfs_token_item_offset(&token, i); 3408 btrfs_set_token_item_offset(&token, i, 3409 ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size)); 3410 } 3411 btrfs_set_header_nritems(left, old_left_nritems + push_items); 3412 3413 /* fixup right node */ 3414 if (push_items > right_nritems) 3415 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items, 3416 right_nritems); 3417 3418 if (push_items < right_nritems) { 3419 push_space = btrfs_item_offset(right, push_items - 1) - 3420 leaf_data_end(right); 3421 memmove_leaf_data(right, 3422 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space, 3423 leaf_data_end(right), push_space); 3424 3425 memmove_leaf_items(right, 0, push_items, 3426 btrfs_header_nritems(right) - push_items); 3427 } 3428 3429 btrfs_init_map_token(&token, right); 3430 right_nritems -= push_items; 3431 btrfs_set_header_nritems(right, right_nritems); 3432 push_space = BTRFS_LEAF_DATA_SIZE(fs_info); 3433 for (i = 0; i < right_nritems; i++) { 3434 push_space = push_space - btrfs_token_item_size(&token, i); 3435 btrfs_set_token_item_offset(&token, i, push_space); 3436 } 3437 3438 btrfs_mark_buffer_dirty(left); 3439 if (right_nritems) 3440 btrfs_mark_buffer_dirty(right); 3441 else 3442 btrfs_clear_buffer_dirty(trans, 
right); 3443 3444 btrfs_item_key(right, &disk_key, 0); 3445 fixup_low_keys(path, &disk_key, 1); 3446 3447 /* then fixup the leaf pointer in the path */ 3448 if (path->slots[0] < push_items) { 3449 path->slots[0] += old_left_nritems; 3450 btrfs_tree_unlock(path->nodes[0]); 3451 free_extent_buffer(path->nodes[0]); 3452 path->nodes[0] = left; 3453 path->slots[1] -= 1; 3454 } else { 3455 btrfs_tree_unlock(left); 3456 free_extent_buffer(left); 3457 path->slots[0] -= push_items; 3458 } 3459 BUG_ON(path->slots[0] < 0); 3460 return ret; 3461 out: 3462 btrfs_tree_unlock(left); 3463 free_extent_buffer(left); 3464 return ret; 3465 } 3466 3467 /* 3468 * push some data in the path leaf to the left, trying to free up at 3469 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3470 * 3471 * max_slot can put a limit on how far into the leaf we'll push items. The 3472 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the 3473 * items 3474 */ 3475 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root 3476 *root, struct btrfs_path *path, int min_data_size, 3477 int data_size, int empty, u32 max_slot) 3478 { 3479 struct extent_buffer *right = path->nodes[0]; 3480 struct extent_buffer *left; 3481 int slot; 3482 int free_space; 3483 u32 right_nritems; 3484 int ret = 0; 3485 3486 slot = path->slots[1]; 3487 if (slot == 0) 3488 return 1; 3489 if (!path->nodes[1]) 3490 return 1; 3491 3492 right_nritems = btrfs_header_nritems(right); 3493 if (right_nritems == 0) 3494 return 1; 3495 3496 btrfs_assert_tree_write_locked(path->nodes[1]); 3497 3498 left = btrfs_read_node_slot(path->nodes[1], slot - 1); 3499 if (IS_ERR(left)) 3500 return PTR_ERR(left); 3501 3502 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT); 3503 3504 free_space = btrfs_leaf_free_space(left); 3505 if (free_space < data_size) { 3506 ret = 1; 3507 goto out; 3508 } 3509 3510 ret = btrfs_cow_block(trans, root, left, 3511 path->nodes[1], slot - 1, &left, 3512 BTRFS_NESTING_LEFT_COW); 3513 if (ret) { 3514 /* we hit -ENOSPC, but it isn't fatal here */ 3515 if (ret == -ENOSPC) 3516 ret = 1; 3517 goto out; 3518 } 3519 3520 if (check_sibling_keys(left, right)) { 3521 ret = -EUCLEAN; 3522 btrfs_abort_transaction(trans, ret); 3523 goto out; 3524 } 3525 return __push_leaf_left(trans, path, min_data_size, empty, left, 3526 free_space, right_nritems, max_slot); 3527 out: 3528 btrfs_tree_unlock(left); 3529 free_extent_buffer(left); 3530 return ret; 3531 } 3532 3533 /* 3534 * split the path's leaf in two, making sure there is at least data_size 3535 * available for the resulting leaf level of the path. 
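 *
 * copy_for_split() below does the mechanical part of the split: items
 * [mid, nritems) and their data are copied into the fresh right leaf, and
 * each copied item's offset is rebased by rt_data_off so that it becomes
 * relative to the new leaf's data area.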
3536 */ 3537 static noinline void copy_for_split(struct btrfs_trans_handle *trans, 3538 struct btrfs_path *path, 3539 struct extent_buffer *l, 3540 struct extent_buffer *right, 3541 int slot, int mid, int nritems) 3542 { 3543 struct btrfs_fs_info *fs_info = trans->fs_info; 3544 int data_copy_size; 3545 int rt_data_off; 3546 int i; 3547 struct btrfs_disk_key disk_key; 3548 struct btrfs_map_token token; 3549 3550 nritems = nritems - mid; 3551 btrfs_set_header_nritems(right, nritems); 3552 data_copy_size = btrfs_item_data_end(l, mid) - leaf_data_end(l); 3553 3554 copy_leaf_items(right, l, 0, mid, nritems); 3555 3556 copy_leaf_data(right, l, BTRFS_LEAF_DATA_SIZE(fs_info) - data_copy_size, 3557 leaf_data_end(l), data_copy_size); 3558 3559 rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_data_end(l, mid); 3560 3561 btrfs_init_map_token(&token, right); 3562 for (i = 0; i < nritems; i++) { 3563 u32 ioff; 3564 3565 ioff = btrfs_token_item_offset(&token, i); 3566 btrfs_set_token_item_offset(&token, i, ioff + rt_data_off); 3567 } 3568 3569 btrfs_set_header_nritems(l, mid); 3570 btrfs_item_key(right, &disk_key, 0); 3571 insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1); 3572 3573 btrfs_mark_buffer_dirty(right); 3574 btrfs_mark_buffer_dirty(l); 3575 BUG_ON(path->slots[0] != slot); 3576 3577 if (mid <= slot) { 3578 btrfs_tree_unlock(path->nodes[0]); 3579 free_extent_buffer(path->nodes[0]); 3580 path->nodes[0] = right; 3581 path->slots[0] -= mid; 3582 path->slots[1] += 1; 3583 } else { 3584 btrfs_tree_unlock(right); 3585 free_extent_buffer(right); 3586 } 3587 3588 BUG_ON(path->slots[0] < 0); 3589 } 3590 3591 /* 3592 * double splits happen when we need to insert a big item in the middle 3593 * of a leaf. A double split can leave us with 3 mostly empty leaves: 3594 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ] 3595 * A B C 3596 * 3597 * We avoid this by trying to push the items on either side of our target 3598 * into the adjacent leaves. If all goes well we can avoid the double split 3599 * completely. 3600 */ 3601 static noinline int push_for_double_split(struct btrfs_trans_handle *trans, 3602 struct btrfs_root *root, 3603 struct btrfs_path *path, 3604 int data_size) 3605 { 3606 int ret; 3607 int progress = 0; 3608 int slot; 3609 u32 nritems; 3610 int space_needed = data_size; 3611 3612 slot = path->slots[0]; 3613 if (slot < btrfs_header_nritems(path->nodes[0])) 3614 space_needed -= btrfs_leaf_free_space(path->nodes[0]); 3615 3616 /* 3617 * try to push all the items after our slot into the 3618 * right leaf 3619 */ 3620 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot); 3621 if (ret < 0) 3622 return ret; 3623 3624 if (ret == 0) 3625 progress++; 3626 3627 nritems = btrfs_header_nritems(path->nodes[0]); 3628 /* 3629 * our goal is to get our slot at the start or end of a leaf. 
If 3630 * we've done so we're done 3631 */ 3632 if (path->slots[0] == 0 || path->slots[0] == nritems) 3633 return 0; 3634 3635 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size) 3636 return 0; 3637 3638 /* try to push all the items before our slot into the next leaf */ 3639 slot = path->slots[0]; 3640 space_needed = data_size; 3641 if (slot > 0) 3642 space_needed -= btrfs_leaf_free_space(path->nodes[0]); 3643 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot); 3644 if (ret < 0) 3645 return ret; 3646 3647 if (ret == 0) 3648 progress++; 3649 3650 if (progress) 3651 return 0; 3652 return 1; 3653 } 3654 3655 /* 3656 * split the path's leaf in two, making sure there is at least data_size 3657 * available for the resulting leaf level of the path. 3658 * 3659 * returns 0 if all went well and < 0 on failure. 3660 */ 3661 static noinline int split_leaf(struct btrfs_trans_handle *trans, 3662 struct btrfs_root *root, 3663 const struct btrfs_key *ins_key, 3664 struct btrfs_path *path, int data_size, 3665 int extend) 3666 { 3667 struct btrfs_disk_key disk_key; 3668 struct extent_buffer *l; 3669 u32 nritems; 3670 int mid; 3671 int slot; 3672 struct extent_buffer *right; 3673 struct btrfs_fs_info *fs_info = root->fs_info; 3674 int ret = 0; 3675 int wret; 3676 int split; 3677 int num_doubles = 0; 3678 int tried_avoid_double = 0; 3679 3680 l = path->nodes[0]; 3681 slot = path->slots[0]; 3682 if (extend && data_size + btrfs_item_size(l, slot) + 3683 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info)) 3684 return -EOVERFLOW; 3685 3686 /* first try to make some room by pushing left and right */ 3687 if (data_size && path->nodes[1]) { 3688 int space_needed = data_size; 3689 3690 if (slot < btrfs_header_nritems(l)) 3691 space_needed -= btrfs_leaf_free_space(l); 3692 3693 wret = push_leaf_right(trans, root, path, space_needed, 3694 space_needed, 0, 0); 3695 if (wret < 0) 3696 return wret; 3697 if (wret) { 3698 space_needed = data_size; 3699 if (slot > 0) 3700 space_needed -= btrfs_leaf_free_space(l); 3701 wret = push_leaf_left(trans, root, path, space_needed, 3702 space_needed, 0, (u32)-1); 3703 if (wret < 0) 3704 return wret; 3705 } 3706 l = path->nodes[0]; 3707 3708 /* did the pushes work? 
		 */
		if (btrfs_leaf_free_space(l) >= data_size)
			return 0;
	}

	if (!path->nodes[1]) {
		ret = insert_new_root(trans, root, path, 1);
		if (ret)
			return ret;
	}
again:
	split = 1;
	l = path->nodes[0];
	slot = path->slots[0];
	nritems = btrfs_header_nritems(l);
	mid = (nritems + 1) / 2;

	if (mid <= slot) {
		if (nritems == 1 ||
		    leaf_space_used(l, mid, nritems - mid) + data_size >
			BTRFS_LEAF_DATA_SIZE(fs_info)) {
			if (slot >= nritems) {
				split = 0;
			} else {
				mid = slot;
				if (mid != nritems &&
				    leaf_space_used(l, mid, nritems - mid) +
				    data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
					if (data_size && !tried_avoid_double)
						goto push_for_double;
					split = 2;
				}
			}
		}
	} else {
		if (leaf_space_used(l, 0, mid) + data_size >
			BTRFS_LEAF_DATA_SIZE(fs_info)) {
			if (!extend && data_size && slot == 0) {
				split = 0;
			} else if ((extend || !data_size) && slot == 0) {
				mid = 1;
			} else {
				mid = slot;
				if (mid != nritems &&
				    leaf_space_used(l, mid, nritems - mid) +
				    data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
					if (data_size && !tried_avoid_double)
						goto push_for_double;
					split = 2;
				}
			}
		}
	}

	if (split == 0)
		btrfs_cpu_key_to_disk(&disk_key, ins_key);
	else
		btrfs_item_key(l, &disk_key, mid);

	/*
	 * We have to use BTRFS_NESTING_NEW_ROOT here if we've done a double
	 * split, because we're only allowed to have MAX_LOCKDEP_SUBCLASSES
	 * subclasses, which is 8 at the time of this patch, and we've maxed it
	 * out. In the future we could add a
	 * BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just
	 * use BTRFS_NESTING_NEW_ROOT.
	 */
	right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
				       &disk_key, 0, l->start, 0,
				       num_doubles ? BTRFS_NESTING_NEW_ROOT :
				       BTRFS_NESTING_SPLIT);
	if (IS_ERR(right))
		return PTR_ERR(right);

	root_add_used(root, fs_info->nodesize);

	if (split == 0) {
		if (mid <= slot) {
			btrfs_set_header_nritems(right, 0);
			insert_ptr(trans, path, &disk_key,
				   right->start, path->slots[1] + 1, 1);
			btrfs_tree_unlock(path->nodes[0]);
			free_extent_buffer(path->nodes[0]);
			path->nodes[0] = right;
			path->slots[0] = 0;
			path->slots[1] += 1;
		} else {
			btrfs_set_header_nritems(right, 0);
			insert_ptr(trans, path, &disk_key,
				   right->start, path->slots[1], 1);
			btrfs_tree_unlock(path->nodes[0]);
			free_extent_buffer(path->nodes[0]);
			path->nodes[0] = right;
			path->slots[0] = 0;
			if (path->slots[1] == 0)
				fixup_low_keys(path, &disk_key, 1);
		}
		/*
		 * We create a new leaf 'right' for the required ins_len and
		 * we'll do btrfs_mark_buffer_dirty() on this leaf after
		 * copying the new item's content (of size ins_len) into
		 * 'right'.
3809 */ 3810 return ret; 3811 } 3812 3813 copy_for_split(trans, path, l, right, slot, mid, nritems); 3814 3815 if (split == 2) { 3816 BUG_ON(num_doubles != 0); 3817 num_doubles++; 3818 goto again; 3819 } 3820 3821 return 0; 3822 3823 push_for_double: 3824 push_for_double_split(trans, root, path, data_size); 3825 tried_avoid_double = 1; 3826 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size) 3827 return 0; 3828 goto again; 3829 } 3830 3831 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans, 3832 struct btrfs_root *root, 3833 struct btrfs_path *path, int ins_len) 3834 { 3835 struct btrfs_key key; 3836 struct extent_buffer *leaf; 3837 struct btrfs_file_extent_item *fi; 3838 u64 extent_len = 0; 3839 u32 item_size; 3840 int ret; 3841 3842 leaf = path->nodes[0]; 3843 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3844 3845 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY && 3846 key.type != BTRFS_EXTENT_CSUM_KEY); 3847 3848 if (btrfs_leaf_free_space(leaf) >= ins_len) 3849 return 0; 3850 3851 item_size = btrfs_item_size(leaf, path->slots[0]); 3852 if (key.type == BTRFS_EXTENT_DATA_KEY) { 3853 fi = btrfs_item_ptr(leaf, path->slots[0], 3854 struct btrfs_file_extent_item); 3855 extent_len = btrfs_file_extent_num_bytes(leaf, fi); 3856 } 3857 btrfs_release_path(path); 3858 3859 path->keep_locks = 1; 3860 path->search_for_split = 1; 3861 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 3862 path->search_for_split = 0; 3863 if (ret > 0) 3864 ret = -EAGAIN; 3865 if (ret < 0) 3866 goto err; 3867 3868 ret = -EAGAIN; 3869 leaf = path->nodes[0]; 3870 /* if our item isn't there, return now */ 3871 if (item_size != btrfs_item_size(leaf, path->slots[0])) 3872 goto err; 3873 3874 /* the leaf has changed, it now has room. return now */ 3875 if (btrfs_leaf_free_space(path->nodes[0]) >= ins_len) 3876 goto err; 3877 3878 if (key.type == BTRFS_EXTENT_DATA_KEY) { 3879 fi = btrfs_item_ptr(leaf, path->slots[0], 3880 struct btrfs_file_extent_item); 3881 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi)) 3882 goto err; 3883 } 3884 3885 ret = split_leaf(trans, root, &key, path, ins_len, 1); 3886 if (ret) 3887 goto err; 3888 3889 path->keep_locks = 0; 3890 btrfs_unlock_up_safe(path, 1); 3891 return 0; 3892 err: 3893 path->keep_locks = 0; 3894 return ret; 3895 } 3896 3897 static noinline int split_item(struct btrfs_path *path, 3898 const struct btrfs_key *new_key, 3899 unsigned long split_offset) 3900 { 3901 struct extent_buffer *leaf; 3902 int orig_slot, slot; 3903 char *buf; 3904 u32 nritems; 3905 u32 item_size; 3906 u32 orig_offset; 3907 struct btrfs_disk_key disk_key; 3908 3909 leaf = path->nodes[0]; 3910 BUG_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item)); 3911 3912 orig_slot = path->slots[0]; 3913 orig_offset = btrfs_item_offset(leaf, path->slots[0]); 3914 item_size = btrfs_item_size(leaf, path->slots[0]); 3915 3916 buf = kmalloc(item_size, GFP_NOFS); 3917 if (!buf) 3918 return -ENOMEM; 3919 3920 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, 3921 path->slots[0]), item_size); 3922 3923 slot = path->slots[0] + 1; 3924 nritems = btrfs_header_nritems(leaf); 3925 if (slot != nritems) { 3926 /* shift the items */ 3927 memmove_leaf_items(leaf, slot + 1, slot, nritems - slot); 3928 } 3929 3930 btrfs_cpu_key_to_disk(&disk_key, new_key); 3931 btrfs_set_item_key(leaf, &disk_key, slot); 3932 3933 btrfs_set_item_offset(leaf, slot, orig_offset); 3934 btrfs_set_item_size(leaf, slot, item_size - split_offset); 3935 3936 btrfs_set_item_offset(leaf, orig_slot, 3937 orig_offset 
+ item_size - split_offset); 3938 btrfs_set_item_size(leaf, orig_slot, split_offset); 3939 3940 btrfs_set_header_nritems(leaf, nritems + 1); 3941 3942 /* write the data for the start of the original item */ 3943 write_extent_buffer(leaf, buf, 3944 btrfs_item_ptr_offset(leaf, path->slots[0]), 3945 split_offset); 3946 3947 /* write the data for the new item */ 3948 write_extent_buffer(leaf, buf + split_offset, 3949 btrfs_item_ptr_offset(leaf, slot), 3950 item_size - split_offset); 3951 btrfs_mark_buffer_dirty(leaf); 3952 3953 BUG_ON(btrfs_leaf_free_space(leaf) < 0); 3954 kfree(buf); 3955 return 0; 3956 } 3957 3958 /* 3959 * This function splits a single item into two items, 3960 * giving 'new_key' to the new item and splitting the 3961 * old one at split_offset (from the start of the item). 3962 * 3963 * The path may be released by this operation. After 3964 * the split, the path is pointing to the old item. The 3965 * new item is going to be in the same node as the old one. 3966 * 3967 * Note: the item being split must be small enough to live alone on 3968 * a tree block, with room for one extra struct btrfs_item. 3969 * 3970 * This allows us to split the item in place, keeping a lock on the 3971 * leaf the entire time. 3972 */ 3973 int btrfs_split_item(struct btrfs_trans_handle *trans, 3974 struct btrfs_root *root, 3975 struct btrfs_path *path, 3976 const struct btrfs_key *new_key, 3977 unsigned long split_offset) 3978 { 3979 int ret; 3980 ret = setup_leaf_for_split(trans, root, path, 3981 sizeof(struct btrfs_item)); 3982 if (ret) 3983 return ret; 3984 3985 ret = split_item(path, new_key, split_offset); 3986 return ret; 3987 } 3988 3989 /* 3990 * make the item pointed to by the path smaller. new_size indicates 3991 * how small to make it, and from_end tells us if we just chop bytes 3992 * off the end of the item or if we shift the item to chop bytes off 3993 * the front. 3994 */ 3995 void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end) 3996 { 3997 int slot; 3998 struct extent_buffer *leaf; 3999 u32 nritems; 4000 unsigned int data_end; 4001 unsigned int old_data_start; 4002 unsigned int old_size; 4003 unsigned int size_diff; 4004 int i; 4005 struct btrfs_map_token token; 4006 4007 leaf = path->nodes[0]; 4008 slot = path->slots[0]; 4009 4010 old_size = btrfs_item_size(leaf, slot); 4011 if (old_size == new_size) 4012 return; 4013 4014 nritems = btrfs_header_nritems(leaf); 4015 data_end = leaf_data_end(leaf); 4016 4017 old_data_start = btrfs_item_offset(leaf, slot); 4018 4019 size_diff = old_size - new_size; 4020 4021 BUG_ON(slot < 0); 4022 BUG_ON(slot >= nritems); 4023 4024 /* 4025 * item0..itemN ... dataN.offset..dataN.size ..
data0.size 4026 */ 4027 /* first correct the data pointers */ 4028 btrfs_init_map_token(&token, leaf); 4029 for (i = slot; i < nritems; i++) { 4030 u32 ioff; 4031 4032 ioff = btrfs_token_item_offset(&token, i); 4033 btrfs_set_token_item_offset(&token, i, ioff + size_diff); 4034 } 4035 4036 /* shift the data */ 4037 if (from_end) { 4038 memmove_leaf_data(leaf, data_end + size_diff, data_end, 4039 old_data_start + new_size - data_end); 4040 } else { 4041 struct btrfs_disk_key disk_key; 4042 u64 offset; 4043 4044 btrfs_item_key(leaf, &disk_key, slot); 4045 4046 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) { 4047 unsigned long ptr; 4048 struct btrfs_file_extent_item *fi; 4049 4050 fi = btrfs_item_ptr(leaf, slot, 4051 struct btrfs_file_extent_item); 4052 fi = (struct btrfs_file_extent_item *)( 4053 (unsigned long)fi - size_diff); 4054 4055 if (btrfs_file_extent_type(leaf, fi) == 4056 BTRFS_FILE_EXTENT_INLINE) { 4057 ptr = btrfs_item_ptr_offset(leaf, slot); 4058 memmove_extent_buffer(leaf, ptr, 4059 (unsigned long)fi, 4060 BTRFS_FILE_EXTENT_INLINE_DATA_START); 4061 } 4062 } 4063 4064 memmove_leaf_data(leaf, data_end + size_diff, data_end, 4065 old_data_start - data_end); 4066 4067 offset = btrfs_disk_key_offset(&disk_key); 4068 btrfs_set_disk_key_offset(&disk_key, offset + size_diff); 4069 btrfs_set_item_key(leaf, &disk_key, slot); 4070 if (slot == 0) 4071 fixup_low_keys(path, &disk_key, 1); 4072 } 4073 4074 btrfs_set_item_size(leaf, slot, new_size); 4075 btrfs_mark_buffer_dirty(leaf); 4076 4077 if (btrfs_leaf_free_space(leaf) < 0) { 4078 btrfs_print_leaf(leaf); 4079 BUG(); 4080 } 4081 } 4082 4083 /* 4084 * make the item pointed to by the path bigger, data_size is the added size. 4085 */ 4086 void btrfs_extend_item(struct btrfs_path *path, u32 data_size) 4087 { 4088 int slot; 4089 struct extent_buffer *leaf; 4090 u32 nritems; 4091 unsigned int data_end; 4092 unsigned int old_data; 4093 unsigned int old_size; 4094 int i; 4095 struct btrfs_map_token token; 4096 4097 leaf = path->nodes[0]; 4098 4099 nritems = btrfs_header_nritems(leaf); 4100 data_end = leaf_data_end(leaf); 4101 4102 if (btrfs_leaf_free_space(leaf) < data_size) { 4103 btrfs_print_leaf(leaf); 4104 BUG(); 4105 } 4106 slot = path->slots[0]; 4107 old_data = btrfs_item_data_end(leaf, slot); 4108 4109 BUG_ON(slot < 0); 4110 if (slot >= nritems) { 4111 btrfs_print_leaf(leaf); 4112 btrfs_crit(leaf->fs_info, "slot %d too large, nritems %d", 4113 slot, nritems); 4114 BUG(); 4115 } 4116 4117 /* 4118 * item0..itemN ... dataN.offset..dataN.size .. data0.size 4119 */ 4120 /* first correct the data pointers */ 4121 btrfs_init_map_token(&token, leaf); 4122 for (i = slot; i < nritems; i++) { 4123 u32 ioff; 4124 4125 ioff = btrfs_token_item_offset(&token, i); 4126 btrfs_set_token_item_offset(&token, i, ioff - data_size); 4127 } 4128 4129 /* shift the data */ 4130 memmove_leaf_data(leaf, data_end - data_size, data_end, 4131 old_data - data_end); 4132 4133 data_end = old_data; 4134 old_size = btrfs_item_size(leaf, slot); 4135 btrfs_set_item_size(leaf, slot, old_size + data_size); 4136 btrfs_mark_buffer_dirty(leaf); 4137 4138 if (btrfs_leaf_free_space(leaf) < 0) { 4139 btrfs_print_leaf(leaf); 4140 BUG(); 4141 } 4142 } 4143 4144 /* 4145 * Make space in the node before inserting one or more items. 
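 *
 * Leaf layout reminder (an illustrative sketch, matching the item0..itemN
 * picture used elsewhere in this file): item headers grow forward from the
 * start of the leaf while item data grows backward from the end, with the
 * free space in between:
 *
 *	[item0][item1]..[itemN] ....free space.... [dataN]..[data1][data0]
 *
 * so making room for new items means shifting headers to the right and
 * their data to the left.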
4146 * 4147 * @root: root we are inserting items to 4148 * @path: points to the leaf/slot where we are going to insert new items 4149 * @batch: information about the batch of items to insert 4150 * 4151 * Main purpose is to save stack depth by doing the bulk of the work in a 4152 * function that doesn't call btrfs_search_slot 4153 */ 4154 static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path, 4155 const struct btrfs_item_batch *batch) 4156 { 4157 struct btrfs_fs_info *fs_info = root->fs_info; 4158 int i; 4159 u32 nritems; 4160 unsigned int data_end; 4161 struct btrfs_disk_key disk_key; 4162 struct extent_buffer *leaf; 4163 int slot; 4164 struct btrfs_map_token token; 4165 u32 total_size; 4166 4167 /* 4168 * Before anything else, update keys in the parent and other ancestors 4169 * if needed, then release the write locks on them, so that other tasks 4170 * can use them while we modify the leaf. 4171 */ 4172 if (path->slots[0] == 0) { 4173 btrfs_cpu_key_to_disk(&disk_key, &batch->keys[0]); 4174 fixup_low_keys(path, &disk_key, 1); 4175 } 4176 btrfs_unlock_up_safe(path, 1); 4177 4178 leaf = path->nodes[0]; 4179 slot = path->slots[0]; 4180 4181 nritems = btrfs_header_nritems(leaf); 4182 data_end = leaf_data_end(leaf); 4183 total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item)); 4184 4185 if (btrfs_leaf_free_space(leaf) < total_size) { 4186 btrfs_print_leaf(leaf); 4187 btrfs_crit(fs_info, "not enough freespace need %u have %d", 4188 total_size, btrfs_leaf_free_space(leaf)); 4189 BUG(); 4190 } 4191 4192 btrfs_init_map_token(&token, leaf); 4193 if (slot != nritems) { 4194 unsigned int old_data = btrfs_item_data_end(leaf, slot); 4195 4196 if (old_data < data_end) { 4197 btrfs_print_leaf(leaf); 4198 btrfs_crit(fs_info, 4199 "item at slot %d with data offset %u beyond data end of leaf %u", 4200 slot, old_data, data_end); 4201 BUG(); 4202 } 4203 /* 4204 * item0..itemN ... dataN.offset..dataN.size .. data0.size 4205 */ 4206 /* first correct the data pointers */ 4207 for (i = slot; i < nritems; i++) { 4208 u32 ioff; 4209 4210 ioff = btrfs_token_item_offset(&token, i); 4211 btrfs_set_token_item_offset(&token, i, 4212 ioff - batch->total_data_size); 4213 } 4214 /* shift the items */ 4215 memmove_leaf_items(leaf, slot + batch->nr, slot, nritems - slot); 4216 4217 /* shift the data */ 4218 memmove_leaf_data(leaf, data_end - batch->total_data_size, 4219 data_end, old_data - data_end); 4220 data_end = old_data; 4221 } 4222 4223 /* setup the item for the new data */ 4224 for (i = 0; i < batch->nr; i++) { 4225 btrfs_cpu_key_to_disk(&disk_key, &batch->keys[i]); 4226 btrfs_set_item_key(leaf, &disk_key, slot + i); 4227 data_end -= batch->data_sizes[i]; 4228 btrfs_set_token_item_offset(&token, slot + i, data_end); 4229 btrfs_set_token_item_size(&token, slot + i, batch->data_sizes[i]); 4230 } 4231 4232 btrfs_set_header_nritems(leaf, nritems + batch->nr); 4233 btrfs_mark_buffer_dirty(leaf); 4234 4235 if (btrfs_leaf_free_space(leaf) < 0) { 4236 btrfs_print_leaf(leaf); 4237 BUG(); 4238 } 4239 } 4240 4241 /* 4242 * Insert a new item into a leaf. 4243 * 4244 * @root: The root of the btree. 4245 * @path: A path pointing to the target leaf and slot. 4246 * @key: The key of the new item. 4247 * @data_size: The size of the data associated with the new key. 
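 *
 * A minimal usage sketch (hypothetical caller, error handling omitted); the
 * path must already point at the target slot with enough free space in the
 * leaf, e.g. as left by btrfs_search_slot() with a non-zero ins_len:
 *
 *	btrfs_setup_item_for_insert(root, path, &key, size);
 *	write_extent_buffer(path->nodes[0], data,
 *			    btrfs_item_ptr_offset(path->nodes[0], path->slots[0]),
 *			    size);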
4248 */ 4249 void btrfs_setup_item_for_insert(struct btrfs_root *root, 4250 struct btrfs_path *path, 4251 const struct btrfs_key *key, 4252 u32 data_size) 4253 { 4254 struct btrfs_item_batch batch; 4255 4256 batch.keys = key; 4257 batch.data_sizes = &data_size; 4258 batch.total_data_size = data_size; 4259 batch.nr = 1; 4260 4261 setup_items_for_insert(root, path, &batch); 4262 } 4263 4264 /* 4265 * Given a key and some data, insert items into the tree. 4266 * This does all the path init required, making room in the tree if needed. 4267 */ 4268 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, 4269 struct btrfs_root *root, 4270 struct btrfs_path *path, 4271 const struct btrfs_item_batch *batch) 4272 { 4273 int ret = 0; 4274 int slot; 4275 u32 total_size; 4276 4277 total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item)); 4278 ret = btrfs_search_slot(trans, root, &batch->keys[0], path, total_size, 1); 4279 if (ret == 0) 4280 return -EEXIST; 4281 if (ret < 0) 4282 return ret; 4283 4284 slot = path->slots[0]; 4285 BUG_ON(slot < 0); 4286 4287 setup_items_for_insert(root, path, batch); 4288 return 0; 4289 } 4290 4291 /* 4292 * Given a key and some data, insert an item into the tree. 4293 * This does all the path init required, making room in the tree if needed. 4294 */ 4295 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, 4296 const struct btrfs_key *cpu_key, void *data, 4297 u32 data_size) 4298 { 4299 int ret = 0; 4300 struct btrfs_path *path; 4301 struct extent_buffer *leaf; 4302 unsigned long ptr; 4303 4304 path = btrfs_alloc_path(); 4305 if (!path) 4306 return -ENOMEM; 4307 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size); 4308 if (!ret) { 4309 leaf = path->nodes[0]; 4310 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 4311 write_extent_buffer(leaf, data, ptr, data_size); 4312 btrfs_mark_buffer_dirty(leaf); 4313 } 4314 btrfs_free_path(path); 4315 return ret; 4316 } 4317 4318 /* 4319 * This function duplicates an item, giving 'new_key' to the new item. 4320 * It guarantees both items live in the same tree leaf and the new item is 4321 * contiguous with the original item. 4322 * 4323 * This allows us to split a file extent in place, keeping a lock on the leaf 4324 * the entire time. 4325 */ 4326 int btrfs_duplicate_item(struct btrfs_trans_handle *trans, 4327 struct btrfs_root *root, 4328 struct btrfs_path *path, 4329 const struct btrfs_key *new_key) 4330 { 4331 struct extent_buffer *leaf; 4332 int ret; 4333 u32 item_size; 4334 4335 leaf = path->nodes[0]; 4336 item_size = btrfs_item_size(leaf, path->slots[0]); 4337 ret = setup_leaf_for_split(trans, root, path, 4338 item_size + sizeof(struct btrfs_item)); 4339 if (ret) 4340 return ret; 4341 4342 path->slots[0]++; 4343 btrfs_setup_item_for_insert(root, path, new_key, item_size); 4344 leaf = path->nodes[0]; 4345 memcpy_extent_buffer(leaf, 4346 btrfs_item_ptr_offset(leaf, path->slots[0]), 4347 btrfs_item_ptr_offset(leaf, path->slots[0] - 1), 4348 item_size); 4349 return 0; 4350 } 4351 4352 /* 4353 * delete the pointer from a given node. 4354 * 4355 * the tree should have been previously balanced so the deletion does not 4356 * empty a node. 
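 *
 * Note: @slot is the index of the key pointer to remove from
 * path->nodes[@level]; btrfs_del_leaf() below, for example, calls
 * del_ptr(root, path, 1, path->slots[1]) to drop a leaf's pointer from
 * its parent.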
4357 */ 4358 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path, 4359 int level, int slot) 4360 { 4361 struct extent_buffer *parent = path->nodes[level]; 4362 u32 nritems; 4363 int ret; 4364 4365 nritems = btrfs_header_nritems(parent); 4366 if (slot != nritems - 1) { 4367 if (level) { 4368 ret = btrfs_tree_mod_log_insert_move(parent, slot, 4369 slot + 1, nritems - slot - 1); 4370 BUG_ON(ret < 0); 4371 } 4372 memmove_extent_buffer(parent, 4373 btrfs_node_key_ptr_offset(parent, slot), 4374 btrfs_node_key_ptr_offset(parent, slot + 1), 4375 sizeof(struct btrfs_key_ptr) * 4376 (nritems - slot - 1)); 4377 } else if (level) { 4378 ret = btrfs_tree_mod_log_insert_key(parent, slot, 4379 BTRFS_MOD_LOG_KEY_REMOVE); 4380 BUG_ON(ret < 0); 4381 } 4382 4383 nritems--; 4384 btrfs_set_header_nritems(parent, nritems); 4385 if (nritems == 0 && parent == root->node) { 4386 BUG_ON(btrfs_header_level(root->node) != 1); 4387 /* just turn the root into a leaf and break */ 4388 btrfs_set_header_level(root->node, 0); 4389 } else if (slot == 0) { 4390 struct btrfs_disk_key disk_key; 4391 4392 btrfs_node_key(parent, &disk_key, 0); 4393 fixup_low_keys(path, &disk_key, level + 1); 4394 } 4395 btrfs_mark_buffer_dirty(parent); 4396 } 4397 4398 /* 4399 * a helper function to delete the leaf pointed to by path->slots[1] and 4400 * path->nodes[1]. 4401 * 4402 * This deletes the pointer in path->nodes[1] and frees the leaf 4403 * block extent. 4404 * 4405 * The path must have already been setup for deleting the leaf, including 4406 * all the proper balancing. path->nodes[1] must be locked. 4407 */ 4408 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans, 4409 struct btrfs_root *root, 4410 struct btrfs_path *path, 4411 struct extent_buffer *leaf) 4412 { 4413 WARN_ON(btrfs_header_generation(leaf) != trans->transid); 4414 del_ptr(root, path, 1, path->slots[1]); 4415 4416 /* 4417 * btrfs_free_extent is expensive, we want to make sure we 4418 * aren't holding any locks when we call it 4419 */ 4420 btrfs_unlock_up_safe(path, 0); 4421 4422 root_sub_used(root, leaf->len); 4423 4424 atomic_inc(&leaf->refs); 4425 btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1); 4426 free_extent_buffer_stale(leaf); 4427 } 4428 /* 4429 * delete the item at the leaf level in path.
If that empties 4430 * the leaf, remove it from the tree 4431 */ 4432 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, 4433 struct btrfs_path *path, int slot, int nr) 4434 { 4435 struct btrfs_fs_info *fs_info = root->fs_info; 4436 struct extent_buffer *leaf; 4437 int ret = 0; 4438 int wret; 4439 u32 nritems; 4440 4441 leaf = path->nodes[0]; 4442 nritems = btrfs_header_nritems(leaf); 4443 4444 if (slot + nr != nritems) { 4445 const u32 last_off = btrfs_item_offset(leaf, slot + nr - 1); 4446 const int data_end = leaf_data_end(leaf); 4447 struct btrfs_map_token token; 4448 u32 dsize = 0; 4449 int i; 4450 4451 for (i = 0; i < nr; i++) 4452 dsize += btrfs_item_size(leaf, slot + i); 4453 4454 memmove_leaf_data(leaf, data_end + dsize, data_end, 4455 last_off - data_end); 4456 4457 btrfs_init_map_token(&token, leaf); 4458 for (i = slot + nr; i < nritems; i++) { 4459 u32 ioff; 4460 4461 ioff = btrfs_token_item_offset(&token, i); 4462 btrfs_set_token_item_offset(&token, i, ioff + dsize); 4463 } 4464 4465 memmove_leaf_items(leaf, slot, slot + nr, nritems - slot - nr); 4466 } 4467 btrfs_set_header_nritems(leaf, nritems - nr); 4468 nritems -= nr; 4469 4470 /* delete the leaf if we've emptied it */ 4471 if (nritems == 0) { 4472 if (leaf == root->node) { 4473 btrfs_set_header_level(leaf, 0); 4474 } else { 4475 btrfs_clear_buffer_dirty(trans, leaf); 4476 btrfs_del_leaf(trans, root, path, leaf); 4477 } 4478 } else { 4479 int used = leaf_space_used(leaf, 0, nritems); 4480 if (slot == 0) { 4481 struct btrfs_disk_key disk_key; 4482 4483 btrfs_item_key(leaf, &disk_key, 0); 4484 fixup_low_keys(path, &disk_key, 1); 4485 } 4486 4487 /* 4488 * Try to delete the leaf if it is mostly empty. We do this by 4489 * trying to move all its items into its left and right neighbours. 4490 * If we can't move all the items, then we don't delete it - it's 4491 * not ideal, but future insertions might fill the leaf with more 4492 * items, or items from other leaves might be moved later into our 4493 * leaf due to deletions on those leaves. 4494 */ 4495 if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) { 4496 u32 min_push_space; 4497 4498 /* push_leaf_left fixes the path. 4499 * make sure the path still points to our leaf 4500 * for possible call to del_ptr below 4501 */ 4502 slot = path->slots[1]; 4503 atomic_inc(&leaf->refs); 4504 /* 4505 * We want to be able to at least push one item to the 4506 * left neighbour leaf, and that's the first item. 4507 */ 4508 min_push_space = sizeof(struct btrfs_item) + 4509 btrfs_item_size(leaf, 0); 4510 wret = push_leaf_left(trans, root, path, 0, 4511 min_push_space, 1, (u32)-1); 4512 if (wret < 0 && wret != -ENOSPC) 4513 ret = wret; 4514 4515 if (path->nodes[0] == leaf && 4516 btrfs_header_nritems(leaf)) { 4517 /* 4518 * If we were not able to push all items from our 4519 * leaf to its left neighbour, then attempt to 4520 * either push all the remaining items to the 4521 * right neighbour or none. There's no advantage 4522 * in pushing only some items, instead of all, as 4523 * it's pointless to end up with a leaf having 4524 * too few items while the neighbours can be full 4525 * or nearly full. 
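 *
 * (That is why min_push_space below is recomputed as the space used by
 * all of the remaining items: push_leaf_right() is asked to move that
 * much or fail with -ENOSPC, rather than relocating a partial set.)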
4526 */ 4527 nritems = btrfs_header_nritems(leaf); 4528 min_push_space = leaf_space_used(leaf, 0, nritems); 4529 wret = push_leaf_right(trans, root, path, 0, 4530 min_push_space, 1, 0); 4531 if (wret < 0 && wret != -ENOSPC) 4532 ret = wret; 4533 } 4534 4535 if (btrfs_header_nritems(leaf) == 0) { 4536 path->slots[1] = slot; 4537 btrfs_del_leaf(trans, root, path, leaf); 4538 free_extent_buffer(leaf); 4539 ret = 0; 4540 } else { 4541 /* if we're still in the path, make sure 4542 * we're dirty. Otherwise, one of the 4543 * push_leaf functions must have already 4544 * dirtied this buffer 4545 */ 4546 if (path->nodes[0] == leaf) 4547 btrfs_mark_buffer_dirty(leaf); 4548 free_extent_buffer(leaf); 4549 } 4550 } else { 4551 btrfs_mark_buffer_dirty(leaf); 4552 } 4553 } 4554 return ret; 4555 } 4556 4557 /* 4558 * A helper function to walk down the tree starting at min_key, and looking 4559 * for nodes or leaves that have a minimum transaction id. 4560 * This is used by the btree defrag code, and tree logging. 4561 * 4562 * This does not cow, but it does stuff the starting key it finds back 4563 * into min_key, so you can call btrfs_search_slot with cow=1 on the 4564 * key and get a writable path. 4565 * 4566 * This honors path->lowest_level to prevent descent past a given level 4567 * of the tree. 4568 * 4569 * min_trans indicates the oldest transaction that you are interested 4570 * in walking through. Any nodes or leaves older than min_trans are 4571 * skipped over (without reading them). 4572 * 4573 * returns zero if something useful was found, < 0 on error and 1 if there 4574 * was nothing in the tree that matched the search criteria. 4575 */ 4576 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key, 4577 struct btrfs_path *path, 4578 u64 min_trans) 4579 { 4580 struct extent_buffer *cur; 4581 struct btrfs_key found_key; 4582 int slot; 4583 int sret; 4584 u32 nritems; 4585 int level; 4586 int ret = 1; 4587 int keep_locks = path->keep_locks; 4588 4589 ASSERT(!path->nowait); 4590 path->keep_locks = 1; 4591 again: 4592 cur = btrfs_read_lock_root_node(root); 4593 level = btrfs_header_level(cur); 4594 WARN_ON(path->nodes[level]); 4595 path->nodes[level] = cur; 4596 path->locks[level] = BTRFS_READ_LOCK; 4597 4598 if (btrfs_header_generation(cur) < min_trans) { 4599 ret = 1; 4600 goto out; 4601 } 4602 while (1) { 4603 nritems = btrfs_header_nritems(cur); 4604 level = btrfs_header_level(cur); 4605 sret = btrfs_bin_search(cur, 0, min_key, &slot); 4606 if (sret < 0) { 4607 ret = sret; 4608 goto out; 4609 } 4610 4611 /* at the lowest level, we're done, setup the path and exit */ 4612 if (level == path->lowest_level) { 4613 if (slot >= nritems) 4614 goto find_next_key; 4615 ret = 0; 4616 path->slots[level] = slot; 4617 btrfs_item_key_to_cpu(cur, &found_key, slot); 4618 goto out; 4619 } 4620 if (sret && slot > 0) 4621 slot--; 4622 /* 4623 * check this node pointer against the min_trans parameter. 4624 * If it is too old, skip to the next one.
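 *
 * The generation stored next to a node pointer is the transid of the
 * transaction that last COWed the child block, so a pointer with
 * gen < min_trans cannot lead to anything newer than min_trans and
 * its whole subtree can be skipped unread.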
4625 */ 4626 while (slot < nritems) { 4627 u64 gen; 4628 4629 gen = btrfs_node_ptr_generation(cur, slot); 4630 if (gen < min_trans) { 4631 slot++; 4632 continue; 4633 } 4634 break; 4635 } 4636 find_next_key: 4637 /* 4638 * we didn't find a candidate key in this node, walk forward 4639 * and find another one 4640 */ 4641 if (slot >= nritems) { 4642 path->slots[level] = slot; 4643 sret = btrfs_find_next_key(root, path, min_key, level, 4644 min_trans); 4645 if (sret == 0) { 4646 btrfs_release_path(path); 4647 goto again; 4648 } else { 4649 goto out; 4650 } 4651 } 4652 /* save our key for returning back */ 4653 btrfs_node_key_to_cpu(cur, &found_key, slot); 4654 path->slots[level] = slot; 4655 if (level == path->lowest_level) { 4656 ret = 0; 4657 goto out; 4658 } 4659 cur = btrfs_read_node_slot(cur, slot); 4660 if (IS_ERR(cur)) { 4661 ret = PTR_ERR(cur); 4662 goto out; 4663 } 4664 4665 btrfs_tree_read_lock(cur); 4666 4667 path->locks[level - 1] = BTRFS_READ_LOCK; 4668 path->nodes[level - 1] = cur; 4669 unlock_up(path, level, 1, 0, NULL); 4670 } 4671 out: 4672 path->keep_locks = keep_locks; 4673 if (ret == 0) { 4674 btrfs_unlock_up_safe(path, path->lowest_level + 1); 4675 memcpy(min_key, &found_key, sizeof(found_key)); 4676 } 4677 return ret; 4678 } 4679 4680 /* 4681 * this is similar to btrfs_next_leaf, but does not try to preserve 4682 * and fixup the path. It looks for and returns the next key in the 4683 * tree based on the current path and the min_trans parameters. 4684 * 4685 * 0 is returned if another key is found, < 0 if there are any errors 4686 * and 1 is returned if there are no higher keys in the tree 4687 * 4688 * path->keep_locks should be set to 1 on the search made before 4689 * calling this function. 4690 */ 4691 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, 4692 struct btrfs_key *key, int level, u64 min_trans) 4693 { 4694 int slot; 4695 struct extent_buffer *c; 4696 4697 WARN_ON(!path->keep_locks && !path->skip_locking); 4698 while (level < BTRFS_MAX_LEVEL) { 4699 if (!path->nodes[level]) 4700 return 1; 4701 4702 slot = path->slots[level] + 1; 4703 c = path->nodes[level]; 4704 next: 4705 if (slot >= btrfs_header_nritems(c)) { 4706 int ret; 4707 int orig_lowest; 4708 struct btrfs_key cur_key; 4709 if (level + 1 >= BTRFS_MAX_LEVEL || 4710 !path->nodes[level + 1]) 4711 return 1; 4712 4713 if (path->locks[level + 1] || path->skip_locking) { 4714 level++; 4715 continue; 4716 } 4717 4718 slot = btrfs_header_nritems(c) - 1; 4719 if (level == 0) 4720 btrfs_item_key_to_cpu(c, &cur_key, slot); 4721 else 4722 btrfs_node_key_to_cpu(c, &cur_key, slot); 4723 4724 orig_lowest = path->lowest_level; 4725 btrfs_release_path(path); 4726 path->lowest_level = level; 4727 ret = btrfs_search_slot(NULL, root, &cur_key, path, 4728 0, 0); 4729 path->lowest_level = orig_lowest; 4730 if (ret < 0) 4731 return ret; 4732 4733 c = path->nodes[level]; 4734 slot = path->slots[level]; 4735 if (ret == 0) 4736 slot++; 4737 goto next; 4738 } 4739 4740 if (level == 0) 4741 btrfs_item_key_to_cpu(c, key, slot); 4742 else { 4743 u64 gen = btrfs_node_ptr_generation(c, slot); 4744 4745 if (gen < min_trans) { 4746 slot++; 4747 goto next; 4748 } 4749 btrfs_node_key_to_cpu(c, key, slot); 4750 } 4751 return 0; 4752 } 4753 return 1; 4754 } 4755 4756 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, 4757 u64 time_seq) 4758 { 4759 int slot; 4760 int level; 4761 struct extent_buffer *c; 4762 struct extent_buffer *next; 4763 struct btrfs_fs_info *fs_info = root->fs_info; 4764 
struct btrfs_key key; 4765 bool need_commit_sem = false; 4766 u32 nritems; 4767 int ret; 4768 int i; 4769 4770 /* 4771 * The nowait semantics are used only for write paths, where we don't 4772 * use the tree mod log and sequence numbers. 4773 */ 4774 if (time_seq) 4775 ASSERT(!path->nowait); 4776 4777 nritems = btrfs_header_nritems(path->nodes[0]); 4778 if (nritems == 0) 4779 return 1; 4780 4781 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1); 4782 again: 4783 level = 1; 4784 next = NULL; 4785 btrfs_release_path(path); 4786 4787 path->keep_locks = 1; 4788 4789 if (time_seq) { 4790 ret = btrfs_search_old_slot(root, &key, path, time_seq); 4791 } else { 4792 if (path->need_commit_sem) { 4793 path->need_commit_sem = 0; 4794 need_commit_sem = true; 4795 if (path->nowait) { 4796 if (!down_read_trylock(&fs_info->commit_root_sem)) { 4797 ret = -EAGAIN; 4798 goto done; 4799 } 4800 } else { 4801 down_read(&fs_info->commit_root_sem); 4802 } 4803 } 4804 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4805 } 4806 path->keep_locks = 0; 4807 4808 if (ret < 0) 4809 goto done; 4810 4811 nritems = btrfs_header_nritems(path->nodes[0]); 4812 /* 4813 * by releasing the path above we dropped all our locks. A balance 4814 * could have added more items next to the key that used to be 4815 * at the very end of the block. So, check again here and 4816 * advance the path if there are now more items available. 4817 */ 4818 if (nritems > 0 && path->slots[0] < nritems - 1) { 4819 if (ret == 0) 4820 path->slots[0]++; 4821 ret = 0; 4822 goto done; 4823 } 4824 /* 4825 * So the above check misses one case: 4826 * - after releasing the path above, someone has removed the item that 4827 * used to be at the very end of the block, and balance between leaves 4828 * gets another one with bigger key.offset to replace it. 4829 * 4830 * This one should be returned as well, or we can get leaf corruption 4831 * later (esp. in __btrfs_drop_extents()). 4832 * 4833 * And a bit more explanation about this check: 4834 * with ret > 0, the key isn't found, the path points to the slot 4835 * where it should be inserted, so the path->slots[0] item must be the 4836 * bigger one. 4837 */ 4838 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) { 4839 ret = 0; 4840 goto done; 4841 } 4842 4843 while (level < BTRFS_MAX_LEVEL) { 4844 if (!path->nodes[level]) { 4845 ret = 1; 4846 goto done; 4847 } 4848 4849 slot = path->slots[level] + 1; 4850 c = path->nodes[level]; 4851 if (slot >= btrfs_header_nritems(c)) { 4852 level++; 4853 if (level == BTRFS_MAX_LEVEL) { 4854 ret = 1; 4855 goto done; 4856 } 4857 continue; 4858 } 4859 4860 4861 /* 4862 * Our current level is where we're going to start from, and to 4863 * make sure lockdep doesn't complain we need to drop our locks 4864 * and nodes from 0 to our current level.
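 *
 * (Tree locks are normally taken top-down, root towards leaf; taking
 * a new lock at @level while still holding locks at the levels below
 * it would look like a lock-order inversion to lockdep.)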
4865 */ 4866 for (i = 0; i < level; i++) { 4867 if (path->locks[level]) { 4868 btrfs_tree_read_unlock(path->nodes[i]); 4869 path->locks[i] = 0; 4870 } 4871 free_extent_buffer(path->nodes[i]); 4872 path->nodes[i] = NULL; 4873 } 4874 4875 next = c; 4876 ret = read_block_for_search(root, path, &next, level, 4877 slot, &key); 4878 if (ret == -EAGAIN && !path->nowait) 4879 goto again; 4880 4881 if (ret < 0) { 4882 btrfs_release_path(path); 4883 goto done; 4884 } 4885 4886 if (!path->skip_locking) { 4887 ret = btrfs_try_tree_read_lock(next); 4888 if (!ret && path->nowait) { 4889 ret = -EAGAIN; 4890 goto done; 4891 } 4892 if (!ret && time_seq) { 4893 /* 4894 * If we don't get the lock, we may be racing 4895 * with push_leaf_left, holding that lock while 4896 * itself waiting for the leaf we've currently 4897 * locked. To solve this situation, we give up 4898 * on our lock and cycle. 4899 */ 4900 free_extent_buffer(next); 4901 btrfs_release_path(path); 4902 cond_resched(); 4903 goto again; 4904 } 4905 if (!ret) 4906 btrfs_tree_read_lock(next); 4907 } 4908 break; 4909 } 4910 path->slots[level] = slot; 4911 while (1) { 4912 level--; 4913 path->nodes[level] = next; 4914 path->slots[level] = 0; 4915 if (!path->skip_locking) 4916 path->locks[level] = BTRFS_READ_LOCK; 4917 if (!level) 4918 break; 4919 4920 ret = read_block_for_search(root, path, &next, level, 4921 0, &key); 4922 if (ret == -EAGAIN && !path->nowait) 4923 goto again; 4924 4925 if (ret < 0) { 4926 btrfs_release_path(path); 4927 goto done; 4928 } 4929 4930 if (!path->skip_locking) { 4931 if (path->nowait) { 4932 if (!btrfs_try_tree_read_lock(next)) { 4933 ret = -EAGAIN; 4934 goto done; 4935 } 4936 } else { 4937 btrfs_tree_read_lock(next); 4938 } 4939 } 4940 } 4941 ret = 0; 4942 done: 4943 unlock_up(path, 0, 1, 0, NULL); 4944 if (need_commit_sem) { 4945 int ret2; 4946 4947 path->need_commit_sem = 1; 4948 ret2 = finish_need_commit_sem_search(path); 4949 up_read(&fs_info->commit_root_sem); 4950 if (ret2) 4951 ret = ret2; 4952 } 4953 4954 return ret; 4955 } 4956 4957 int btrfs_next_old_item(struct btrfs_root *root, struct btrfs_path *path, u64 time_seq) 4958 { 4959 path->slots[0]++; 4960 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) 4961 return btrfs_next_old_leaf(root, path, time_seq); 4962 return 0; 4963 } 4964 4965 /* 4966 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps 4967 * searching until it gets past min_objectid or finds an item of 'type' 4968 * 4969 * returns 0 if something is found, 1 if nothing was found and < 0 on error 4970 */ 4971 int btrfs_previous_item(struct btrfs_root *root, 4972 struct btrfs_path *path, u64 min_objectid, 4973 int type) 4974 { 4975 struct btrfs_key found_key; 4976 struct extent_buffer *leaf; 4977 u32 nritems; 4978 int ret; 4979 4980 while (1) { 4981 if (path->slots[0] == 0) { 4982 ret = btrfs_prev_leaf(root, path); 4983 if (ret != 0) 4984 return ret; 4985 } else { 4986 path->slots[0]--; 4987 } 4988 leaf = path->nodes[0]; 4989 nritems = btrfs_header_nritems(leaf); 4990 if (nritems == 0) 4991 return 1; 4992 if (path->slots[0] == nritems) 4993 path->slots[0]--; 4994 4995 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 4996 if (found_key.objectid < min_objectid) 4997 break; 4998 if (found_key.type == type) 4999 return 0; 5000 if (found_key.objectid == min_objectid && 5001 found_key.type < type) 5002 break; 5003 } 5004 return 1; 5005 } 5006 5007 /* 5008 * search in extent tree to find a previous Metadata/Data extent item with 5009 * min objectid.
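 *
 * "Metadata/Data extent item" means an item whose key type is
 * BTRFS_METADATA_ITEM_KEY or BTRFS_EXTENT_ITEM_KEY, matching the checks
 * in the loop below.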
5010 * 5011 * returns 0 if something is found, 1 if nothing was found and < 0 on error 5012 */ 5013 int btrfs_previous_extent_item(struct btrfs_root *root, 5014 struct btrfs_path *path, u64 min_objectid) 5015 { 5016 struct btrfs_key found_key; 5017 struct extent_buffer *leaf; 5018 u32 nritems; 5019 int ret; 5020 5021 while (1) { 5022 if (path->slots[0] == 0) { 5023 ret = btrfs_prev_leaf(root, path); 5024 if (ret != 0) 5025 return ret; 5026 } else { 5027 path->slots[0]--; 5028 } 5029 leaf = path->nodes[0]; 5030 nritems = btrfs_header_nritems(leaf); 5031 if (nritems == 0) 5032 return 1; 5033 if (path->slots[0] == nritems) 5034 path->slots[0]--; 5035 5036 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 5037 if (found_key.objectid < min_objectid) 5038 break; 5039 if (found_key.type == BTRFS_EXTENT_ITEM_KEY || 5040 found_key.type == BTRFS_METADATA_ITEM_KEY) 5041 return 0; 5042 if (found_key.objectid == min_objectid && 5043 found_key.type < BTRFS_EXTENT_ITEM_KEY) 5044 break; 5045 } 5046 return 1; 5047 } 5048 5049 int __init btrfs_ctree_init(void) 5050 { 5051 btrfs_path_cachep = kmem_cache_create("btrfs_path", 5052 sizeof(struct btrfs_path), 0, 5053 SLAB_MEM_SPREAD, NULL); 5054 if (!btrfs_path_cachep) 5055 return -ENOMEM; 5056 return 0; 5057 } 5058 5059 void __cold btrfs_ctree_exit(void) 5060 { 5061 kmem_cache_destroy(btrfs_path_cachep); 5062 } 5063
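/*
 * Typical read-only iteration pattern built on the helpers above (a hedged
 * sketch of a hypothetical caller, not code from this file; error handling
 * trimmed, and btrfs_next_leaf() is the time_seq == 0 wrapper around
 * btrfs_next_old_leaf()):
 *
 *	struct btrfs_path *path;
 *	struct btrfs_key key = { .objectid = objectid, .type = 0, .offset = 0 };
 *	int ret;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret < 0)
 *		goto out;
 *	while (1) {
 *		struct extent_buffer *leaf = path->nodes[0];
 *
 *		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
 *			ret = btrfs_next_leaf(root, path);
 *			if (ret)
 *				break;
 *			continue;
 *		}
 *		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 *		(process the item at path->slots[0] here)
 *		path->slots[0]++;
 *	}
 * out:
 *	btrfs_free_path(path);
 */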