// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007,2008 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mm.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *ins_key, struct btrfs_path *path,
		      int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot);

static const struct btrfs_csums {
	u16		size;
	const char	name[10];
	const char	driver[12];
} btrfs_csums[] = {
	[BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
	[BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" },
	[BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
	[BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
				     .driver = "blake2b-256" },
};

int btrfs_super_csum_size(const struct btrfs_super_block *s)
{
	u16 t = btrfs_super_csum_type(s);
	/*
	 * csum type is validated at mount time
	 */
	return btrfs_csums[t].size;
}

const char *btrfs_super_csum_name(u16 csum_type)
{
	/* csum type is validated at mount time */
	return btrfs_csums[csum_type].name;
}

/*
 * Return driver name if defined, otherwise the name that's also a valid driver
 * name
 */
const char *btrfs_super_csum_driver(u16 csum_type)
{
	/* csum type is validated at mount time */
	return btrfs_csums[csum_type].driver[0] ?
		btrfs_csums[csum_type].driver :
		btrfs_csums[csum_type].name;
}

size_t __attribute_const__ btrfs_get_num_csums(void)
{
	return ARRAY_SIZE(btrfs_csums);
}

struct btrfs_path *btrfs_alloc_path(void)
{
	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}

/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}

/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that have no locks or extent buffers held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}

/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
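 *
 * A typical caller pattern (illustrative only, not a caller in this file):
 * grab the node with btrfs_root_node(), use it without assuming it is still
 * the current root, and drop the reference with free_extent_buffer() when
 * done.  Callers that need the node locked use btrfs_lock_root_node() or
 * btrfs_read_lock_root_node() instead, which loop until the buffer they
 * locked is still the root.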
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was COWed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}

/*
 * Cowonly roots (not-shareable trees, everything not subvolume or reloc
 * roots) just get put onto a simple dirty list.  The transaction commit
 * walks this list to make sure they get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
		return;

	spin_lock(&fs_info->trans_lock);
	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
		/* Want the extent tree to be the last on the list */
		if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID)
			list_move_tail(&root->dirty_list,
				       &fs_info->dirty_cowonly_roots);
		else
			list_move(&root->dirty_list,
				  &fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&fs_info->trans_lock);
}

/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid. The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root,
		    struct extent_buffer *buf,
		    struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
				     &disk_key, level, buf->start, 0,
				     BTRFS_NESTING_NEW_ROOT);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);
	if (ret) {
		btrfs_tree_unlock(cow);
		free_extent_buffer(cow);
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

btrfs_mark_buffer_dirty(cow); 232 *cow_ret = cow; 233 return 0; 234 } 235 236 enum mod_log_op { 237 MOD_LOG_KEY_REPLACE, 238 MOD_LOG_KEY_ADD, 239 MOD_LOG_KEY_REMOVE, 240 MOD_LOG_KEY_REMOVE_WHILE_FREEING, 241 MOD_LOG_KEY_REMOVE_WHILE_MOVING, 242 MOD_LOG_MOVE_KEYS, 243 MOD_LOG_ROOT_REPLACE, 244 }; 245 246 struct tree_mod_root { 247 u64 logical; 248 u8 level; 249 }; 250 251 struct tree_mod_elem { 252 struct rb_node node; 253 u64 logical; 254 u64 seq; 255 enum mod_log_op op; 256 257 /* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */ 258 int slot; 259 260 /* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */ 261 u64 generation; 262 263 /* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */ 264 struct btrfs_disk_key key; 265 u64 blockptr; 266 267 /* this is used for op == MOD_LOG_MOVE_KEYS */ 268 struct { 269 int dst_slot; 270 int nr_items; 271 } move; 272 273 /* this is used for op == MOD_LOG_ROOT_REPLACE */ 274 struct tree_mod_root old_root; 275 }; 276 277 /* 278 * Pull a new tree mod seq number for our operation. 279 */ 280 static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info) 281 { 282 return atomic64_inc_return(&fs_info->tree_mod_seq); 283 } 284 285 /* 286 * This adds a new blocker to the tree mod log's blocker list if the @elem 287 * passed does not already have a sequence number set. So when a caller expects 288 * to record tree modifications, it should ensure to set elem->seq to zero 289 * before calling btrfs_get_tree_mod_seq. 290 * Returns a fresh, unused tree log modification sequence number, even if no new 291 * blocker was added. 292 */ 293 u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info, 294 struct seq_list *elem) 295 { 296 write_lock(&fs_info->tree_mod_log_lock); 297 if (!elem->seq) { 298 elem->seq = btrfs_inc_tree_mod_seq(fs_info); 299 list_add_tail(&elem->list, &fs_info->tree_mod_seq_list); 300 } 301 write_unlock(&fs_info->tree_mod_log_lock); 302 303 return elem->seq; 304 } 305 306 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info, 307 struct seq_list *elem) 308 { 309 struct rb_root *tm_root; 310 struct rb_node *node; 311 struct rb_node *next; 312 struct tree_mod_elem *tm; 313 u64 min_seq = (u64)-1; 314 u64 seq_putting = elem->seq; 315 316 if (!seq_putting) 317 return; 318 319 write_lock(&fs_info->tree_mod_log_lock); 320 list_del(&elem->list); 321 elem->seq = 0; 322 323 if (!list_empty(&fs_info->tree_mod_seq_list)) { 324 struct seq_list *first; 325 326 first = list_first_entry(&fs_info->tree_mod_seq_list, 327 struct seq_list, list); 328 if (seq_putting > first->seq) { 329 /* 330 * Blocker with lower sequence number exists, we 331 * cannot remove anything from the log. 332 */ 333 write_unlock(&fs_info->tree_mod_log_lock); 334 return; 335 } 336 min_seq = first->seq; 337 } 338 339 /* 340 * anything that's lower than the lowest existing (read: blocked) 341 * sequence number can be removed from the tree. 342 */ 343 tm_root = &fs_info->tree_mod_log; 344 for (node = rb_first(tm_root); node; node = next) { 345 next = rb_next(node); 346 tm = rb_entry(node, struct tree_mod_elem, node); 347 if (tm->seq >= min_seq) 348 continue; 349 rb_erase(node, tm_root); 350 kfree(tm); 351 } 352 write_unlock(&fs_info->tree_mod_log_lock); 353 } 354 355 /* 356 * key order of the log: 357 * node/leaf start address -> sequence 358 * 359 * The 'start address' is the logical address of the *new* root node 360 * for root replace operations, or the logical address of the affected 361 * block for all other operations. 
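 *
 * Illustrative example (made-up values): all entries for logical address
 * 4096 sort before any entry for logical address 8192, and within the same
 * address the entry with the lower sequence number sorts first.
 * __tree_mod_log_insert() below keeps the rb-tree in exactly this order.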
362 */ 363 static noinline int 364 __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm) 365 { 366 struct rb_root *tm_root; 367 struct rb_node **new; 368 struct rb_node *parent = NULL; 369 struct tree_mod_elem *cur; 370 371 lockdep_assert_held_write(&fs_info->tree_mod_log_lock); 372 373 tm->seq = btrfs_inc_tree_mod_seq(fs_info); 374 375 tm_root = &fs_info->tree_mod_log; 376 new = &tm_root->rb_node; 377 while (*new) { 378 cur = rb_entry(*new, struct tree_mod_elem, node); 379 parent = *new; 380 if (cur->logical < tm->logical) 381 new = &((*new)->rb_left); 382 else if (cur->logical > tm->logical) 383 new = &((*new)->rb_right); 384 else if (cur->seq < tm->seq) 385 new = &((*new)->rb_left); 386 else if (cur->seq > tm->seq) 387 new = &((*new)->rb_right); 388 else 389 return -EEXIST; 390 } 391 392 rb_link_node(&tm->node, parent, new); 393 rb_insert_color(&tm->node, tm_root); 394 return 0; 395 } 396 397 /* 398 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it 399 * returns zero with the tree_mod_log_lock acquired. The caller must hold 400 * this until all tree mod log insertions are recorded in the rb tree and then 401 * write unlock fs_info::tree_mod_log_lock. 402 */ 403 static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info, 404 struct extent_buffer *eb) { 405 smp_mb(); 406 if (list_empty(&(fs_info)->tree_mod_seq_list)) 407 return 1; 408 if (eb && btrfs_header_level(eb) == 0) 409 return 1; 410 411 write_lock(&fs_info->tree_mod_log_lock); 412 if (list_empty(&(fs_info)->tree_mod_seq_list)) { 413 write_unlock(&fs_info->tree_mod_log_lock); 414 return 1; 415 } 416 417 return 0; 418 } 419 420 /* Similar to tree_mod_dont_log, but doesn't acquire any locks. */ 421 static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info, 422 struct extent_buffer *eb) 423 { 424 smp_mb(); 425 if (list_empty(&(fs_info)->tree_mod_seq_list)) 426 return 0; 427 if (eb && btrfs_header_level(eb) == 0) 428 return 0; 429 430 return 1; 431 } 432 433 static struct tree_mod_elem * 434 alloc_tree_mod_elem(struct extent_buffer *eb, int slot, 435 enum mod_log_op op, gfp_t flags) 436 { 437 struct tree_mod_elem *tm; 438 439 tm = kzalloc(sizeof(*tm), flags); 440 if (!tm) 441 return NULL; 442 443 tm->logical = eb->start; 444 if (op != MOD_LOG_KEY_ADD) { 445 btrfs_node_key(eb, &tm->key, slot); 446 tm->blockptr = btrfs_node_blockptr(eb, slot); 447 } 448 tm->op = op; 449 tm->slot = slot; 450 tm->generation = btrfs_node_ptr_generation(eb, slot); 451 RB_CLEAR_NODE(&tm->node); 452 453 return tm; 454 } 455 456 static noinline int tree_mod_log_insert_key(struct extent_buffer *eb, int slot, 457 enum mod_log_op op, gfp_t flags) 458 { 459 struct tree_mod_elem *tm; 460 int ret; 461 462 if (!tree_mod_need_log(eb->fs_info, eb)) 463 return 0; 464 465 tm = alloc_tree_mod_elem(eb, slot, op, flags); 466 if (!tm) 467 return -ENOMEM; 468 469 if (tree_mod_dont_log(eb->fs_info, eb)) { 470 kfree(tm); 471 return 0; 472 } 473 474 ret = __tree_mod_log_insert(eb->fs_info, tm); 475 write_unlock(&eb->fs_info->tree_mod_log_lock); 476 if (ret) 477 kfree(tm); 478 479 return ret; 480 } 481 482 static noinline int tree_mod_log_insert_move(struct extent_buffer *eb, 483 int dst_slot, int src_slot, int nr_items) 484 { 485 struct tree_mod_elem *tm = NULL; 486 struct tree_mod_elem **tm_list = NULL; 487 int ret = 0; 488 int i; 489 int locked = 0; 490 491 if (!tree_mod_need_log(eb->fs_info, eb)) 492 return 0; 493 494 tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), GFP_NOFS); 495 if (!tm_list) 
496 return -ENOMEM; 497 498 tm = kzalloc(sizeof(*tm), GFP_NOFS); 499 if (!tm) { 500 ret = -ENOMEM; 501 goto free_tms; 502 } 503 504 tm->logical = eb->start; 505 tm->slot = src_slot; 506 tm->move.dst_slot = dst_slot; 507 tm->move.nr_items = nr_items; 508 tm->op = MOD_LOG_MOVE_KEYS; 509 510 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) { 511 tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot, 512 MOD_LOG_KEY_REMOVE_WHILE_MOVING, GFP_NOFS); 513 if (!tm_list[i]) { 514 ret = -ENOMEM; 515 goto free_tms; 516 } 517 } 518 519 if (tree_mod_dont_log(eb->fs_info, eb)) 520 goto free_tms; 521 locked = 1; 522 523 /* 524 * When we override something during the move, we log these removals. 525 * This can only happen when we move towards the beginning of the 526 * buffer, i.e. dst_slot < src_slot. 527 */ 528 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) { 529 ret = __tree_mod_log_insert(eb->fs_info, tm_list[i]); 530 if (ret) 531 goto free_tms; 532 } 533 534 ret = __tree_mod_log_insert(eb->fs_info, tm); 535 if (ret) 536 goto free_tms; 537 write_unlock(&eb->fs_info->tree_mod_log_lock); 538 kfree(tm_list); 539 540 return 0; 541 free_tms: 542 for (i = 0; i < nr_items; i++) { 543 if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node)) 544 rb_erase(&tm_list[i]->node, &eb->fs_info->tree_mod_log); 545 kfree(tm_list[i]); 546 } 547 if (locked) 548 write_unlock(&eb->fs_info->tree_mod_log_lock); 549 kfree(tm_list); 550 kfree(tm); 551 552 return ret; 553 } 554 555 static inline int 556 __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, 557 struct tree_mod_elem **tm_list, 558 int nritems) 559 { 560 int i, j; 561 int ret; 562 563 for (i = nritems - 1; i >= 0; i--) { 564 ret = __tree_mod_log_insert(fs_info, tm_list[i]); 565 if (ret) { 566 for (j = nritems - 1; j > i; j--) 567 rb_erase(&tm_list[j]->node, 568 &fs_info->tree_mod_log); 569 return ret; 570 } 571 } 572 573 return 0; 574 } 575 576 static noinline int tree_mod_log_insert_root(struct extent_buffer *old_root, 577 struct extent_buffer *new_root, int log_removal) 578 { 579 struct btrfs_fs_info *fs_info = old_root->fs_info; 580 struct tree_mod_elem *tm = NULL; 581 struct tree_mod_elem **tm_list = NULL; 582 int nritems = 0; 583 int ret = 0; 584 int i; 585 586 if (!tree_mod_need_log(fs_info, NULL)) 587 return 0; 588 589 if (log_removal && btrfs_header_level(old_root) > 0) { 590 nritems = btrfs_header_nritems(old_root); 591 tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), 592 GFP_NOFS); 593 if (!tm_list) { 594 ret = -ENOMEM; 595 goto free_tms; 596 } 597 for (i = 0; i < nritems; i++) { 598 tm_list[i] = alloc_tree_mod_elem(old_root, i, 599 MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS); 600 if (!tm_list[i]) { 601 ret = -ENOMEM; 602 goto free_tms; 603 } 604 } 605 } 606 607 tm = kzalloc(sizeof(*tm), GFP_NOFS); 608 if (!tm) { 609 ret = -ENOMEM; 610 goto free_tms; 611 } 612 613 tm->logical = new_root->start; 614 tm->old_root.logical = old_root->start; 615 tm->old_root.level = btrfs_header_level(old_root); 616 tm->generation = btrfs_header_generation(old_root); 617 tm->op = MOD_LOG_ROOT_REPLACE; 618 619 if (tree_mod_dont_log(fs_info, NULL)) 620 goto free_tms; 621 622 if (tm_list) 623 ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems); 624 if (!ret) 625 ret = __tree_mod_log_insert(fs_info, tm); 626 627 write_unlock(&fs_info->tree_mod_log_lock); 628 if (ret) 629 goto free_tms; 630 kfree(tm_list); 631 632 return ret; 633 634 free_tms: 635 if (tm_list) { 636 for (i = 0; i < nritems; i++) 637 kfree(tm_list[i]); 638 kfree(tm_list); 639 } 640 
kfree(tm); 641 642 return ret; 643 } 644 645 static struct tree_mod_elem * 646 __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq, 647 int smallest) 648 { 649 struct rb_root *tm_root; 650 struct rb_node *node; 651 struct tree_mod_elem *cur = NULL; 652 struct tree_mod_elem *found = NULL; 653 654 read_lock(&fs_info->tree_mod_log_lock); 655 tm_root = &fs_info->tree_mod_log; 656 node = tm_root->rb_node; 657 while (node) { 658 cur = rb_entry(node, struct tree_mod_elem, node); 659 if (cur->logical < start) { 660 node = node->rb_left; 661 } else if (cur->logical > start) { 662 node = node->rb_right; 663 } else if (cur->seq < min_seq) { 664 node = node->rb_left; 665 } else if (!smallest) { 666 /* we want the node with the highest seq */ 667 if (found) 668 BUG_ON(found->seq > cur->seq); 669 found = cur; 670 node = node->rb_left; 671 } else if (cur->seq > min_seq) { 672 /* we want the node with the smallest seq */ 673 if (found) 674 BUG_ON(found->seq < cur->seq); 675 found = cur; 676 node = node->rb_right; 677 } else { 678 found = cur; 679 break; 680 } 681 } 682 read_unlock(&fs_info->tree_mod_log_lock); 683 684 return found; 685 } 686 687 /* 688 * this returns the element from the log with the smallest time sequence 689 * value that's in the log (the oldest log item). any element with a time 690 * sequence lower than min_seq will be ignored. 691 */ 692 static struct tree_mod_elem * 693 tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start, 694 u64 min_seq) 695 { 696 return __tree_mod_log_search(fs_info, start, min_seq, 1); 697 } 698 699 /* 700 * this returns the element from the log with the largest time sequence 701 * value that's in the log (the most recent log item). any element with 702 * a time sequence lower than min_seq will be ignored. 
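 *
 * Both this helper and tree_mod_log_search_oldest() above are thin wrappers
 * around __tree_mod_log_search(); the 'smallest' argument selects which end
 * of the matching sequence range is returned.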
703 */ 704 static struct tree_mod_elem * 705 tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq) 706 { 707 return __tree_mod_log_search(fs_info, start, min_seq, 0); 708 } 709 710 static noinline int tree_mod_log_eb_copy(struct extent_buffer *dst, 711 struct extent_buffer *src, unsigned long dst_offset, 712 unsigned long src_offset, int nr_items) 713 { 714 struct btrfs_fs_info *fs_info = dst->fs_info; 715 int ret = 0; 716 struct tree_mod_elem **tm_list = NULL; 717 struct tree_mod_elem **tm_list_add, **tm_list_rem; 718 int i; 719 int locked = 0; 720 721 if (!tree_mod_need_log(fs_info, NULL)) 722 return 0; 723 724 if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0) 725 return 0; 726 727 tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *), 728 GFP_NOFS); 729 if (!tm_list) 730 return -ENOMEM; 731 732 tm_list_add = tm_list; 733 tm_list_rem = tm_list + nr_items; 734 for (i = 0; i < nr_items; i++) { 735 tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset, 736 MOD_LOG_KEY_REMOVE, GFP_NOFS); 737 if (!tm_list_rem[i]) { 738 ret = -ENOMEM; 739 goto free_tms; 740 } 741 742 tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset, 743 MOD_LOG_KEY_ADD, GFP_NOFS); 744 if (!tm_list_add[i]) { 745 ret = -ENOMEM; 746 goto free_tms; 747 } 748 } 749 750 if (tree_mod_dont_log(fs_info, NULL)) 751 goto free_tms; 752 locked = 1; 753 754 for (i = 0; i < nr_items; i++) { 755 ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]); 756 if (ret) 757 goto free_tms; 758 ret = __tree_mod_log_insert(fs_info, tm_list_add[i]); 759 if (ret) 760 goto free_tms; 761 } 762 763 write_unlock(&fs_info->tree_mod_log_lock); 764 kfree(tm_list); 765 766 return 0; 767 768 free_tms: 769 for (i = 0; i < nr_items * 2; i++) { 770 if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node)) 771 rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log); 772 kfree(tm_list[i]); 773 } 774 if (locked) 775 write_unlock(&fs_info->tree_mod_log_lock); 776 kfree(tm_list); 777 778 return ret; 779 } 780 781 static noinline int tree_mod_log_free_eb(struct extent_buffer *eb) 782 { 783 struct tree_mod_elem **tm_list = NULL; 784 int nritems = 0; 785 int i; 786 int ret = 0; 787 788 if (btrfs_header_level(eb) == 0) 789 return 0; 790 791 if (!tree_mod_need_log(eb->fs_info, NULL)) 792 return 0; 793 794 nritems = btrfs_header_nritems(eb); 795 tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS); 796 if (!tm_list) 797 return -ENOMEM; 798 799 for (i = 0; i < nritems; i++) { 800 tm_list[i] = alloc_tree_mod_elem(eb, i, 801 MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS); 802 if (!tm_list[i]) { 803 ret = -ENOMEM; 804 goto free_tms; 805 } 806 } 807 808 if (tree_mod_dont_log(eb->fs_info, eb)) 809 goto free_tms; 810 811 ret = __tree_mod_log_free_eb(eb->fs_info, tm_list, nritems); 812 write_unlock(&eb->fs_info->tree_mod_log_lock); 813 if (ret) 814 goto free_tms; 815 kfree(tm_list); 816 817 return 0; 818 819 free_tms: 820 for (i = 0; i < nritems; i++) 821 kfree(tm_list[i]); 822 kfree(tm_list); 823 824 return ret; 825 } 826 827 /* 828 * check if the tree block can be shared by multiple trees 829 */ 830 int btrfs_block_can_be_shared(struct btrfs_root *root, 831 struct extent_buffer *buf) 832 { 833 /* 834 * Tree blocks not in shareable trees and tree roots are never shared. 835 * If a block was allocated after the last snapshot and the block was 836 * not allocated by tree relocation, we know the block is not shared. 
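	 *
	 * Put differently (a summary of the check below): a block in a
	 * shareable tree that is neither the current root nor the commit
	 * root, and that either predates the last snapshot or carries the
	 * RELOC flag, may be shared by multiple trees.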
	 */
	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;

	return 0;
}

static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is that some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			ret = -EROFS;
			btrfs_handle_fs_error(fs_info, ret, NULL);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1);
			if (ret)
				return ret;

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0);
				if (ret)
					return ret;
				ret = btrfs_inc_ref(trans, root, cow, 1);
				if (ret)
					return ret;
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
		}
		if (new_flags != 0) {
			int level = btrfs_header_level(buf);

			ret = btrfs_set_disk_extent_flags(trans, buf,
							  new_flags, level, 0);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
			ret = btrfs_dec_ref(trans, root, buf, 1);
			if (ret)
				return ret;
		}
		btrfs_clean_tree_block(buf);
		*last_ref = 1;
	}
	return 0;
}

static struct extent_buffer *alloc_tree_block_no_bg_flush(
					  struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  u64 parent_start,
					  const struct btrfs_disk_key *disk_key,
					  int level,
					  u64 hint,
					  u64 empty_size,
					  enum btrfs_lock_nesting nest)
966 { 967 struct btrfs_fs_info *fs_info = root->fs_info; 968 struct extent_buffer *ret; 969 970 /* 971 * If we are COWing a node/leaf from the extent, chunk, device or free 972 * space trees, make sure that we do not finish block group creation of 973 * pending block groups. We do this to avoid a deadlock. 974 * COWing can result in allocation of a new chunk, and flushing pending 975 * block groups (btrfs_create_pending_block_groups()) can be triggered 976 * when finishing allocation of a new chunk. Creation of a pending block 977 * group modifies the extent, chunk, device and free space trees, 978 * therefore we could deadlock with ourselves since we are holding a 979 * lock on an extent buffer that btrfs_create_pending_block_groups() may 980 * try to COW later. 981 * For similar reasons, we also need to delay flushing pending block 982 * groups when splitting a leaf or node, from one of those trees, since 983 * we are holding a write lock on it and its parent or when inserting a 984 * new root node for one of those trees. 985 */ 986 if (root == fs_info->extent_root || 987 root == fs_info->chunk_root || 988 root == fs_info->dev_root || 989 root == fs_info->free_space_root) 990 trans->can_flush_pending_bgs = false; 991 992 ret = btrfs_alloc_tree_block(trans, root, parent_start, 993 root->root_key.objectid, disk_key, level, 994 hint, empty_size, nest); 995 trans->can_flush_pending_bgs = true; 996 997 return ret; 998 } 999 1000 /* 1001 * does the dirty work in cow of a single block. The parent block (if 1002 * supplied) is updated to point to the new cow copy. The new buffer is marked 1003 * dirty and returned locked. If you modify the block it needs to be marked 1004 * dirty again. 1005 * 1006 * search_start -- an allocation hint for the new block 1007 * 1008 * empty_size -- a hint that you plan on doing more cow. This is the size in 1009 * bytes the allocator should try to find free next to the block it returns. 1010 * This is just a hint and may be ignored by the allocator. 
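 *
 * For reference (see the callers later in this file): btrfs_cow_block()
 * passes the block's start rounded down to 1GiB as search_start and 0 as
 * empty_size, while the defrag path in btrfs_realloc_node() passes a
 * nonzero empty_size so that neighbouring leaves can be clustered together.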
1011 */ 1012 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, 1013 struct btrfs_root *root, 1014 struct extent_buffer *buf, 1015 struct extent_buffer *parent, int parent_slot, 1016 struct extent_buffer **cow_ret, 1017 u64 search_start, u64 empty_size, 1018 enum btrfs_lock_nesting nest) 1019 { 1020 struct btrfs_fs_info *fs_info = root->fs_info; 1021 struct btrfs_disk_key disk_key; 1022 struct extent_buffer *cow; 1023 int level, ret; 1024 int last_ref = 0; 1025 int unlock_orig = 0; 1026 u64 parent_start = 0; 1027 1028 if (*cow_ret == buf) 1029 unlock_orig = 1; 1030 1031 btrfs_assert_tree_locked(buf); 1032 1033 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && 1034 trans->transid != fs_info->running_transaction->transid); 1035 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && 1036 trans->transid != root->last_trans); 1037 1038 level = btrfs_header_level(buf); 1039 1040 if (level == 0) 1041 btrfs_item_key(buf, &disk_key, 0); 1042 else 1043 btrfs_node_key(buf, &disk_key, 0); 1044 1045 if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent) 1046 parent_start = parent->start; 1047 1048 cow = alloc_tree_block_no_bg_flush(trans, root, parent_start, &disk_key, 1049 level, search_start, empty_size, nest); 1050 if (IS_ERR(cow)) 1051 return PTR_ERR(cow); 1052 1053 /* cow is set to blocking by btrfs_init_new_buffer */ 1054 1055 copy_extent_buffer_full(cow, buf); 1056 btrfs_set_header_bytenr(cow, cow->start); 1057 btrfs_set_header_generation(cow, trans->transid); 1058 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV); 1059 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN | 1060 BTRFS_HEADER_FLAG_RELOC); 1061 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) 1062 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC); 1063 else 1064 btrfs_set_header_owner(cow, root->root_key.objectid); 1065 1066 write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid); 1067 1068 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref); 1069 if (ret) { 1070 btrfs_tree_unlock(cow); 1071 free_extent_buffer(cow); 1072 btrfs_abort_transaction(trans, ret); 1073 return ret; 1074 } 1075 1076 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) { 1077 ret = btrfs_reloc_cow_block(trans, root, buf, cow); 1078 if (ret) { 1079 btrfs_tree_unlock(cow); 1080 free_extent_buffer(cow); 1081 btrfs_abort_transaction(trans, ret); 1082 return ret; 1083 } 1084 } 1085 1086 if (buf == root->node) { 1087 WARN_ON(parent && parent != buf); 1088 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID || 1089 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV) 1090 parent_start = buf->start; 1091 1092 atomic_inc(&cow->refs); 1093 ret = tree_mod_log_insert_root(root->node, cow, 1); 1094 BUG_ON(ret < 0); 1095 rcu_assign_pointer(root->node, cow); 1096 1097 btrfs_free_tree_block(trans, root, buf, parent_start, 1098 last_ref); 1099 free_extent_buffer(buf); 1100 add_root_to_dirty_list(root); 1101 } else { 1102 WARN_ON(trans->transid != btrfs_header_generation(parent)); 1103 tree_mod_log_insert_key(parent, parent_slot, 1104 MOD_LOG_KEY_REPLACE, GFP_NOFS); 1105 btrfs_set_node_blockptr(parent, parent_slot, 1106 cow->start); 1107 btrfs_set_node_ptr_generation(parent, parent_slot, 1108 trans->transid); 1109 btrfs_mark_buffer_dirty(parent); 1110 if (last_ref) { 1111 ret = tree_mod_log_free_eb(buf); 1112 if (ret) { 1113 btrfs_tree_unlock(cow); 1114 free_extent_buffer(cow); 1115 btrfs_abort_transaction(trans, ret); 1116 return ret; 1117 } 1118 } 1119 btrfs_free_tree_block(trans, root, 
buf, parent_start, 1120 last_ref); 1121 } 1122 if (unlock_orig) 1123 btrfs_tree_unlock(buf); 1124 free_extent_buffer_stale(buf); 1125 btrfs_mark_buffer_dirty(cow); 1126 *cow_ret = cow; 1127 return 0; 1128 } 1129 1130 /* 1131 * returns the logical address of the oldest predecessor of the given root. 1132 * entries older than time_seq are ignored. 1133 */ 1134 static struct tree_mod_elem *__tree_mod_log_oldest_root( 1135 struct extent_buffer *eb_root, u64 time_seq) 1136 { 1137 struct tree_mod_elem *tm; 1138 struct tree_mod_elem *found = NULL; 1139 u64 root_logical = eb_root->start; 1140 int looped = 0; 1141 1142 if (!time_seq) 1143 return NULL; 1144 1145 /* 1146 * the very last operation that's logged for a root is the 1147 * replacement operation (if it is replaced at all). this has 1148 * the logical address of the *new* root, making it the very 1149 * first operation that's logged for this root. 1150 */ 1151 while (1) { 1152 tm = tree_mod_log_search_oldest(eb_root->fs_info, root_logical, 1153 time_seq); 1154 if (!looped && !tm) 1155 return NULL; 1156 /* 1157 * if there are no tree operation for the oldest root, we simply 1158 * return it. this should only happen if that (old) root is at 1159 * level 0. 1160 */ 1161 if (!tm) 1162 break; 1163 1164 /* 1165 * if there's an operation that's not a root replacement, we 1166 * found the oldest version of our root. normally, we'll find a 1167 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here. 1168 */ 1169 if (tm->op != MOD_LOG_ROOT_REPLACE) 1170 break; 1171 1172 found = tm; 1173 root_logical = tm->old_root.logical; 1174 looped = 1; 1175 } 1176 1177 /* if there's no old root to return, return what we found instead */ 1178 if (!found) 1179 found = tm; 1180 1181 return found; 1182 } 1183 1184 /* 1185 * tm is a pointer to the first operation to rewind within eb. then, all 1186 * previous operations will be rewound (until we reach something older than 1187 * time_seq). 1188 */ 1189 static void 1190 __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, 1191 u64 time_seq, struct tree_mod_elem *first_tm) 1192 { 1193 u32 n; 1194 struct rb_node *next; 1195 struct tree_mod_elem *tm = first_tm; 1196 unsigned long o_dst; 1197 unsigned long o_src; 1198 unsigned long p_size = sizeof(struct btrfs_key_ptr); 1199 1200 n = btrfs_header_nritems(eb); 1201 read_lock(&fs_info->tree_mod_log_lock); 1202 while (tm && tm->seq >= time_seq) { 1203 /* 1204 * all the operations are recorded with the operator used for 1205 * the modification. as we're going backwards, we do the 1206 * opposite of each operation here. 
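		 *
		 * Concretely: a logged REMOVE is undone by re-inserting the
		 * saved key/blockptr/generation (and bumping nritems), a
		 * logged ADD is undone by dropping the slot again, a logged
		 * MOVE is undone by moving the pointers back, and a REPLACE
		 * restores the old key/blockptr/generation in place.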
1207 */ 1208 switch (tm->op) { 1209 case MOD_LOG_KEY_REMOVE_WHILE_FREEING: 1210 BUG_ON(tm->slot < n); 1211 fallthrough; 1212 case MOD_LOG_KEY_REMOVE_WHILE_MOVING: 1213 case MOD_LOG_KEY_REMOVE: 1214 btrfs_set_node_key(eb, &tm->key, tm->slot); 1215 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr); 1216 btrfs_set_node_ptr_generation(eb, tm->slot, 1217 tm->generation); 1218 n++; 1219 break; 1220 case MOD_LOG_KEY_REPLACE: 1221 BUG_ON(tm->slot >= n); 1222 btrfs_set_node_key(eb, &tm->key, tm->slot); 1223 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr); 1224 btrfs_set_node_ptr_generation(eb, tm->slot, 1225 tm->generation); 1226 break; 1227 case MOD_LOG_KEY_ADD: 1228 /* if a move operation is needed it's in the log */ 1229 n--; 1230 break; 1231 case MOD_LOG_MOVE_KEYS: 1232 o_dst = btrfs_node_key_ptr_offset(tm->slot); 1233 o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot); 1234 memmove_extent_buffer(eb, o_dst, o_src, 1235 tm->move.nr_items * p_size); 1236 break; 1237 case MOD_LOG_ROOT_REPLACE: 1238 /* 1239 * this operation is special. for roots, this must be 1240 * handled explicitly before rewinding. 1241 * for non-roots, this operation may exist if the node 1242 * was a root: root A -> child B; then A gets empty and 1243 * B is promoted to the new root. in the mod log, we'll 1244 * have a root-replace operation for B, a tree block 1245 * that is no root. we simply ignore that operation. 1246 */ 1247 break; 1248 } 1249 next = rb_next(&tm->node); 1250 if (!next) 1251 break; 1252 tm = rb_entry(next, struct tree_mod_elem, node); 1253 if (tm->logical != first_tm->logical) 1254 break; 1255 } 1256 read_unlock(&fs_info->tree_mod_log_lock); 1257 btrfs_set_header_nritems(eb, n); 1258 } 1259 1260 /* 1261 * Called with eb read locked. If the buffer cannot be rewound, the same buffer 1262 * is returned. If rewind operations happen, a fresh buffer is returned. The 1263 * returned buffer is always read-locked. If the returned buffer is not the 1264 * input buffer, the lock on the input buffer is released and the input buffer 1265 * is freed (its refcount is decremented). 
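 *
 * The rewound buffer is a private copy (a cloned or dummy extent buffer),
 * used only to serve this time_seq lookup; it is not meant to be written
 * back to disk.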
1266 */ 1267 static struct extent_buffer * 1268 tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path, 1269 struct extent_buffer *eb, u64 time_seq) 1270 { 1271 struct extent_buffer *eb_rewin; 1272 struct tree_mod_elem *tm; 1273 1274 if (!time_seq) 1275 return eb; 1276 1277 if (btrfs_header_level(eb) == 0) 1278 return eb; 1279 1280 tm = tree_mod_log_search(fs_info, eb->start, time_seq); 1281 if (!tm) 1282 return eb; 1283 1284 if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) { 1285 BUG_ON(tm->slot != 0); 1286 eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start); 1287 if (!eb_rewin) { 1288 btrfs_tree_read_unlock(eb); 1289 free_extent_buffer(eb); 1290 return NULL; 1291 } 1292 btrfs_set_header_bytenr(eb_rewin, eb->start); 1293 btrfs_set_header_backref_rev(eb_rewin, 1294 btrfs_header_backref_rev(eb)); 1295 btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb)); 1296 btrfs_set_header_level(eb_rewin, btrfs_header_level(eb)); 1297 } else { 1298 eb_rewin = btrfs_clone_extent_buffer(eb); 1299 if (!eb_rewin) { 1300 btrfs_tree_read_unlock(eb); 1301 free_extent_buffer(eb); 1302 return NULL; 1303 } 1304 } 1305 1306 btrfs_tree_read_unlock(eb); 1307 free_extent_buffer(eb); 1308 1309 btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb_rewin), 1310 eb_rewin, btrfs_header_level(eb_rewin)); 1311 btrfs_tree_read_lock(eb_rewin); 1312 __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm); 1313 WARN_ON(btrfs_header_nritems(eb_rewin) > 1314 BTRFS_NODEPTRS_PER_BLOCK(fs_info)); 1315 1316 return eb_rewin; 1317 } 1318 1319 /* 1320 * get_old_root() rewinds the state of @root's root node to the given @time_seq 1321 * value. If there are no changes, the current root->root_node is returned. If 1322 * anything changed in between, there's a fresh buffer allocated on which the 1323 * rewind operations are done. In any case, the returned buffer is read locked. 1324 * Returns NULL on error (with no locks held). 
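 *
 * Three cases are handled below: the old root block can still be read from
 * disk (read it and clone it), the old root's contents survive only in the
 * mod log (rebuild them in a dummy buffer), or no root replacement was
 * logged at all (clone the current root and rewind the clone).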
1325 */ 1326 static inline struct extent_buffer * 1327 get_old_root(struct btrfs_root *root, u64 time_seq) 1328 { 1329 struct btrfs_fs_info *fs_info = root->fs_info; 1330 struct tree_mod_elem *tm; 1331 struct extent_buffer *eb = NULL; 1332 struct extent_buffer *eb_root; 1333 u64 eb_root_owner = 0; 1334 struct extent_buffer *old; 1335 struct tree_mod_root *old_root = NULL; 1336 u64 old_generation = 0; 1337 u64 logical; 1338 int level; 1339 1340 eb_root = btrfs_read_lock_root_node(root); 1341 tm = __tree_mod_log_oldest_root(eb_root, time_seq); 1342 if (!tm) 1343 return eb_root; 1344 1345 if (tm->op == MOD_LOG_ROOT_REPLACE) { 1346 old_root = &tm->old_root; 1347 old_generation = tm->generation; 1348 logical = old_root->logical; 1349 level = old_root->level; 1350 } else { 1351 logical = eb_root->start; 1352 level = btrfs_header_level(eb_root); 1353 } 1354 1355 tm = tree_mod_log_search(fs_info, logical, time_seq); 1356 if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) { 1357 btrfs_tree_read_unlock(eb_root); 1358 free_extent_buffer(eb_root); 1359 old = read_tree_block(fs_info, logical, root->root_key.objectid, 1360 0, level, NULL); 1361 if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) { 1362 if (!IS_ERR(old)) 1363 free_extent_buffer(old); 1364 btrfs_warn(fs_info, 1365 "failed to read tree block %llu from get_old_root", 1366 logical); 1367 } else { 1368 eb = btrfs_clone_extent_buffer(old); 1369 free_extent_buffer(old); 1370 } 1371 } else if (old_root) { 1372 eb_root_owner = btrfs_header_owner(eb_root); 1373 btrfs_tree_read_unlock(eb_root); 1374 free_extent_buffer(eb_root); 1375 eb = alloc_dummy_extent_buffer(fs_info, logical); 1376 } else { 1377 eb = btrfs_clone_extent_buffer(eb_root); 1378 btrfs_tree_read_unlock(eb_root); 1379 free_extent_buffer(eb_root); 1380 } 1381 1382 if (!eb) 1383 return NULL; 1384 if (old_root) { 1385 btrfs_set_header_bytenr(eb, eb->start); 1386 btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV); 1387 btrfs_set_header_owner(eb, eb_root_owner); 1388 btrfs_set_header_level(eb, old_root->level); 1389 btrfs_set_header_generation(eb, old_generation); 1390 } 1391 btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), eb, 1392 btrfs_header_level(eb)); 1393 btrfs_tree_read_lock(eb); 1394 if (tm) 1395 __tree_mod_log_rewind(fs_info, eb, time_seq, tm); 1396 else 1397 WARN_ON(btrfs_header_level(eb) != 0); 1398 WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(fs_info)); 1399 1400 return eb; 1401 } 1402 1403 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq) 1404 { 1405 struct tree_mod_elem *tm; 1406 int level; 1407 struct extent_buffer *eb_root = btrfs_root_node(root); 1408 1409 tm = __tree_mod_log_oldest_root(eb_root, time_seq); 1410 if (tm && tm->op == MOD_LOG_ROOT_REPLACE) { 1411 level = tm->old_root.level; 1412 } else { 1413 level = btrfs_header_level(eb_root); 1414 } 1415 free_extent_buffer(eb_root); 1416 1417 return level; 1418 } 1419 1420 static inline int should_cow_block(struct btrfs_trans_handle *trans, 1421 struct btrfs_root *root, 1422 struct extent_buffer *buf) 1423 { 1424 if (btrfs_is_testing(root->fs_info)) 1425 return 0; 1426 1427 /* Ensure we can see the FORCE_COW bit */ 1428 smp_mb__before_atomic(); 1429 1430 /* 1431 * We do not need to cow a block if 1432 * 1) this block is not created or changed in this transaction; 1433 * 2) this block does not belong to TREE_RELOC tree; 1434 * 3) the root is not forced COW. 
1435 * 1436 * What is forced COW: 1437 * when we create snapshot during committing the transaction, 1438 * after we've finished copying src root, we must COW the shared 1439 * block to ensure the metadata consistency. 1440 */ 1441 if (btrfs_header_generation(buf) == trans->transid && 1442 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) && 1443 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID && 1444 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) && 1445 !test_bit(BTRFS_ROOT_FORCE_COW, &root->state)) 1446 return 0; 1447 return 1; 1448 } 1449 1450 /* 1451 * cows a single block, see __btrfs_cow_block for the real work. 1452 * This version of it has extra checks so that a block isn't COWed more than 1453 * once per transaction, as long as it hasn't been written yet 1454 */ 1455 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans, 1456 struct btrfs_root *root, struct extent_buffer *buf, 1457 struct extent_buffer *parent, int parent_slot, 1458 struct extent_buffer **cow_ret, 1459 enum btrfs_lock_nesting nest) 1460 { 1461 struct btrfs_fs_info *fs_info = root->fs_info; 1462 u64 search_start; 1463 int ret; 1464 1465 if (test_bit(BTRFS_ROOT_DELETING, &root->state)) 1466 btrfs_err(fs_info, 1467 "COW'ing blocks on a fs root that's being dropped"); 1468 1469 if (trans->transaction != fs_info->running_transaction) 1470 WARN(1, KERN_CRIT "trans %llu running %llu\n", 1471 trans->transid, 1472 fs_info->running_transaction->transid); 1473 1474 if (trans->transid != fs_info->generation) 1475 WARN(1, KERN_CRIT "trans %llu running %llu\n", 1476 trans->transid, fs_info->generation); 1477 1478 if (!should_cow_block(trans, root, buf)) { 1479 trans->dirty = true; 1480 *cow_ret = buf; 1481 return 0; 1482 } 1483 1484 search_start = buf->start & ~((u64)SZ_1G - 1); 1485 1486 /* 1487 * Before CoWing this block for later modification, check if it's 1488 * the subtree root and do the delayed subtree trace if needed. 1489 * 1490 * Also We don't care about the error, as it's handled internally. 1491 */ 1492 btrfs_qgroup_trace_subtree_after_cow(trans, root, buf); 1493 ret = __btrfs_cow_block(trans, root, buf, parent, 1494 parent_slot, cow_ret, search_start, 0, nest); 1495 1496 trace_btrfs_cow_block(root, buf, *cow_ret); 1497 1498 return ret; 1499 } 1500 ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO); 1501 1502 /* 1503 * helper function for defrag to decide if two blocks pointed to by a 1504 * node are actually close by 1505 */ 1506 static int close_blocks(u64 blocknr, u64 other, u32 blocksize) 1507 { 1508 if (blocknr < other && other - (blocknr + blocksize) < 32768) 1509 return 1; 1510 if (blocknr > other && blocknr - (other + blocksize) < 32768) 1511 return 1; 1512 return 0; 1513 } 1514 1515 #ifdef __LITTLE_ENDIAN 1516 1517 /* 1518 * Compare two keys, on little-endian the disk order is same as CPU order and 1519 * we can avoid the conversion. 
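 *
 * (struct btrfs_disk_key stores its fields in little-endian on-disk byte
 * order; on a little-endian CPU that in-memory layout matches struct
 * btrfs_key, so the cast below is safe.)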
1520 */ 1521 static int comp_keys(const struct btrfs_disk_key *disk_key, 1522 const struct btrfs_key *k2) 1523 { 1524 const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key; 1525 1526 return btrfs_comp_cpu_keys(k1, k2); 1527 } 1528 1529 #else 1530 1531 /* 1532 * compare two keys in a memcmp fashion 1533 */ 1534 static int comp_keys(const struct btrfs_disk_key *disk, 1535 const struct btrfs_key *k2) 1536 { 1537 struct btrfs_key k1; 1538 1539 btrfs_disk_key_to_cpu(&k1, disk); 1540 1541 return btrfs_comp_cpu_keys(&k1, k2); 1542 } 1543 #endif 1544 1545 /* 1546 * same as comp_keys only with two btrfs_key's 1547 */ 1548 int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2) 1549 { 1550 if (k1->objectid > k2->objectid) 1551 return 1; 1552 if (k1->objectid < k2->objectid) 1553 return -1; 1554 if (k1->type > k2->type) 1555 return 1; 1556 if (k1->type < k2->type) 1557 return -1; 1558 if (k1->offset > k2->offset) 1559 return 1; 1560 if (k1->offset < k2->offset) 1561 return -1; 1562 return 0; 1563 } 1564 1565 /* 1566 * this is used by the defrag code to go through all the 1567 * leaves pointed to by a node and reallocate them so that 1568 * disk order is close to key order 1569 */ 1570 int btrfs_realloc_node(struct btrfs_trans_handle *trans, 1571 struct btrfs_root *root, struct extent_buffer *parent, 1572 int start_slot, u64 *last_ret, 1573 struct btrfs_key *progress) 1574 { 1575 struct btrfs_fs_info *fs_info = root->fs_info; 1576 struct extent_buffer *cur; 1577 u64 blocknr; 1578 u64 search_start = *last_ret; 1579 u64 last_block = 0; 1580 u64 other; 1581 u32 parent_nritems; 1582 int end_slot; 1583 int i; 1584 int err = 0; 1585 u32 blocksize; 1586 int progress_passed = 0; 1587 struct btrfs_disk_key disk_key; 1588 1589 WARN_ON(trans->transaction != fs_info->running_transaction); 1590 WARN_ON(trans->transid != fs_info->generation); 1591 1592 parent_nritems = btrfs_header_nritems(parent); 1593 blocksize = fs_info->nodesize; 1594 end_slot = parent_nritems - 1; 1595 1596 if (parent_nritems <= 1) 1597 return 0; 1598 1599 for (i = start_slot; i <= end_slot; i++) { 1600 int close = 1; 1601 1602 btrfs_node_key(parent, &disk_key, i); 1603 if (!progress_passed && comp_keys(&disk_key, progress) < 0) 1604 continue; 1605 1606 progress_passed = 1; 1607 blocknr = btrfs_node_blockptr(parent, i); 1608 if (last_block == 0) 1609 last_block = blocknr; 1610 1611 if (i > 0) { 1612 other = btrfs_node_blockptr(parent, i - 1); 1613 close = close_blocks(blocknr, other, blocksize); 1614 } 1615 if (!close && i < end_slot) { 1616 other = btrfs_node_blockptr(parent, i + 1); 1617 close = close_blocks(blocknr, other, blocksize); 1618 } 1619 if (close) { 1620 last_block = blocknr; 1621 continue; 1622 } 1623 1624 cur = btrfs_read_node_slot(parent, i); 1625 if (IS_ERR(cur)) 1626 return PTR_ERR(cur); 1627 if (search_start == 0) 1628 search_start = last_block; 1629 1630 btrfs_tree_lock(cur); 1631 err = __btrfs_cow_block(trans, root, cur, parent, i, 1632 &cur, search_start, 1633 min(16 * blocksize, 1634 (end_slot - i) * blocksize), 1635 BTRFS_NESTING_COW); 1636 if (err) { 1637 btrfs_tree_unlock(cur); 1638 free_extent_buffer(cur); 1639 break; 1640 } 1641 search_start = cur->start; 1642 last_block = cur->start; 1643 *last_ret = search_start; 1644 btrfs_tree_unlock(cur); 1645 free_extent_buffer(cur); 1646 } 1647 return err; 1648 } 1649 1650 /* 1651 * search for key in the extent_buffer. The items start at offset p, 1652 * and they are item_size apart. There are 'max' items in p. 
1653 * 1654 * the slot in the array is returned via slot, and it points to 1655 * the place where you would insert key if it is not found in 1656 * the array. 1657 * 1658 * slot may point to max if the key is bigger than all of the keys 1659 */ 1660 static noinline int generic_bin_search(struct extent_buffer *eb, 1661 unsigned long p, int item_size, 1662 const struct btrfs_key *key, 1663 int max, int *slot) 1664 { 1665 int low = 0; 1666 int high = max; 1667 int ret; 1668 const int key_size = sizeof(struct btrfs_disk_key); 1669 1670 if (low > high) { 1671 btrfs_err(eb->fs_info, 1672 "%s: low (%d) > high (%d) eb %llu owner %llu level %d", 1673 __func__, low, high, eb->start, 1674 btrfs_header_owner(eb), btrfs_header_level(eb)); 1675 return -EINVAL; 1676 } 1677 1678 while (low < high) { 1679 unsigned long oip; 1680 unsigned long offset; 1681 struct btrfs_disk_key *tmp; 1682 struct btrfs_disk_key unaligned; 1683 int mid; 1684 1685 mid = (low + high) / 2; 1686 offset = p + mid * item_size; 1687 oip = offset_in_page(offset); 1688 1689 if (oip + key_size <= PAGE_SIZE) { 1690 const unsigned long idx = get_eb_page_index(offset); 1691 char *kaddr = page_address(eb->pages[idx]); 1692 1693 oip = get_eb_offset_in_page(eb, offset); 1694 tmp = (struct btrfs_disk_key *)(kaddr + oip); 1695 } else { 1696 read_extent_buffer(eb, &unaligned, offset, key_size); 1697 tmp = &unaligned; 1698 } 1699 1700 ret = comp_keys(tmp, key); 1701 1702 if (ret < 0) 1703 low = mid + 1; 1704 else if (ret > 0) 1705 high = mid; 1706 else { 1707 *slot = mid; 1708 return 0; 1709 } 1710 } 1711 *slot = low; 1712 return 1; 1713 } 1714 1715 /* 1716 * simple bin_search frontend that does the right thing for 1717 * leaves vs nodes 1718 */ 1719 int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key, 1720 int *slot) 1721 { 1722 if (btrfs_header_level(eb) == 0) 1723 return generic_bin_search(eb, 1724 offsetof(struct btrfs_leaf, items), 1725 sizeof(struct btrfs_item), 1726 key, btrfs_header_nritems(eb), 1727 slot); 1728 else 1729 return generic_bin_search(eb, 1730 offsetof(struct btrfs_node, ptrs), 1731 sizeof(struct btrfs_key_ptr), 1732 key, btrfs_header_nritems(eb), 1733 slot); 1734 } 1735 1736 static void root_add_used(struct btrfs_root *root, u32 size) 1737 { 1738 spin_lock(&root->accounting_lock); 1739 btrfs_set_root_used(&root->root_item, 1740 btrfs_root_used(&root->root_item) + size); 1741 spin_unlock(&root->accounting_lock); 1742 } 1743 1744 static void root_sub_used(struct btrfs_root *root, u32 size) 1745 { 1746 spin_lock(&root->accounting_lock); 1747 btrfs_set_root_used(&root->root_item, 1748 btrfs_root_used(&root->root_item) - size); 1749 spin_unlock(&root->accounting_lock); 1750 } 1751 1752 /* given a node and slot number, this reads the blocks it points to. The 1753 * extent buffer is returned with a reference taken (but unlocked). 
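 *
 * Callers must check the returned pointer with IS_ERR() and, on success,
 * drop the reference with free_extent_buffer() once they are done with the
 * buffer.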
1754 */ 1755 struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent, 1756 int slot) 1757 { 1758 int level = btrfs_header_level(parent); 1759 struct extent_buffer *eb; 1760 struct btrfs_key first_key; 1761 1762 if (slot < 0 || slot >= btrfs_header_nritems(parent)) 1763 return ERR_PTR(-ENOENT); 1764 1765 BUG_ON(level == 0); 1766 1767 btrfs_node_key_to_cpu(parent, &first_key, slot); 1768 eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot), 1769 btrfs_header_owner(parent), 1770 btrfs_node_ptr_generation(parent, slot), 1771 level - 1, &first_key); 1772 if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) { 1773 free_extent_buffer(eb); 1774 eb = ERR_PTR(-EIO); 1775 } 1776 1777 return eb; 1778 } 1779 1780 /* 1781 * node level balancing, used to make sure nodes are in proper order for 1782 * item deletion. We balance from the top down, so we have to make sure 1783 * that a deletion won't leave an node completely empty later on. 1784 */ 1785 static noinline int balance_level(struct btrfs_trans_handle *trans, 1786 struct btrfs_root *root, 1787 struct btrfs_path *path, int level) 1788 { 1789 struct btrfs_fs_info *fs_info = root->fs_info; 1790 struct extent_buffer *right = NULL; 1791 struct extent_buffer *mid; 1792 struct extent_buffer *left = NULL; 1793 struct extent_buffer *parent = NULL; 1794 int ret = 0; 1795 int wret; 1796 int pslot; 1797 int orig_slot = path->slots[level]; 1798 u64 orig_ptr; 1799 1800 ASSERT(level > 0); 1801 1802 mid = path->nodes[level]; 1803 1804 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK); 1805 WARN_ON(btrfs_header_generation(mid) != trans->transid); 1806 1807 orig_ptr = btrfs_node_blockptr(mid, orig_slot); 1808 1809 if (level < BTRFS_MAX_LEVEL - 1) { 1810 parent = path->nodes[level + 1]; 1811 pslot = path->slots[level + 1]; 1812 } 1813 1814 /* 1815 * deal with the case where there is only one pointer in the root 1816 * by promoting the node below to a root 1817 */ 1818 if (!parent) { 1819 struct extent_buffer *child; 1820 1821 if (btrfs_header_nritems(mid) != 1) 1822 return 0; 1823 1824 /* promote the child to a root */ 1825 child = btrfs_read_node_slot(mid, 0); 1826 if (IS_ERR(child)) { 1827 ret = PTR_ERR(child); 1828 btrfs_handle_fs_error(fs_info, ret, NULL); 1829 goto enospc; 1830 } 1831 1832 btrfs_tree_lock(child); 1833 ret = btrfs_cow_block(trans, root, child, mid, 0, &child, 1834 BTRFS_NESTING_COW); 1835 if (ret) { 1836 btrfs_tree_unlock(child); 1837 free_extent_buffer(child); 1838 goto enospc; 1839 } 1840 1841 ret = tree_mod_log_insert_root(root->node, child, 1); 1842 BUG_ON(ret < 0); 1843 rcu_assign_pointer(root->node, child); 1844 1845 add_root_to_dirty_list(root); 1846 btrfs_tree_unlock(child); 1847 1848 path->locks[level] = 0; 1849 path->nodes[level] = NULL; 1850 btrfs_clean_tree_block(mid); 1851 btrfs_tree_unlock(mid); 1852 /* once for the path */ 1853 free_extent_buffer(mid); 1854 1855 root_sub_used(root, mid->len); 1856 btrfs_free_tree_block(trans, root, mid, 0, 1); 1857 /* once for the root ptr */ 1858 free_extent_buffer_stale(mid); 1859 return 0; 1860 } 1861 if (btrfs_header_nritems(mid) > 1862 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4) 1863 return 0; 1864 1865 left = btrfs_read_node_slot(parent, pslot - 1); 1866 if (IS_ERR(left)) 1867 left = NULL; 1868 1869 if (left) { 1870 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT); 1871 wret = btrfs_cow_block(trans, root, left, 1872 parent, pslot - 1, &left, 1873 BTRFS_NESTING_LEFT_COW); 1874 if (wret) { 1875 ret = wret; 1876 goto enospc; 1877 } 1878 } 1879 1880 right = 
btrfs_read_node_slot(parent, pslot + 1); 1881 if (IS_ERR(right)) 1882 right = NULL; 1883 1884 if (right) { 1885 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT); 1886 wret = btrfs_cow_block(trans, root, right, 1887 parent, pslot + 1, &right, 1888 BTRFS_NESTING_RIGHT_COW); 1889 if (wret) { 1890 ret = wret; 1891 goto enospc; 1892 } 1893 } 1894 1895 /* first, try to make some room in the middle buffer */ 1896 if (left) { 1897 orig_slot += btrfs_header_nritems(left); 1898 wret = push_node_left(trans, left, mid, 1); 1899 if (wret < 0) 1900 ret = wret; 1901 } 1902 1903 /* 1904 * then try to empty the right most buffer into the middle 1905 */ 1906 if (right) { 1907 wret = push_node_left(trans, mid, right, 1); 1908 if (wret < 0 && wret != -ENOSPC) 1909 ret = wret; 1910 if (btrfs_header_nritems(right) == 0) { 1911 btrfs_clean_tree_block(right); 1912 btrfs_tree_unlock(right); 1913 del_ptr(root, path, level + 1, pslot + 1); 1914 root_sub_used(root, right->len); 1915 btrfs_free_tree_block(trans, root, right, 0, 1); 1916 free_extent_buffer_stale(right); 1917 right = NULL; 1918 } else { 1919 struct btrfs_disk_key right_key; 1920 btrfs_node_key(right, &right_key, 0); 1921 ret = tree_mod_log_insert_key(parent, pslot + 1, 1922 MOD_LOG_KEY_REPLACE, GFP_NOFS); 1923 BUG_ON(ret < 0); 1924 btrfs_set_node_key(parent, &right_key, pslot + 1); 1925 btrfs_mark_buffer_dirty(parent); 1926 } 1927 } 1928 if (btrfs_header_nritems(mid) == 1) { 1929 /* 1930 * we're not allowed to leave a node with one item in the 1931 * tree during a delete. A deletion from lower in the tree 1932 * could try to delete the only pointer in this node. 1933 * So, pull some keys from the left. 1934 * There has to be a left pointer at this point because 1935 * otherwise we would have pulled some pointers from the 1936 * right 1937 */ 1938 if (!left) { 1939 ret = -EROFS; 1940 btrfs_handle_fs_error(fs_info, ret, NULL); 1941 goto enospc; 1942 } 1943 wret = balance_node_right(trans, mid, left); 1944 if (wret < 0) { 1945 ret = wret; 1946 goto enospc; 1947 } 1948 if (wret == 1) { 1949 wret = push_node_left(trans, left, mid, 1); 1950 if (wret < 0) 1951 ret = wret; 1952 } 1953 BUG_ON(wret == 1); 1954 } 1955 if (btrfs_header_nritems(mid) == 0) { 1956 btrfs_clean_tree_block(mid); 1957 btrfs_tree_unlock(mid); 1958 del_ptr(root, path, level + 1, pslot); 1959 root_sub_used(root, mid->len); 1960 btrfs_free_tree_block(trans, root, mid, 0, 1); 1961 free_extent_buffer_stale(mid); 1962 mid = NULL; 1963 } else { 1964 /* update the parent key to reflect our changes */ 1965 struct btrfs_disk_key mid_key; 1966 btrfs_node_key(mid, &mid_key, 0); 1967 ret = tree_mod_log_insert_key(parent, pslot, 1968 MOD_LOG_KEY_REPLACE, GFP_NOFS); 1969 BUG_ON(ret < 0); 1970 btrfs_set_node_key(parent, &mid_key, pslot); 1971 btrfs_mark_buffer_dirty(parent); 1972 } 1973 1974 /* update the path */ 1975 if (left) { 1976 if (btrfs_header_nritems(left) > orig_slot) { 1977 atomic_inc(&left->refs); 1978 /* left was locked after cow */ 1979 path->nodes[level] = left; 1980 path->slots[level + 1] -= 1; 1981 path->slots[level] = orig_slot; 1982 if (mid) { 1983 btrfs_tree_unlock(mid); 1984 free_extent_buffer(mid); 1985 } 1986 } else { 1987 orig_slot -= btrfs_header_nritems(left); 1988 path->slots[level] = orig_slot; 1989 } 1990 } 1991 /* double check we haven't messed things up */ 1992 if (orig_ptr != 1993 btrfs_node_blockptr(path->nodes[level], path->slots[level])) 1994 BUG(); 1995 enospc: 1996 if (right) { 1997 btrfs_tree_unlock(right); 1998 free_extent_buffer(right); 1999 } 2000 if (left) { 2001 if 
(path->nodes[level] != left) 2002 btrfs_tree_unlock(left); 2003 free_extent_buffer(left); 2004 } 2005 return ret; 2006 } 2007 2008 /* Node balancing for insertion. Here we only split or push nodes around 2009 * when they are completely full. This is also done top down, so we 2010 * have to be pessimistic. 2011 */ 2012 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans, 2013 struct btrfs_root *root, 2014 struct btrfs_path *path, int level) 2015 { 2016 struct btrfs_fs_info *fs_info = root->fs_info; 2017 struct extent_buffer *right = NULL; 2018 struct extent_buffer *mid; 2019 struct extent_buffer *left = NULL; 2020 struct extent_buffer *parent = NULL; 2021 int ret = 0; 2022 int wret; 2023 int pslot; 2024 int orig_slot = path->slots[level]; 2025 2026 if (level == 0) 2027 return 1; 2028 2029 mid = path->nodes[level]; 2030 WARN_ON(btrfs_header_generation(mid) != trans->transid); 2031 2032 if (level < BTRFS_MAX_LEVEL - 1) { 2033 parent = path->nodes[level + 1]; 2034 pslot = path->slots[level + 1]; 2035 } 2036 2037 if (!parent) 2038 return 1; 2039 2040 left = btrfs_read_node_slot(parent, pslot - 1); 2041 if (IS_ERR(left)) 2042 left = NULL; 2043 2044 /* first, try to make some room in the middle buffer */ 2045 if (left) { 2046 u32 left_nr; 2047 2048 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT); 2049 2050 left_nr = btrfs_header_nritems(left); 2051 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) { 2052 wret = 1; 2053 } else { 2054 ret = btrfs_cow_block(trans, root, left, parent, 2055 pslot - 1, &left, 2056 BTRFS_NESTING_LEFT_COW); 2057 if (ret) 2058 wret = 1; 2059 else { 2060 wret = push_node_left(trans, left, mid, 0); 2061 } 2062 } 2063 if (wret < 0) 2064 ret = wret; 2065 if (wret == 0) { 2066 struct btrfs_disk_key disk_key; 2067 orig_slot += left_nr; 2068 btrfs_node_key(mid, &disk_key, 0); 2069 ret = tree_mod_log_insert_key(parent, pslot, 2070 MOD_LOG_KEY_REPLACE, GFP_NOFS); 2071 BUG_ON(ret < 0); 2072 btrfs_set_node_key(parent, &disk_key, pslot); 2073 btrfs_mark_buffer_dirty(parent); 2074 if (btrfs_header_nritems(left) > orig_slot) { 2075 path->nodes[level] = left; 2076 path->slots[level + 1] -= 1; 2077 path->slots[level] = orig_slot; 2078 btrfs_tree_unlock(mid); 2079 free_extent_buffer(mid); 2080 } else { 2081 orig_slot -= 2082 btrfs_header_nritems(left); 2083 path->slots[level] = orig_slot; 2084 btrfs_tree_unlock(left); 2085 free_extent_buffer(left); 2086 } 2087 return 0; 2088 } 2089 btrfs_tree_unlock(left); 2090 free_extent_buffer(left); 2091 } 2092 right = btrfs_read_node_slot(parent, pslot + 1); 2093 if (IS_ERR(right)) 2094 right = NULL; 2095 2096 /* 2097 * then try to empty the right most buffer into the middle 2098 */ 2099 if (right) { 2100 u32 right_nr; 2101 2102 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT); 2103 2104 right_nr = btrfs_header_nritems(right); 2105 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) { 2106 wret = 1; 2107 } else { 2108 ret = btrfs_cow_block(trans, root, right, 2109 parent, pslot + 1, 2110 &right, BTRFS_NESTING_RIGHT_COW); 2111 if (ret) 2112 wret = 1; 2113 else { 2114 wret = balance_node_right(trans, right, mid); 2115 } 2116 } 2117 if (wret < 0) 2118 ret = wret; 2119 if (wret == 0) { 2120 struct btrfs_disk_key disk_key; 2121 2122 btrfs_node_key(right, &disk_key, 0); 2123 ret = tree_mod_log_insert_key(parent, pslot + 1, 2124 MOD_LOG_KEY_REPLACE, GFP_NOFS); 2125 BUG_ON(ret < 0); 2126 btrfs_set_node_key(parent, &disk_key, pslot + 1); 2127 btrfs_mark_buffer_dirty(parent); 2128 2129 if (btrfs_header_nritems(mid) <= orig_slot) { 2130 
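/*
 * The slot we were tracking was pushed into the right sibling;
 * hand the path over to it and bump the parent slot.
 */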
path->nodes[level] = right; 2131 path->slots[level + 1] += 1; 2132 path->slots[level] = orig_slot - 2133 btrfs_header_nritems(mid); 2134 btrfs_tree_unlock(mid); 2135 free_extent_buffer(mid); 2136 } else { 2137 btrfs_tree_unlock(right); 2138 free_extent_buffer(right); 2139 } 2140 return 0; 2141 } 2142 btrfs_tree_unlock(right); 2143 free_extent_buffer(right); 2144 } 2145 return 1; 2146 } 2147 2148 /* 2149 * readahead one full node of leaves, finding things that are close 2150 * to the block in 'slot', and triggering ra on them. 2151 */ 2152 static void reada_for_search(struct btrfs_fs_info *fs_info, 2153 struct btrfs_path *path, 2154 int level, int slot, u64 objectid) 2155 { 2156 struct extent_buffer *node; 2157 struct btrfs_disk_key disk_key; 2158 u32 nritems; 2159 u64 search; 2160 u64 target; 2161 u64 nread = 0; 2162 struct extent_buffer *eb; 2163 u32 nr; 2164 u32 blocksize; 2165 u32 nscan = 0; 2166 2167 if (level != 1) 2168 return; 2169 2170 if (!path->nodes[level]) 2171 return; 2172 2173 node = path->nodes[level]; 2174 2175 search = btrfs_node_blockptr(node, slot); 2176 blocksize = fs_info->nodesize; 2177 eb = find_extent_buffer(fs_info, search); 2178 if (eb) { 2179 free_extent_buffer(eb); 2180 return; 2181 } 2182 2183 target = search; 2184 2185 nritems = btrfs_header_nritems(node); 2186 nr = slot; 2187 2188 while (1) { 2189 if (path->reada == READA_BACK) { 2190 if (nr == 0) 2191 break; 2192 nr--; 2193 } else if (path->reada == READA_FORWARD) { 2194 nr++; 2195 if (nr >= nritems) 2196 break; 2197 } 2198 if (path->reada == READA_BACK && objectid) { 2199 btrfs_node_key(node, &disk_key, nr); 2200 if (btrfs_disk_key_objectid(&disk_key) != objectid) 2201 break; 2202 } 2203 search = btrfs_node_blockptr(node, nr); 2204 if ((search <= target && target - search <= 65536) || 2205 (search > target && search - target <= 65536)) { 2206 btrfs_readahead_node_child(node, nr); 2207 nread += blocksize; 2208 } 2209 nscan++; 2210 if ((nread > 65536 || nscan > 32)) 2211 break; 2212 } 2213 } 2214 2215 static noinline void reada_for_balance(struct btrfs_path *path, int level) 2216 { 2217 struct extent_buffer *parent; 2218 int slot; 2219 int nritems; 2220 2221 parent = path->nodes[level + 1]; 2222 if (!parent) 2223 return; 2224 2225 nritems = btrfs_header_nritems(parent); 2226 slot = path->slots[level + 1]; 2227 2228 if (slot > 0) 2229 btrfs_readahead_node_child(parent, slot - 1); 2230 if (slot + 1 < nritems) 2231 btrfs_readahead_node_child(parent, slot + 1); 2232 } 2233 2234 2235 /* 2236 * when we walk down the tree, it is usually safe to unlock the higher layers 2237 * in the tree. The exceptions are when our path goes through slot 0, because 2238 * operations on the tree might require changing key pointers higher up in the 2239 * tree. 2240 * 2241 * callers might also have set path->keep_locks, which tells this code to keep 2242 * the lock if the path points to the last slot in the block. This is part of 2243 * walking through the tree, and selecting the next slot in the higher block. 2244 * 2245 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. 
so 2246 * if lowest_unlock is 1, level 0 won't be unlocked 2247 */ 2248 static noinline void unlock_up(struct btrfs_path *path, int level, 2249 int lowest_unlock, int min_write_lock_level, 2250 int *write_lock_level) 2251 { 2252 int i; 2253 int skip_level = level; 2254 int no_skips = 0; 2255 struct extent_buffer *t; 2256 2257 for (i = level; i < BTRFS_MAX_LEVEL; i++) { 2258 if (!path->nodes[i]) 2259 break; 2260 if (!path->locks[i]) 2261 break; 2262 if (!no_skips && path->slots[i] == 0) { 2263 skip_level = i + 1; 2264 continue; 2265 } 2266 if (!no_skips && path->keep_locks) { 2267 u32 nritems; 2268 t = path->nodes[i]; 2269 nritems = btrfs_header_nritems(t); 2270 if (nritems < 1 || path->slots[i] >= nritems - 1) { 2271 skip_level = i + 1; 2272 continue; 2273 } 2274 } 2275 if (skip_level < i && i >= lowest_unlock) 2276 no_skips = 1; 2277 2278 t = path->nodes[i]; 2279 if (i >= lowest_unlock && i > skip_level) { 2280 btrfs_tree_unlock_rw(t, path->locks[i]); 2281 path->locks[i] = 0; 2282 if (write_lock_level && 2283 i > min_write_lock_level && 2284 i <= *write_lock_level) { 2285 *write_lock_level = i - 1; 2286 } 2287 } 2288 } 2289 } 2290 2291 /* 2292 * helper function for btrfs_search_slot. The goal is to find a block 2293 * in cache without setting the path to blocking. If we find the block 2294 * we return zero and the path is unchanged. 2295 * 2296 * If we can't find the block, we set the path blocking and do some 2297 * reada. -EAGAIN is returned and the search must be repeated. 2298 */ 2299 static int 2300 read_block_for_search(struct btrfs_root *root, struct btrfs_path *p, 2301 struct extent_buffer **eb_ret, int level, int slot, 2302 const struct btrfs_key *key) 2303 { 2304 struct btrfs_fs_info *fs_info = root->fs_info; 2305 u64 blocknr; 2306 u64 gen; 2307 struct extent_buffer *tmp; 2308 struct btrfs_key first_key; 2309 int ret; 2310 int parent_level; 2311 2312 blocknr = btrfs_node_blockptr(*eb_ret, slot); 2313 gen = btrfs_node_ptr_generation(*eb_ret, slot); 2314 parent_level = btrfs_header_level(*eb_ret); 2315 btrfs_node_key_to_cpu(*eb_ret, &first_key, slot); 2316 2317 tmp = find_extent_buffer(fs_info, blocknr); 2318 if (tmp) { 2319 /* first we do an atomic uptodate check */ 2320 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) { 2321 /* 2322 * Do extra check for first_key, eb can be stale due to 2323 * being cached, read from scrub, or have multiple 2324 * parents (shared tree blocks). 2325 */ 2326 if (btrfs_verify_level_key(tmp, 2327 parent_level - 1, &first_key, gen)) { 2328 free_extent_buffer(tmp); 2329 return -EUCLEAN; 2330 } 2331 *eb_ret = tmp; 2332 return 0; 2333 } 2334 2335 /* now we're allowed to do a blocking uptodate check */ 2336 ret = btrfs_read_buffer(tmp, gen, parent_level - 1, &first_key); 2337 if (!ret) { 2338 *eb_ret = tmp; 2339 return 0; 2340 } 2341 free_extent_buffer(tmp); 2342 btrfs_release_path(p); 2343 return -EIO; 2344 } 2345 2346 /* 2347 * reduce lock contention at high levels 2348 * of the btree by dropping locks before 2349 * we read. Don't release the lock on the current 2350 * level because we need to walk this node to figure 2351 * out which blocks to read. 
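 *
 * (Even when the read below succeeds we still return -EAGAIN after
 * releasing the path; the search loops in this file treat that as
 * "restart from the root".)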
2352 */ 2353 btrfs_unlock_up_safe(p, level + 1); 2354 2355 if (p->reada != READA_NONE) 2356 reada_for_search(fs_info, p, level, slot, key->objectid); 2357 2358 ret = -EAGAIN; 2359 tmp = read_tree_block(fs_info, blocknr, root->root_key.objectid, 2360 gen, parent_level - 1, &first_key); 2361 if (!IS_ERR(tmp)) { 2362 /* 2363 * If the read above didn't mark this buffer up to date, 2364 * it will never end up being up to date. Set ret to EIO now 2365 * and give up so that our caller doesn't loop forever 2366 * on our EAGAINs. 2367 */ 2368 if (!extent_buffer_uptodate(tmp)) 2369 ret = -EIO; 2370 free_extent_buffer(tmp); 2371 } else { 2372 ret = PTR_ERR(tmp); 2373 } 2374 2375 btrfs_release_path(p); 2376 return ret; 2377 } 2378 2379 /* 2380 * helper function for btrfs_search_slot. This does all of the checks 2381 * for node-level blocks and does any balancing required based on 2382 * the ins_len. 2383 * 2384 * If no extra work was required, zero is returned. If we had to 2385 * drop the path, -EAGAIN is returned and btrfs_search_slot must 2386 * start over 2387 */ 2388 static int 2389 setup_nodes_for_search(struct btrfs_trans_handle *trans, 2390 struct btrfs_root *root, struct btrfs_path *p, 2391 struct extent_buffer *b, int level, int ins_len, 2392 int *write_lock_level) 2393 { 2394 struct btrfs_fs_info *fs_info = root->fs_info; 2395 int ret = 0; 2396 2397 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >= 2398 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) { 2399 2400 if (*write_lock_level < level + 1) { 2401 *write_lock_level = level + 1; 2402 btrfs_release_path(p); 2403 return -EAGAIN; 2404 } 2405 2406 reada_for_balance(p, level); 2407 ret = split_node(trans, root, p, level); 2408 2409 b = p->nodes[level]; 2410 } else if (ins_len < 0 && btrfs_header_nritems(b) < 2411 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) { 2412 2413 if (*write_lock_level < level + 1) { 2414 *write_lock_level = level + 1; 2415 btrfs_release_path(p); 2416 return -EAGAIN; 2417 } 2418 2419 reada_for_balance(p, level); 2420 ret = balance_level(trans, root, p, level); 2421 if (ret) 2422 return ret; 2423 2424 b = p->nodes[level]; 2425 if (!b) { 2426 btrfs_release_path(p); 2427 return -EAGAIN; 2428 } 2429 BUG_ON(btrfs_header_nritems(b) == 1); 2430 } 2431 return ret; 2432 } 2433 2434 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path, 2435 u64 iobjectid, u64 ioff, u8 key_type, 2436 struct btrfs_key *found_key) 2437 { 2438 int ret; 2439 struct btrfs_key key; 2440 struct extent_buffer *eb; 2441 2442 ASSERT(path); 2443 ASSERT(found_key); 2444 2445 key.type = key_type; 2446 key.objectid = iobjectid; 2447 key.offset = ioff; 2448 2449 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0); 2450 if (ret < 0) 2451 return ret; 2452 2453 eb = path->nodes[0]; 2454 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) { 2455 ret = btrfs_next_leaf(fs_root, path); 2456 if (ret) 2457 return ret; 2458 eb = path->nodes[0]; 2459 } 2460 2461 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]); 2462 if (found_key->type != key.type || 2463 found_key->objectid != key.objectid) 2464 return 1; 2465 2466 return 0; 2467 } 2468 2469 static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root, 2470 struct btrfs_path *p, 2471 int write_lock_level) 2472 { 2473 struct btrfs_fs_info *fs_info = root->fs_info; 2474 struct extent_buffer *b; 2475 int root_lock; 2476 int level = 0; 2477 2478 /* We try very hard to do read locks on the root */ 2479 root_lock = BTRFS_READ_LOCK; 2480 2481 if (p->search_commit_root) { 
2482 /* 2483 * The commit roots are read only so we always do read locks, 2484 * and we always must hold the commit_root_sem when doing 2485 * searches on them, the only exception is send where we don't 2486 * want to block transaction commits for a long time, so 2487 * we need to clone the commit root in order to avoid races 2488 * with transaction commits that create a snapshot of one of 2489 * the roots used by a send operation. 2490 */ 2491 if (p->need_commit_sem) { 2492 down_read(&fs_info->commit_root_sem); 2493 b = btrfs_clone_extent_buffer(root->commit_root); 2494 up_read(&fs_info->commit_root_sem); 2495 if (!b) 2496 return ERR_PTR(-ENOMEM); 2497 2498 } else { 2499 b = root->commit_root; 2500 atomic_inc(&b->refs); 2501 } 2502 level = btrfs_header_level(b); 2503 /* 2504 * Ensure that all callers have set skip_locking when 2505 * p->search_commit_root = 1. 2506 */ 2507 ASSERT(p->skip_locking == 1); 2508 2509 goto out; 2510 } 2511 2512 if (p->skip_locking) { 2513 b = btrfs_root_node(root); 2514 level = btrfs_header_level(b); 2515 goto out; 2516 } 2517 2518 /* 2519 * If the level is set to maximum, we can skip trying to get the read 2520 * lock. 2521 */ 2522 if (write_lock_level < BTRFS_MAX_LEVEL) { 2523 /* 2524 * We don't know the level of the root node until we actually 2525 * have it read locked 2526 */ 2527 b = btrfs_read_lock_root_node(root); 2528 level = btrfs_header_level(b); 2529 if (level > write_lock_level) 2530 goto out; 2531 2532 /* Whoops, must trade for write lock */ 2533 btrfs_tree_read_unlock(b); 2534 free_extent_buffer(b); 2535 } 2536 2537 b = btrfs_lock_root_node(root); 2538 root_lock = BTRFS_WRITE_LOCK; 2539 2540 /* The level might have changed, check again */ 2541 level = btrfs_header_level(b); 2542 2543 out: 2544 p->nodes[level] = b; 2545 if (!p->skip_locking) 2546 p->locks[level] = root_lock; 2547 /* 2548 * Callers are responsible for dropping b's references. 2549 */ 2550 return b; 2551 } 2552 2553 2554 /* 2555 * btrfs_search_slot - look for a key in a tree and perform necessary 2556 * modifications to preserve tree invariants. 2557 * 2558 * @trans: Handle of transaction, used when modifying the tree 2559 * @p: Holds all btree nodes along the search path 2560 * @root: The root node of the tree 2561 * @key: The key we are looking for 2562 * @ins_len: Indicates purpose of search: 2563 * >0 for inserts it's size of item inserted (*) 2564 * <0 for deletions 2565 * 0 for plain searches, not modifying the tree 2566 * 2567 * (*) If size of item inserted doesn't include 2568 * sizeof(struct btrfs_item), then p->search_for_extension must 2569 * be set. 2570 * @cow: boolean should CoW operations be performed. Must always be 1 2571 * when modifying the tree. 2572 * 2573 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree. 
2574 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible) 2575 * 2576 * If @key is found, 0 is returned and you can find the item in the leaf level 2577 * of the path (level 0) 2578 * 2579 * If @key isn't found, 1 is returned and the leaf level of the path (level 0) 2580 * points to the slot where it should be inserted 2581 * 2582 * If an error is encountered while searching the tree a negative error number 2583 * is returned 2584 */ 2585 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root, 2586 const struct btrfs_key *key, struct btrfs_path *p, 2587 int ins_len, int cow) 2588 { 2589 struct extent_buffer *b; 2590 int slot; 2591 int ret; 2592 int err; 2593 int level; 2594 int lowest_unlock = 1; 2595 /* everything at write_lock_level or lower must be write locked */ 2596 int write_lock_level = 0; 2597 u8 lowest_level = 0; 2598 int min_write_lock_level; 2599 int prev_cmp; 2600 2601 lowest_level = p->lowest_level; 2602 WARN_ON(lowest_level && ins_len > 0); 2603 WARN_ON(p->nodes[0] != NULL); 2604 BUG_ON(!cow && ins_len); 2605 2606 if (ins_len < 0) { 2607 lowest_unlock = 2; 2608 2609 /* when we are removing items, we might have to go up to level 2610 * two as we update tree pointers Make sure we keep write 2611 * for those levels as well 2612 */ 2613 write_lock_level = 2; 2614 } else if (ins_len > 0) { 2615 /* 2616 * for inserting items, make sure we have a write lock on 2617 * level 1 so we can update keys 2618 */ 2619 write_lock_level = 1; 2620 } 2621 2622 if (!cow) 2623 write_lock_level = -1; 2624 2625 if (cow && (p->keep_locks || p->lowest_level)) 2626 write_lock_level = BTRFS_MAX_LEVEL; 2627 2628 min_write_lock_level = write_lock_level; 2629 2630 again: 2631 prev_cmp = -1; 2632 b = btrfs_search_slot_get_root(root, p, write_lock_level); 2633 if (IS_ERR(b)) { 2634 ret = PTR_ERR(b); 2635 goto done; 2636 } 2637 2638 while (b) { 2639 int dec = 0; 2640 2641 level = btrfs_header_level(b); 2642 2643 if (cow) { 2644 bool last_level = (level == (BTRFS_MAX_LEVEL - 1)); 2645 2646 /* 2647 * if we don't really need to cow this block 2648 * then we don't want to set the path blocking, 2649 * so we test it here 2650 */ 2651 if (!should_cow_block(trans, root, b)) { 2652 trans->dirty = true; 2653 goto cow_done; 2654 } 2655 2656 /* 2657 * must have write locks on this node and the 2658 * parent 2659 */ 2660 if (level > write_lock_level || 2661 (level + 1 > write_lock_level && 2662 level + 1 < BTRFS_MAX_LEVEL && 2663 p->nodes[level + 1])) { 2664 write_lock_level = level + 1; 2665 btrfs_release_path(p); 2666 goto again; 2667 } 2668 2669 if (last_level) 2670 err = btrfs_cow_block(trans, root, b, NULL, 0, 2671 &b, 2672 BTRFS_NESTING_COW); 2673 else 2674 err = btrfs_cow_block(trans, root, b, 2675 p->nodes[level + 1], 2676 p->slots[level + 1], &b, 2677 BTRFS_NESTING_COW); 2678 if (err) { 2679 ret = err; 2680 goto done; 2681 } 2682 } 2683 cow_done: 2684 p->nodes[level] = b; 2685 /* 2686 * Leave path with blocking locks to avoid massive 2687 * lock context switch, this is made on purpose. 2688 */ 2689 2690 /* 2691 * we have a lock on b and as long as we aren't changing 2692 * the tree, there is no way to for the items in b to change. 2693 * It is safe to drop the lock on our parent before we 2694 * go through the expensive btree search on b. 2695 * 2696 * If we're inserting or deleting (ins_len != 0), then we might 2697 * be changing slot zero, which may require changing the parent. 
2698 * So, we can't drop the lock until after we know which slot 2699 * we're operating on. 2700 */ 2701 if (!ins_len && !p->keep_locks) { 2702 int u = level + 1; 2703 2704 if (u < BTRFS_MAX_LEVEL && p->locks[u]) { 2705 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]); 2706 p->locks[u] = 0; 2707 } 2708 } 2709 2710 /* 2711 * If btrfs_bin_search returns an exact match (prev_cmp == 0) 2712 * we can safely assume the target key will always be in slot 0 2713 * on lower levels due to the invariants BTRFS' btree provides, 2714 * namely that a btrfs_key_ptr entry always points to the 2715 * lowest key in the child node, thus we can skip searching 2716 * lower levels 2717 */ 2718 if (prev_cmp == 0) { 2719 slot = 0; 2720 ret = 0; 2721 } else { 2722 ret = btrfs_bin_search(b, key, &slot); 2723 prev_cmp = ret; 2724 if (ret < 0) 2725 goto done; 2726 } 2727 2728 if (level == 0) { 2729 p->slots[level] = slot; 2730 /* 2731 * Item key already exists. In this case, if we are 2732 * allowed to insert the item (for example, in dir_item 2733 * case, item key collision is allowed), it will be 2734 * merged with the original item. Only the item size 2735 * grows, no new btrfs item will be added. If 2736 * search_for_extension is not set, ins_len already 2737 * accounts the size btrfs_item, deduct it here so leaf 2738 * space check will be correct. 2739 */ 2740 if (ret == 0 && ins_len > 0 && !p->search_for_extension) { 2741 ASSERT(ins_len >= sizeof(struct btrfs_item)); 2742 ins_len -= sizeof(struct btrfs_item); 2743 } 2744 if (ins_len > 0 && 2745 btrfs_leaf_free_space(b) < ins_len) { 2746 if (write_lock_level < 1) { 2747 write_lock_level = 1; 2748 btrfs_release_path(p); 2749 goto again; 2750 } 2751 2752 err = split_leaf(trans, root, key, 2753 p, ins_len, ret == 0); 2754 2755 BUG_ON(err > 0); 2756 if (err) { 2757 ret = err; 2758 goto done; 2759 } 2760 } 2761 if (!p->search_for_split) 2762 unlock_up(p, level, lowest_unlock, 2763 min_write_lock_level, NULL); 2764 goto done; 2765 } 2766 if (ret && slot > 0) { 2767 dec = 1; 2768 slot--; 2769 } 2770 p->slots[level] = slot; 2771 err = setup_nodes_for_search(trans, root, p, b, level, ins_len, 2772 &write_lock_level); 2773 if (err == -EAGAIN) 2774 goto again; 2775 if (err) { 2776 ret = err; 2777 goto done; 2778 } 2779 b = p->nodes[level]; 2780 slot = p->slots[level]; 2781 2782 /* 2783 * Slot 0 is special, if we change the key we have to update 2784 * the parent pointer which means we must have a write lock on 2785 * the parent 2786 */ 2787 if (slot == 0 && ins_len && write_lock_level < level + 1) { 2788 write_lock_level = level + 1; 2789 btrfs_release_path(p); 2790 goto again; 2791 } 2792 2793 unlock_up(p, level, lowest_unlock, min_write_lock_level, 2794 &write_lock_level); 2795 2796 if (level == lowest_level) { 2797 if (dec) 2798 p->slots[level]++; 2799 goto done; 2800 } 2801 2802 err = read_block_for_search(root, p, &b, level, slot, key); 2803 if (err == -EAGAIN) 2804 goto again; 2805 if (err) { 2806 ret = err; 2807 goto done; 2808 } 2809 2810 if (!p->skip_locking) { 2811 level = btrfs_header_level(b); 2812 if (level <= write_lock_level) { 2813 btrfs_tree_lock(b); 2814 p->locks[level] = BTRFS_WRITE_LOCK; 2815 } else { 2816 btrfs_tree_read_lock(b); 2817 p->locks[level] = BTRFS_READ_LOCK; 2818 } 2819 p->nodes[level] = b; 2820 } 2821 } 2822 ret = 1; 2823 done: 2824 if (ret < 0 && !p->skip_release_on_error) 2825 btrfs_release_path(p); 2826 return ret; 2827 } 2828 ALLOW_ERROR_INJECTION(btrfs_search_slot, ERRNO); 2829 2830 /* 2831 * Like btrfs_search_slot, this looks for a key 
in the given tree. It uses the 2832 * current state of the tree together with the operations recorded in the tree 2833 * modification log to search for the key in a previous version of this tree, as 2834 * denoted by the time_seq parameter. 2835 * 2836 * Naturally, there is no support for insert, delete or cow operations. 2837 * 2838 * The resulting path and return value will be set up as if we called 2839 * btrfs_search_slot at that point in time with ins_len and cow both set to 0. 2840 */ 2841 int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key, 2842 struct btrfs_path *p, u64 time_seq) 2843 { 2844 struct btrfs_fs_info *fs_info = root->fs_info; 2845 struct extent_buffer *b; 2846 int slot; 2847 int ret; 2848 int err; 2849 int level; 2850 int lowest_unlock = 1; 2851 u8 lowest_level = 0; 2852 2853 lowest_level = p->lowest_level; 2854 WARN_ON(p->nodes[0] != NULL); 2855 2856 if (p->search_commit_root) { 2857 BUG_ON(time_seq); 2858 return btrfs_search_slot(NULL, root, key, p, 0, 0); 2859 } 2860 2861 again: 2862 b = get_old_root(root, time_seq); 2863 if (!b) { 2864 ret = -EIO; 2865 goto done; 2866 } 2867 level = btrfs_header_level(b); 2868 p->locks[level] = BTRFS_READ_LOCK; 2869 2870 while (b) { 2871 int dec = 0; 2872 2873 level = btrfs_header_level(b); 2874 p->nodes[level] = b; 2875 2876 /* 2877 * we have a lock on b and as long as we aren't changing 2878 * the tree, there is no way to for the items in b to change. 2879 * It is safe to drop the lock on our parent before we 2880 * go through the expensive btree search on b. 2881 */ 2882 btrfs_unlock_up_safe(p, level + 1); 2883 2884 ret = btrfs_bin_search(b, key, &slot); 2885 if (ret < 0) 2886 goto done; 2887 2888 if (level == 0) { 2889 p->slots[level] = slot; 2890 unlock_up(p, level, lowest_unlock, 0, NULL); 2891 goto done; 2892 } 2893 2894 if (ret && slot > 0) { 2895 dec = 1; 2896 slot--; 2897 } 2898 p->slots[level] = slot; 2899 unlock_up(p, level, lowest_unlock, 0, NULL); 2900 2901 if (level == lowest_level) { 2902 if (dec) 2903 p->slots[level]++; 2904 goto done; 2905 } 2906 2907 err = read_block_for_search(root, p, &b, level, slot, key); 2908 if (err == -EAGAIN) 2909 goto again; 2910 if (err) { 2911 ret = err; 2912 goto done; 2913 } 2914 2915 level = btrfs_header_level(b); 2916 btrfs_tree_read_lock(b); 2917 b = tree_mod_log_rewind(fs_info, p, b, time_seq); 2918 if (!b) { 2919 ret = -ENOMEM; 2920 goto done; 2921 } 2922 p->locks[level] = BTRFS_READ_LOCK; 2923 p->nodes[level] = b; 2924 } 2925 ret = 1; 2926 done: 2927 if (ret < 0) 2928 btrfs_release_path(p); 2929 2930 return ret; 2931 } 2932 2933 /* 2934 * helper to use instead of search slot if no exact match is needed but 2935 * instead the next or previous item should be returned. 2936 * When find_higher is true, the next higher item is returned, the next lower 2937 * otherwise. 2938 * When return_any and find_higher are both true, and no higher item is found, 2939 * return the next lower instead. 2940 * When return_any is true and find_higher is false, and no lower item is found, 2941 * return the next higher instead. 
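 *
 * A minimal, hypothetical usage sketch (identifiers are examples only,
 * not taken from any real caller):
 *
 *	struct btrfs_key key = { .objectid = ino,
 *				 .type = BTRFS_EXTENT_DATA_KEY,
 *				 .offset = start };
 *
 *	ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
 *	(on ret == 0, path->nodes[0]/path->slots[0] is the first item >= key)
 *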
2942 * It returns 0 if any item is found, 1 if none is found (tree empty), and 2943 * < 0 on error 2944 */ 2945 int btrfs_search_slot_for_read(struct btrfs_root *root, 2946 const struct btrfs_key *key, 2947 struct btrfs_path *p, int find_higher, 2948 int return_any) 2949 { 2950 int ret; 2951 struct extent_buffer *leaf; 2952 2953 again: 2954 ret = btrfs_search_slot(NULL, root, key, p, 0, 0); 2955 if (ret <= 0) 2956 return ret; 2957 /* 2958 * a return value of 1 means the path is at the position where the 2959 * item should be inserted. Normally this is the next bigger item, 2960 * but in case the previous item is the last in a leaf, path points 2961 * to the first free slot in the previous leaf, i.e. at an invalid 2962 * item. 2963 */ 2964 leaf = p->nodes[0]; 2965 2966 if (find_higher) { 2967 if (p->slots[0] >= btrfs_header_nritems(leaf)) { 2968 ret = btrfs_next_leaf(root, p); 2969 if (ret <= 0) 2970 return ret; 2971 if (!return_any) 2972 return 1; 2973 /* 2974 * no higher item found, return the next 2975 * lower instead 2976 */ 2977 return_any = 0; 2978 find_higher = 0; 2979 btrfs_release_path(p); 2980 goto again; 2981 } 2982 } else { 2983 if (p->slots[0] == 0) { 2984 ret = btrfs_prev_leaf(root, p); 2985 if (ret < 0) 2986 return ret; 2987 if (!ret) { 2988 leaf = p->nodes[0]; 2989 if (p->slots[0] == btrfs_header_nritems(leaf)) 2990 p->slots[0]--; 2991 return 0; 2992 } 2993 if (!return_any) 2994 return 1; 2995 /* 2996 * no lower item found, return the next 2997 * higher instead 2998 */ 2999 return_any = 0; 3000 find_higher = 1; 3001 btrfs_release_path(p); 3002 goto again; 3003 } else { 3004 --p->slots[0]; 3005 } 3006 } 3007 return 0; 3008 } 3009 3010 /* 3011 * adjust the pointers going up the tree, starting at level 3012 * making sure the right key of each node is points to 'key'. 3013 * This is used after shifting pointers to the left, so it stops 3014 * fixing up pointers when a given leaf/node is not in slot 0 of the 3015 * higher levels 3016 * 3017 */ 3018 static void fixup_low_keys(struct btrfs_path *path, 3019 struct btrfs_disk_key *key, int level) 3020 { 3021 int i; 3022 struct extent_buffer *t; 3023 int ret; 3024 3025 for (i = level; i < BTRFS_MAX_LEVEL; i++) { 3026 int tslot = path->slots[i]; 3027 3028 if (!path->nodes[i]) 3029 break; 3030 t = path->nodes[i]; 3031 ret = tree_mod_log_insert_key(t, tslot, MOD_LOG_KEY_REPLACE, 3032 GFP_ATOMIC); 3033 BUG_ON(ret < 0); 3034 btrfs_set_node_key(t, key, tslot); 3035 btrfs_mark_buffer_dirty(path->nodes[i]); 3036 if (tslot != 0) 3037 break; 3038 } 3039 } 3040 3041 /* 3042 * update item key. 3043 * 3044 * This function isn't completely safe. 
It's the caller's responsibility 3045 * that the new key won't break the order 3046 */ 3047 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info, 3048 struct btrfs_path *path, 3049 const struct btrfs_key *new_key) 3050 { 3051 struct btrfs_disk_key disk_key; 3052 struct extent_buffer *eb; 3053 int slot; 3054 3055 eb = path->nodes[0]; 3056 slot = path->slots[0]; 3057 if (slot > 0) { 3058 btrfs_item_key(eb, &disk_key, slot - 1); 3059 if (unlikely(comp_keys(&disk_key, new_key) >= 0)) { 3060 btrfs_crit(fs_info, 3061 "slot %u key (%llu %u %llu) new key (%llu %u %llu)", 3062 slot, btrfs_disk_key_objectid(&disk_key), 3063 btrfs_disk_key_type(&disk_key), 3064 btrfs_disk_key_offset(&disk_key), 3065 new_key->objectid, new_key->type, 3066 new_key->offset); 3067 btrfs_print_leaf(eb); 3068 BUG(); 3069 } 3070 } 3071 if (slot < btrfs_header_nritems(eb) - 1) { 3072 btrfs_item_key(eb, &disk_key, slot + 1); 3073 if (unlikely(comp_keys(&disk_key, new_key) <= 0)) { 3074 btrfs_crit(fs_info, 3075 "slot %u key (%llu %u %llu) new key (%llu %u %llu)", 3076 slot, btrfs_disk_key_objectid(&disk_key), 3077 btrfs_disk_key_type(&disk_key), 3078 btrfs_disk_key_offset(&disk_key), 3079 new_key->objectid, new_key->type, 3080 new_key->offset); 3081 btrfs_print_leaf(eb); 3082 BUG(); 3083 } 3084 } 3085 3086 btrfs_cpu_key_to_disk(&disk_key, new_key); 3087 btrfs_set_item_key(eb, &disk_key, slot); 3088 btrfs_mark_buffer_dirty(eb); 3089 if (slot == 0) 3090 fixup_low_keys(path, &disk_key, 1); 3091 } 3092 3093 /* 3094 * Check key order of two sibling extent buffers. 3095 * 3096 * Return true if something is wrong. 3097 * Return false if everything is fine. 3098 * 3099 * Tree-checker only works inside one tree block, thus the following 3100 * corruption can not be detected by tree-checker: 3101 * 3102 * Leaf @left | Leaf @right 3103 * -------------------------------------------------------------- 3104 * | 1 | 2 | 3 | 4 | 5 | f6 | | 7 | 8 | 3105 * 3106 * Key f6 in leaf @left itself is valid, but not valid when the next 3107 * key in leaf @right is 7. 3108 * This can only be checked at tree block merge time. 3109 * And since tree checker has ensured all key order in each tree block 3110 * is correct, we only need to bother the last key of @left and the first 3111 * key of @right. 3112 */ 3113 static bool check_sibling_keys(struct extent_buffer *left, 3114 struct extent_buffer *right) 3115 { 3116 struct btrfs_key left_last; 3117 struct btrfs_key right_first; 3118 int level = btrfs_header_level(left); 3119 int nr_left = btrfs_header_nritems(left); 3120 int nr_right = btrfs_header_nritems(right); 3121 3122 /* No key to check in one of the tree blocks */ 3123 if (!nr_left || !nr_right) 3124 return false; 3125 3126 if (level) { 3127 btrfs_node_key_to_cpu(left, &left_last, nr_left - 1); 3128 btrfs_node_key_to_cpu(right, &right_first, 0); 3129 } else { 3130 btrfs_item_key_to_cpu(left, &left_last, nr_left - 1); 3131 btrfs_item_key_to_cpu(right, &right_first, 0); 3132 } 3133 3134 if (btrfs_comp_cpu_keys(&left_last, &right_first) >= 0) { 3135 btrfs_crit(left->fs_info, 3136 "bad key order, sibling blocks, left last (%llu %u %llu) right first (%llu %u %llu)", 3137 left_last.objectid, left_last.type, 3138 left_last.offset, right_first.objectid, 3139 right_first.type, right_first.offset); 3140 return true; 3141 } 3142 return false; 3143 } 3144 3145 /* 3146 * try to push data from one node into the next node left in the 3147 * tree. 
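 * (Unless @empty is set, at least 8 pointers are always left behind in
 * @src, so the node is never drained completely; see the checks below.)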
3148 * 3149 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible 3150 * error, and > 0 if there was no room in the left hand block. 3151 */ 3152 static int push_node_left(struct btrfs_trans_handle *trans, 3153 struct extent_buffer *dst, 3154 struct extent_buffer *src, int empty) 3155 { 3156 struct btrfs_fs_info *fs_info = trans->fs_info; 3157 int push_items = 0; 3158 int src_nritems; 3159 int dst_nritems; 3160 int ret = 0; 3161 3162 src_nritems = btrfs_header_nritems(src); 3163 dst_nritems = btrfs_header_nritems(dst); 3164 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems; 3165 WARN_ON(btrfs_header_generation(src) != trans->transid); 3166 WARN_ON(btrfs_header_generation(dst) != trans->transid); 3167 3168 if (!empty && src_nritems <= 8) 3169 return 1; 3170 3171 if (push_items <= 0) 3172 return 1; 3173 3174 if (empty) { 3175 push_items = min(src_nritems, push_items); 3176 if (push_items < src_nritems) { 3177 /* leave at least 8 pointers in the node if 3178 * we aren't going to empty it 3179 */ 3180 if (src_nritems - push_items < 8) { 3181 if (push_items <= 8) 3182 return 1; 3183 push_items -= 8; 3184 } 3185 } 3186 } else 3187 push_items = min(src_nritems - 8, push_items); 3188 3189 /* dst is the left eb, src is the middle eb */ 3190 if (check_sibling_keys(dst, src)) { 3191 ret = -EUCLEAN; 3192 btrfs_abort_transaction(trans, ret); 3193 return ret; 3194 } 3195 ret = tree_mod_log_eb_copy(dst, src, dst_nritems, 0, push_items); 3196 if (ret) { 3197 btrfs_abort_transaction(trans, ret); 3198 return ret; 3199 } 3200 copy_extent_buffer(dst, src, 3201 btrfs_node_key_ptr_offset(dst_nritems), 3202 btrfs_node_key_ptr_offset(0), 3203 push_items * sizeof(struct btrfs_key_ptr)); 3204 3205 if (push_items < src_nritems) { 3206 /* 3207 * Don't call tree_mod_log_insert_move here, key removal was 3208 * already fully logged by tree_mod_log_eb_copy above. 3209 */ 3210 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0), 3211 btrfs_node_key_ptr_offset(push_items), 3212 (src_nritems - push_items) * 3213 sizeof(struct btrfs_key_ptr)); 3214 } 3215 btrfs_set_header_nritems(src, src_nritems - push_items); 3216 btrfs_set_header_nritems(dst, dst_nritems + push_items); 3217 btrfs_mark_buffer_dirty(src); 3218 btrfs_mark_buffer_dirty(dst); 3219 3220 return ret; 3221 } 3222 3223 /* 3224 * try to push data from one node into the next node right in the 3225 * tree. 3226 * 3227 * returns 0 if some ptrs were pushed, < 0 if there was some horrible 3228 * error, and > 0 if there was no room in the right hand block. 
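 * (@dst here is the right-hand node of the pair and @src the node to its
 * left; it is the mirror image of push_node_left() above.)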
3229 * 3230 * this will only push up to 1/2 the contents of the left node over 3231 */ 3232 static int balance_node_right(struct btrfs_trans_handle *trans, 3233 struct extent_buffer *dst, 3234 struct extent_buffer *src) 3235 { 3236 struct btrfs_fs_info *fs_info = trans->fs_info; 3237 int push_items = 0; 3238 int max_push; 3239 int src_nritems; 3240 int dst_nritems; 3241 int ret = 0; 3242 3243 WARN_ON(btrfs_header_generation(src) != trans->transid); 3244 WARN_ON(btrfs_header_generation(dst) != trans->transid); 3245 3246 src_nritems = btrfs_header_nritems(src); 3247 dst_nritems = btrfs_header_nritems(dst); 3248 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems; 3249 if (push_items <= 0) 3250 return 1; 3251 3252 if (src_nritems < 4) 3253 return 1; 3254 3255 max_push = src_nritems / 2 + 1; 3256 /* don't try to empty the node */ 3257 if (max_push >= src_nritems) 3258 return 1; 3259 3260 if (max_push < push_items) 3261 push_items = max_push; 3262 3263 /* dst is the right eb, src is the middle eb */ 3264 if (check_sibling_keys(src, dst)) { 3265 ret = -EUCLEAN; 3266 btrfs_abort_transaction(trans, ret); 3267 return ret; 3268 } 3269 ret = tree_mod_log_insert_move(dst, push_items, 0, dst_nritems); 3270 BUG_ON(ret < 0); 3271 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items), 3272 btrfs_node_key_ptr_offset(0), 3273 (dst_nritems) * 3274 sizeof(struct btrfs_key_ptr)); 3275 3276 ret = tree_mod_log_eb_copy(dst, src, 0, src_nritems - push_items, 3277 push_items); 3278 if (ret) { 3279 btrfs_abort_transaction(trans, ret); 3280 return ret; 3281 } 3282 copy_extent_buffer(dst, src, 3283 btrfs_node_key_ptr_offset(0), 3284 btrfs_node_key_ptr_offset(src_nritems - push_items), 3285 push_items * sizeof(struct btrfs_key_ptr)); 3286 3287 btrfs_set_header_nritems(src, src_nritems - push_items); 3288 btrfs_set_header_nritems(dst, dst_nritems + push_items); 3289 3290 btrfs_mark_buffer_dirty(src); 3291 btrfs_mark_buffer_dirty(dst); 3292 3293 return ret; 3294 } 3295 3296 /* 3297 * helper function to insert a new root level in the tree. 3298 * A new node is allocated, and a single item is inserted to 3299 * point to the existing root 3300 * 3301 * returns zero on success or < 0 on failure. 
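 *
 * Rough sketch of the result (illustrative only):
 *
 *	before:	root->node -> [ old top node ]
 *	after:	root->node -> [ new node, one ptr ] -> [ old top node ]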
3302 */ 3303 static noinline int insert_new_root(struct btrfs_trans_handle *trans, 3304 struct btrfs_root *root, 3305 struct btrfs_path *path, int level) 3306 { 3307 struct btrfs_fs_info *fs_info = root->fs_info; 3308 u64 lower_gen; 3309 struct extent_buffer *lower; 3310 struct extent_buffer *c; 3311 struct extent_buffer *old; 3312 struct btrfs_disk_key lower_key; 3313 int ret; 3314 3315 BUG_ON(path->nodes[level]); 3316 BUG_ON(path->nodes[level-1] != root->node); 3317 3318 lower = path->nodes[level-1]; 3319 if (level == 1) 3320 btrfs_item_key(lower, &lower_key, 0); 3321 else 3322 btrfs_node_key(lower, &lower_key, 0); 3323 3324 c = alloc_tree_block_no_bg_flush(trans, root, 0, &lower_key, level, 3325 root->node->start, 0, 3326 BTRFS_NESTING_NEW_ROOT); 3327 if (IS_ERR(c)) 3328 return PTR_ERR(c); 3329 3330 root_add_used(root, fs_info->nodesize); 3331 3332 btrfs_set_header_nritems(c, 1); 3333 btrfs_set_node_key(c, &lower_key, 0); 3334 btrfs_set_node_blockptr(c, 0, lower->start); 3335 lower_gen = btrfs_header_generation(lower); 3336 WARN_ON(lower_gen != trans->transid); 3337 3338 btrfs_set_node_ptr_generation(c, 0, lower_gen); 3339 3340 btrfs_mark_buffer_dirty(c); 3341 3342 old = root->node; 3343 ret = tree_mod_log_insert_root(root->node, c, 0); 3344 BUG_ON(ret < 0); 3345 rcu_assign_pointer(root->node, c); 3346 3347 /* the super has an extra ref to root->node */ 3348 free_extent_buffer(old); 3349 3350 add_root_to_dirty_list(root); 3351 atomic_inc(&c->refs); 3352 path->nodes[level] = c; 3353 path->locks[level] = BTRFS_WRITE_LOCK; 3354 path->slots[level] = 0; 3355 return 0; 3356 } 3357 3358 /* 3359 * worker function to insert a single pointer in a node. 3360 * the node should have enough room for the pointer already 3361 * 3362 * slot and level indicate where you want the key to go, and 3363 * blocknr is the block the key points to. 3364 */ 3365 static void insert_ptr(struct btrfs_trans_handle *trans, 3366 struct btrfs_path *path, 3367 struct btrfs_disk_key *key, u64 bytenr, 3368 int slot, int level) 3369 { 3370 struct extent_buffer *lower; 3371 int nritems; 3372 int ret; 3373 3374 BUG_ON(!path->nodes[level]); 3375 btrfs_assert_tree_locked(path->nodes[level]); 3376 lower = path->nodes[level]; 3377 nritems = btrfs_header_nritems(lower); 3378 BUG_ON(slot > nritems); 3379 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(trans->fs_info)); 3380 if (slot != nritems) { 3381 if (level) { 3382 ret = tree_mod_log_insert_move(lower, slot + 1, slot, 3383 nritems - slot); 3384 BUG_ON(ret < 0); 3385 } 3386 memmove_extent_buffer(lower, 3387 btrfs_node_key_ptr_offset(slot + 1), 3388 btrfs_node_key_ptr_offset(slot), 3389 (nritems - slot) * sizeof(struct btrfs_key_ptr)); 3390 } 3391 if (level) { 3392 ret = tree_mod_log_insert_key(lower, slot, MOD_LOG_KEY_ADD, 3393 GFP_NOFS); 3394 BUG_ON(ret < 0); 3395 } 3396 btrfs_set_node_key(lower, key, slot); 3397 btrfs_set_node_blockptr(lower, slot, bytenr); 3398 WARN_ON(trans->transid == 0); 3399 btrfs_set_node_ptr_generation(lower, slot, trans->transid); 3400 btrfs_set_header_nritems(lower, nritems + 1); 3401 btrfs_mark_buffer_dirty(lower); 3402 } 3403 3404 /* 3405 * split the node at the specified level in path in two. 3406 * The path is corrected to point to the appropriate node after the split 3407 * 3408 * Before splitting this tries to make some room in the node by pushing 3409 * left and right, if either one works, it returns right away. 
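 * (If pushing does not free enough room, the pointers from the middle
 * slot onwards are copied into a newly allocated sibling and a pointer
 * to that sibling is inserted into the parent.)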
3410 * 3411 * returns 0 on success and < 0 on failure 3412 */ 3413 static noinline int split_node(struct btrfs_trans_handle *trans, 3414 struct btrfs_root *root, 3415 struct btrfs_path *path, int level) 3416 { 3417 struct btrfs_fs_info *fs_info = root->fs_info; 3418 struct extent_buffer *c; 3419 struct extent_buffer *split; 3420 struct btrfs_disk_key disk_key; 3421 int mid; 3422 int ret; 3423 u32 c_nritems; 3424 3425 c = path->nodes[level]; 3426 WARN_ON(btrfs_header_generation(c) != trans->transid); 3427 if (c == root->node) { 3428 /* 3429 * trying to split the root, lets make a new one 3430 * 3431 * tree mod log: We don't log_removal old root in 3432 * insert_new_root, because that root buffer will be kept as a 3433 * normal node. We are going to log removal of half of the 3434 * elements below with tree_mod_log_eb_copy. We're holding a 3435 * tree lock on the buffer, which is why we cannot race with 3436 * other tree_mod_log users. 3437 */ 3438 ret = insert_new_root(trans, root, path, level + 1); 3439 if (ret) 3440 return ret; 3441 } else { 3442 ret = push_nodes_for_insert(trans, root, path, level); 3443 c = path->nodes[level]; 3444 if (!ret && btrfs_header_nritems(c) < 3445 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) 3446 return 0; 3447 if (ret < 0) 3448 return ret; 3449 } 3450 3451 c_nritems = btrfs_header_nritems(c); 3452 mid = (c_nritems + 1) / 2; 3453 btrfs_node_key(c, &disk_key, mid); 3454 3455 split = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, level, 3456 c->start, 0, BTRFS_NESTING_SPLIT); 3457 if (IS_ERR(split)) 3458 return PTR_ERR(split); 3459 3460 root_add_used(root, fs_info->nodesize); 3461 ASSERT(btrfs_header_level(c) == level); 3462 3463 ret = tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid); 3464 if (ret) { 3465 btrfs_abort_transaction(trans, ret); 3466 return ret; 3467 } 3468 copy_extent_buffer(split, c, 3469 btrfs_node_key_ptr_offset(0), 3470 btrfs_node_key_ptr_offset(mid), 3471 (c_nritems - mid) * sizeof(struct btrfs_key_ptr)); 3472 btrfs_set_header_nritems(split, c_nritems - mid); 3473 btrfs_set_header_nritems(c, mid); 3474 3475 btrfs_mark_buffer_dirty(c); 3476 btrfs_mark_buffer_dirty(split); 3477 3478 insert_ptr(trans, path, &disk_key, split->start, 3479 path->slots[level + 1] + 1, level + 1); 3480 3481 if (path->slots[level] >= mid) { 3482 path->slots[level] -= mid; 3483 btrfs_tree_unlock(c); 3484 free_extent_buffer(c); 3485 path->nodes[level] = split; 3486 path->slots[level + 1] += 1; 3487 } else { 3488 btrfs_tree_unlock(split); 3489 free_extent_buffer(split); 3490 } 3491 return 0; 3492 } 3493 3494 /* 3495 * how many bytes are required to store the items in a leaf. start 3496 * and nr indicate which items in the leaf to check. This totals up the 3497 * space used both by the item structs and the item data 3498 */ 3499 static int leaf_space_used(struct extent_buffer *l, int start, int nr) 3500 { 3501 struct btrfs_item *start_item; 3502 struct btrfs_item *end_item; 3503 int data_len; 3504 int nritems = btrfs_header_nritems(l); 3505 int end = min(nritems, start + nr) - 1; 3506 3507 if (!nr) 3508 return 0; 3509 start_item = btrfs_item_nr(start); 3510 end_item = btrfs_item_nr(end); 3511 data_len = btrfs_item_offset(l, start_item) + 3512 btrfs_item_size(l, start_item); 3513 data_len = data_len - btrfs_item_offset(l, end_item); 3514 data_len += sizeof(struct btrfs_item) * nr; 3515 WARN_ON(data_len < 0); 3516 return data_len; 3517 } 3518 3519 /* 3520 * The space between the end of the leaf items and 3521 * the start of the leaf data. 
IOW, how much room 3522 * the leaf has left for both items and data 3523 */ 3524 noinline int btrfs_leaf_free_space(struct extent_buffer *leaf) 3525 { 3526 struct btrfs_fs_info *fs_info = leaf->fs_info; 3527 int nritems = btrfs_header_nritems(leaf); 3528 int ret; 3529 3530 ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems); 3531 if (ret < 0) { 3532 btrfs_crit(fs_info, 3533 "leaf free space ret %d, leaf data size %lu, used %d nritems %d", 3534 ret, 3535 (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info), 3536 leaf_space_used(leaf, 0, nritems), nritems); 3537 } 3538 return ret; 3539 } 3540 3541 /* 3542 * min slot controls the lowest index we're willing to push to the 3543 * right. We'll push up to and including min_slot, but no lower 3544 */ 3545 static noinline int __push_leaf_right(struct btrfs_path *path, 3546 int data_size, int empty, 3547 struct extent_buffer *right, 3548 int free_space, u32 left_nritems, 3549 u32 min_slot) 3550 { 3551 struct btrfs_fs_info *fs_info = right->fs_info; 3552 struct extent_buffer *left = path->nodes[0]; 3553 struct extent_buffer *upper = path->nodes[1]; 3554 struct btrfs_map_token token; 3555 struct btrfs_disk_key disk_key; 3556 int slot; 3557 u32 i; 3558 int push_space = 0; 3559 int push_items = 0; 3560 struct btrfs_item *item; 3561 u32 nr; 3562 u32 right_nritems; 3563 u32 data_end; 3564 u32 this_item_size; 3565 3566 if (empty) 3567 nr = 0; 3568 else 3569 nr = max_t(u32, 1, min_slot); 3570 3571 if (path->slots[0] >= left_nritems) 3572 push_space += data_size; 3573 3574 slot = path->slots[1]; 3575 i = left_nritems - 1; 3576 while (i >= nr) { 3577 item = btrfs_item_nr(i); 3578 3579 if (!empty && push_items > 0) { 3580 if (path->slots[0] > i) 3581 break; 3582 if (path->slots[0] == i) { 3583 int space = btrfs_leaf_free_space(left); 3584 3585 if (space + push_space * 2 > free_space) 3586 break; 3587 } 3588 } 3589 3590 if (path->slots[0] == i) 3591 push_space += data_size; 3592 3593 this_item_size = btrfs_item_size(left, item); 3594 if (this_item_size + sizeof(*item) + push_space > free_space) 3595 break; 3596 3597 push_items++; 3598 push_space += this_item_size + sizeof(*item); 3599 if (i == 0) 3600 break; 3601 i--; 3602 } 3603 3604 if (push_items == 0) 3605 goto out_unlock; 3606 3607 WARN_ON(!empty && push_items == left_nritems); 3608 3609 /* push left to right */ 3610 right_nritems = btrfs_header_nritems(right); 3611 3612 push_space = btrfs_item_end_nr(left, left_nritems - push_items); 3613 push_space -= leaf_data_end(left); 3614 3615 /* make room in the right data area */ 3616 data_end = leaf_data_end(right); 3617 memmove_extent_buffer(right, 3618 BTRFS_LEAF_DATA_OFFSET + data_end - push_space, 3619 BTRFS_LEAF_DATA_OFFSET + data_end, 3620 BTRFS_LEAF_DATA_SIZE(fs_info) - data_end); 3621 3622 /* copy from the left data area */ 3623 copy_extent_buffer(right, left, BTRFS_LEAF_DATA_OFFSET + 3624 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space, 3625 BTRFS_LEAF_DATA_OFFSET + leaf_data_end(left), 3626 push_space); 3627 3628 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items), 3629 btrfs_item_nr_offset(0), 3630 right_nritems * sizeof(struct btrfs_item)); 3631 3632 /* copy the items from left to right */ 3633 copy_extent_buffer(right, left, btrfs_item_nr_offset(0), 3634 btrfs_item_nr_offset(left_nritems - push_items), 3635 push_items * sizeof(struct btrfs_item)); 3636 3637 /* update the item pointers */ 3638 btrfs_init_map_token(&token, right); 3639 right_nritems += push_items; 3640 btrfs_set_header_nritems(right, right_nritems); 3641 push_space = 
BTRFS_LEAF_DATA_SIZE(fs_info); 3642 for (i = 0; i < right_nritems; i++) { 3643 item = btrfs_item_nr(i); 3644 push_space -= btrfs_token_item_size(&token, item); 3645 btrfs_set_token_item_offset(&token, item, push_space); 3646 } 3647 3648 left_nritems -= push_items; 3649 btrfs_set_header_nritems(left, left_nritems); 3650 3651 if (left_nritems) 3652 btrfs_mark_buffer_dirty(left); 3653 else 3654 btrfs_clean_tree_block(left); 3655 3656 btrfs_mark_buffer_dirty(right); 3657 3658 btrfs_item_key(right, &disk_key, 0); 3659 btrfs_set_node_key(upper, &disk_key, slot + 1); 3660 btrfs_mark_buffer_dirty(upper); 3661 3662 /* then fixup the leaf pointer in the path */ 3663 if (path->slots[0] >= left_nritems) { 3664 path->slots[0] -= left_nritems; 3665 if (btrfs_header_nritems(path->nodes[0]) == 0) 3666 btrfs_clean_tree_block(path->nodes[0]); 3667 btrfs_tree_unlock(path->nodes[0]); 3668 free_extent_buffer(path->nodes[0]); 3669 path->nodes[0] = right; 3670 path->slots[1] += 1; 3671 } else { 3672 btrfs_tree_unlock(right); 3673 free_extent_buffer(right); 3674 } 3675 return 0; 3676 3677 out_unlock: 3678 btrfs_tree_unlock(right); 3679 free_extent_buffer(right); 3680 return 1; 3681 } 3682 3683 /* 3684 * push some data in the path leaf to the right, trying to free up at 3685 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3686 * 3687 * returns 1 if the push failed because the other node didn't have enough 3688 * room, 0 if everything worked out and < 0 if there were major errors. 3689 * 3690 * this will push starting from min_slot to the end of the leaf. It won't 3691 * push any slot lower than min_slot 3692 */ 3693 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root 3694 *root, struct btrfs_path *path, 3695 int min_data_size, int data_size, 3696 int empty, u32 min_slot) 3697 { 3698 struct extent_buffer *left = path->nodes[0]; 3699 struct extent_buffer *right; 3700 struct extent_buffer *upper; 3701 int slot; 3702 int free_space; 3703 u32 left_nritems; 3704 int ret; 3705 3706 if (!path->nodes[1]) 3707 return 1; 3708 3709 slot = path->slots[1]; 3710 upper = path->nodes[1]; 3711 if (slot >= btrfs_header_nritems(upper) - 1) 3712 return 1; 3713 3714 btrfs_assert_tree_locked(path->nodes[1]); 3715 3716 right = btrfs_read_node_slot(upper, slot + 1); 3717 /* 3718 * slot + 1 is not valid or we fail to read the right node, 3719 * no big deal, just return. 3720 */ 3721 if (IS_ERR(right)) 3722 return 1; 3723 3724 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT); 3725 3726 free_space = btrfs_leaf_free_space(right); 3727 if (free_space < data_size) 3728 goto out_unlock; 3729 3730 /* cow and double check */ 3731 ret = btrfs_cow_block(trans, root, right, upper, 3732 slot + 1, &right, BTRFS_NESTING_RIGHT_COW); 3733 if (ret) 3734 goto out_unlock; 3735 3736 free_space = btrfs_leaf_free_space(right); 3737 if (free_space < data_size) 3738 goto out_unlock; 3739 3740 left_nritems = btrfs_header_nritems(left); 3741 if (left_nritems == 0) 3742 goto out_unlock; 3743 3744 if (check_sibling_keys(left, right)) { 3745 ret = -EUCLEAN; 3746 btrfs_tree_unlock(right); 3747 free_extent_buffer(right); 3748 return ret; 3749 } 3750 if (path->slots[0] == left_nritems && !empty) { 3751 /* Key greater than all keys in the leaf, right neighbor has 3752 * enough room for it and we're not emptying our leaf to delete 3753 * it, therefore use right neighbor to insert the new item and 3754 * no need to touch/dirty our left leaf. 
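 * In that case the path is simply handed over to slot 0 of the right
 * leaf, as done below.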
*/ 3755 btrfs_tree_unlock(left); 3756 free_extent_buffer(left); 3757 path->nodes[0] = right; 3758 path->slots[0] = 0; 3759 path->slots[1]++; 3760 return 0; 3761 } 3762 3763 return __push_leaf_right(path, min_data_size, empty, 3764 right, free_space, left_nritems, min_slot); 3765 out_unlock: 3766 btrfs_tree_unlock(right); 3767 free_extent_buffer(right); 3768 return 1; 3769 } 3770 3771 /* 3772 * push some data in the path leaf to the left, trying to free up at 3773 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3774 * 3775 * max_slot can put a limit on how far into the leaf we'll push items. The 3776 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the 3777 * items 3778 */ 3779 static noinline int __push_leaf_left(struct btrfs_path *path, int data_size, 3780 int empty, struct extent_buffer *left, 3781 int free_space, u32 right_nritems, 3782 u32 max_slot) 3783 { 3784 struct btrfs_fs_info *fs_info = left->fs_info; 3785 struct btrfs_disk_key disk_key; 3786 struct extent_buffer *right = path->nodes[0]; 3787 int i; 3788 int push_space = 0; 3789 int push_items = 0; 3790 struct btrfs_item *item; 3791 u32 old_left_nritems; 3792 u32 nr; 3793 int ret = 0; 3794 u32 this_item_size; 3795 u32 old_left_item_size; 3796 struct btrfs_map_token token; 3797 3798 if (empty) 3799 nr = min(right_nritems, max_slot); 3800 else 3801 nr = min(right_nritems - 1, max_slot); 3802 3803 for (i = 0; i < nr; i++) { 3804 item = btrfs_item_nr(i); 3805 3806 if (!empty && push_items > 0) { 3807 if (path->slots[0] < i) 3808 break; 3809 if (path->slots[0] == i) { 3810 int space = btrfs_leaf_free_space(right); 3811 3812 if (space + push_space * 2 > free_space) 3813 break; 3814 } 3815 } 3816 3817 if (path->slots[0] == i) 3818 push_space += data_size; 3819 3820 this_item_size = btrfs_item_size(right, item); 3821 if (this_item_size + sizeof(*item) + push_space > free_space) 3822 break; 3823 3824 push_items++; 3825 push_space += this_item_size + sizeof(*item); 3826 } 3827 3828 if (push_items == 0) { 3829 ret = 1; 3830 goto out; 3831 } 3832 WARN_ON(!empty && push_items == btrfs_header_nritems(right)); 3833 3834 /* push data from right to left */ 3835 copy_extent_buffer(left, right, 3836 btrfs_item_nr_offset(btrfs_header_nritems(left)), 3837 btrfs_item_nr_offset(0), 3838 push_items * sizeof(struct btrfs_item)); 3839 3840 push_space = BTRFS_LEAF_DATA_SIZE(fs_info) - 3841 btrfs_item_offset_nr(right, push_items - 1); 3842 3843 copy_extent_buffer(left, right, BTRFS_LEAF_DATA_OFFSET + 3844 leaf_data_end(left) - push_space, 3845 BTRFS_LEAF_DATA_OFFSET + 3846 btrfs_item_offset_nr(right, push_items - 1), 3847 push_space); 3848 old_left_nritems = btrfs_header_nritems(left); 3849 BUG_ON(old_left_nritems <= 0); 3850 3851 btrfs_init_map_token(&token, left); 3852 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1); 3853 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) { 3854 u32 ioff; 3855 3856 item = btrfs_item_nr(i); 3857 3858 ioff = btrfs_token_item_offset(&token, item); 3859 btrfs_set_token_item_offset(&token, item, 3860 ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size)); 3861 } 3862 btrfs_set_header_nritems(left, old_left_nritems + push_items); 3863 3864 /* fixup right node */ 3865 if (push_items > right_nritems) 3866 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items, 3867 right_nritems); 3868 3869 if (push_items < right_nritems) { 3870 push_space = btrfs_item_offset_nr(right, push_items - 1) - 3871 leaf_data_end(right); 3872 
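/*
 * Slide the data of the items that stay in the right leaf up
 * against the end of its data area.
 */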
memmove_extent_buffer(right, BTRFS_LEAF_DATA_OFFSET + 3873 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space, 3874 BTRFS_LEAF_DATA_OFFSET + 3875 leaf_data_end(right), push_space); 3876 3877 memmove_extent_buffer(right, btrfs_item_nr_offset(0), 3878 btrfs_item_nr_offset(push_items), 3879 (btrfs_header_nritems(right) - push_items) * 3880 sizeof(struct btrfs_item)); 3881 } 3882 3883 btrfs_init_map_token(&token, right); 3884 right_nritems -= push_items; 3885 btrfs_set_header_nritems(right, right_nritems); 3886 push_space = BTRFS_LEAF_DATA_SIZE(fs_info); 3887 for (i = 0; i < right_nritems; i++) { 3888 item = btrfs_item_nr(i); 3889 3890 push_space = push_space - btrfs_token_item_size(&token, item); 3891 btrfs_set_token_item_offset(&token, item, push_space); 3892 } 3893 3894 btrfs_mark_buffer_dirty(left); 3895 if (right_nritems) 3896 btrfs_mark_buffer_dirty(right); 3897 else 3898 btrfs_clean_tree_block(right); 3899 3900 btrfs_item_key(right, &disk_key, 0); 3901 fixup_low_keys(path, &disk_key, 1); 3902 3903 /* then fixup the leaf pointer in the path */ 3904 if (path->slots[0] < push_items) { 3905 path->slots[0] += old_left_nritems; 3906 btrfs_tree_unlock(path->nodes[0]); 3907 free_extent_buffer(path->nodes[0]); 3908 path->nodes[0] = left; 3909 path->slots[1] -= 1; 3910 } else { 3911 btrfs_tree_unlock(left); 3912 free_extent_buffer(left); 3913 path->slots[0] -= push_items; 3914 } 3915 BUG_ON(path->slots[0] < 0); 3916 return ret; 3917 out: 3918 btrfs_tree_unlock(left); 3919 free_extent_buffer(left); 3920 return ret; 3921 } 3922 3923 /* 3924 * push some data in the path leaf to the left, trying to free up at 3925 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3926 * 3927 * max_slot can put a limit on how far into the leaf we'll push items. The 3928 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the 3929 * items 3930 */ 3931 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root 3932 *root, struct btrfs_path *path, int min_data_size, 3933 int data_size, int empty, u32 max_slot) 3934 { 3935 struct extent_buffer *right = path->nodes[0]; 3936 struct extent_buffer *left; 3937 int slot; 3938 int free_space; 3939 u32 right_nritems; 3940 int ret = 0; 3941 3942 slot = path->slots[1]; 3943 if (slot == 0) 3944 return 1; 3945 if (!path->nodes[1]) 3946 return 1; 3947 3948 right_nritems = btrfs_header_nritems(right); 3949 if (right_nritems == 0) 3950 return 1; 3951 3952 btrfs_assert_tree_locked(path->nodes[1]); 3953 3954 left = btrfs_read_node_slot(path->nodes[1], slot - 1); 3955 /* 3956 * slot - 1 is not valid or we fail to read the left node, 3957 * no big deal, just return. 
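 * (push_leaf_right() above handles a missing or unreadable right
 * neighbor the same way.)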
3958 */ 3959 if (IS_ERR(left)) 3960 return 1; 3961 3962 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT); 3963 3964 free_space = btrfs_leaf_free_space(left); 3965 if (free_space < data_size) { 3966 ret = 1; 3967 goto out; 3968 } 3969 3970 /* cow and double check */ 3971 ret = btrfs_cow_block(trans, root, left, 3972 path->nodes[1], slot - 1, &left, 3973 BTRFS_NESTING_LEFT_COW); 3974 if (ret) { 3975 /* we hit -ENOSPC, but it isn't fatal here */ 3976 if (ret == -ENOSPC) 3977 ret = 1; 3978 goto out; 3979 } 3980 3981 free_space = btrfs_leaf_free_space(left); 3982 if (free_space < data_size) { 3983 ret = 1; 3984 goto out; 3985 } 3986 3987 if (check_sibling_keys(left, right)) { 3988 ret = -EUCLEAN; 3989 goto out; 3990 } 3991 return __push_leaf_left(path, min_data_size, 3992 empty, left, free_space, right_nritems, 3993 max_slot); 3994 out: 3995 btrfs_tree_unlock(left); 3996 free_extent_buffer(left); 3997 return ret; 3998 } 3999 4000 /* 4001 * split the path's leaf in two, making sure there is at least data_size 4002 * available for the resulting leaf level of the path. 4003 */ 4004 static noinline void copy_for_split(struct btrfs_trans_handle *trans, 4005 struct btrfs_path *path, 4006 struct extent_buffer *l, 4007 struct extent_buffer *right, 4008 int slot, int mid, int nritems) 4009 { 4010 struct btrfs_fs_info *fs_info = trans->fs_info; 4011 int data_copy_size; 4012 int rt_data_off; 4013 int i; 4014 struct btrfs_disk_key disk_key; 4015 struct btrfs_map_token token; 4016 4017 nritems = nritems - mid; 4018 btrfs_set_header_nritems(right, nritems); 4019 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(l); 4020 4021 copy_extent_buffer(right, l, btrfs_item_nr_offset(0), 4022 btrfs_item_nr_offset(mid), 4023 nritems * sizeof(struct btrfs_item)); 4024 4025 copy_extent_buffer(right, l, 4026 BTRFS_LEAF_DATA_OFFSET + BTRFS_LEAF_DATA_SIZE(fs_info) - 4027 data_copy_size, BTRFS_LEAF_DATA_OFFSET + 4028 leaf_data_end(l), data_copy_size); 4029 4030 rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid); 4031 4032 btrfs_init_map_token(&token, right); 4033 for (i = 0; i < nritems; i++) { 4034 struct btrfs_item *item = btrfs_item_nr(i); 4035 u32 ioff; 4036 4037 ioff = btrfs_token_item_offset(&token, item); 4038 btrfs_set_token_item_offset(&token, item, ioff + rt_data_off); 4039 } 4040 4041 btrfs_set_header_nritems(l, mid); 4042 btrfs_item_key(right, &disk_key, 0); 4043 insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1); 4044 4045 btrfs_mark_buffer_dirty(right); 4046 btrfs_mark_buffer_dirty(l); 4047 BUG_ON(path->slots[0] != slot); 4048 4049 if (mid <= slot) { 4050 btrfs_tree_unlock(path->nodes[0]); 4051 free_extent_buffer(path->nodes[0]); 4052 path->nodes[0] = right; 4053 path->slots[0] -= mid; 4054 path->slots[1] += 1; 4055 } else { 4056 btrfs_tree_unlock(right); 4057 free_extent_buffer(right); 4058 } 4059 4060 BUG_ON(path->slots[0] < 0); 4061 } 4062 4063 /* 4064 * double splits happen when we need to insert a big item in the middle 4065 * of a leaf. A double split can leave us with 3 mostly empty leaves: 4066 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ] 4067 * A B C 4068 * 4069 * We avoid this by trying to push the items on either side of our target 4070 * into the adjacent leaves. If all goes well we can avoid the double split 4071 * completely. 
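* For example, if our target slot sits in the middle of a full leaf, pushing the items after it into the right neighbour and the items before it into the left neighbour can leave the target as the first or last slot of its leaf, so a single split (or no split at all) is enough.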
4072 */ 4073 static noinline int push_for_double_split(struct btrfs_trans_handle *trans, 4074 struct btrfs_root *root, 4075 struct btrfs_path *path, 4076 int data_size) 4077 { 4078 int ret; 4079 int progress = 0; 4080 int slot; 4081 u32 nritems; 4082 int space_needed = data_size; 4083 4084 slot = path->slots[0]; 4085 if (slot < btrfs_header_nritems(path->nodes[0])) 4086 space_needed -= btrfs_leaf_free_space(path->nodes[0]); 4087 4088 /* 4089 * try to push all the items after our slot into the 4090 * right leaf 4091 */ 4092 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot); 4093 if (ret < 0) 4094 return ret; 4095 4096 if (ret == 0) 4097 progress++; 4098 4099 nritems = btrfs_header_nritems(path->nodes[0]); 4100 /* 4101 * our goal is to get our slot at the start or end of a leaf. If 4102 * we've done so we're done 4103 */ 4104 if (path->slots[0] == 0 || path->slots[0] == nritems) 4105 return 0; 4106 4107 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size) 4108 return 0; 4109 4110 /* try to push all the items before our slot into the next leaf */ 4111 slot = path->slots[0]; 4112 space_needed = data_size; 4113 if (slot > 0) 4114 space_needed -= btrfs_leaf_free_space(path->nodes[0]); 4115 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot); 4116 if (ret < 0) 4117 return ret; 4118 4119 if (ret == 0) 4120 progress++; 4121 4122 if (progress) 4123 return 0; 4124 return 1; 4125 } 4126 4127 /* 4128 * split the path's leaf in two, making sure there is at least data_size 4129 * available for the resulting leaf level of the path. 4130 * 4131 * returns 0 if all went well and < 0 on failure. 4132 */ 4133 static noinline int split_leaf(struct btrfs_trans_handle *trans, 4134 struct btrfs_root *root, 4135 const struct btrfs_key *ins_key, 4136 struct btrfs_path *path, int data_size, 4137 int extend) 4138 { 4139 struct btrfs_disk_key disk_key; 4140 struct extent_buffer *l; 4141 u32 nritems; 4142 int mid; 4143 int slot; 4144 struct extent_buffer *right; 4145 struct btrfs_fs_info *fs_info = root->fs_info; 4146 int ret = 0; 4147 int wret; 4148 int split; 4149 int num_doubles = 0; 4150 int tried_avoid_double = 0; 4151 4152 l = path->nodes[0]; 4153 slot = path->slots[0]; 4154 if (extend && data_size + btrfs_item_size_nr(l, slot) + 4155 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info)) 4156 return -EOVERFLOW; 4157 4158 /* first try to make some room by pushing left and right */ 4159 if (data_size && path->nodes[1]) { 4160 int space_needed = data_size; 4161 4162 if (slot < btrfs_header_nritems(l)) 4163 space_needed -= btrfs_leaf_free_space(l); 4164 4165 wret = push_leaf_right(trans, root, path, space_needed, 4166 space_needed, 0, 0); 4167 if (wret < 0) 4168 return wret; 4169 if (wret) { 4170 space_needed = data_size; 4171 if (slot > 0) 4172 space_needed -= btrfs_leaf_free_space(l); 4173 wret = push_leaf_left(trans, root, path, space_needed, 4174 space_needed, 0, (u32)-1); 4175 if (wret < 0) 4176 return wret; 4177 } 4178 l = path->nodes[0]; 4179 4180 /* did the pushes work? 
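i.e. is there now enough free space in this leaf that we can skip splitting it?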
*/ 4181 if (btrfs_leaf_free_space(l) >= data_size) 4182 return 0; 4183 } 4184 4185 if (!path->nodes[1]) { 4186 ret = insert_new_root(trans, root, path, 1); 4187 if (ret) 4188 return ret; 4189 } 4190 again: 4191 split = 1; 4192 l = path->nodes[0]; 4193 slot = path->slots[0]; 4194 nritems = btrfs_header_nritems(l); 4195 mid = (nritems + 1) / 2; 4196 4197 if (mid <= slot) { 4198 if (nritems == 1 || 4199 leaf_space_used(l, mid, nritems - mid) + data_size > 4200 BTRFS_LEAF_DATA_SIZE(fs_info)) { 4201 if (slot >= nritems) { 4202 split = 0; 4203 } else { 4204 mid = slot; 4205 if (mid != nritems && 4206 leaf_space_used(l, mid, nritems - mid) + 4207 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) { 4208 if (data_size && !tried_avoid_double) 4209 goto push_for_double; 4210 split = 2; 4211 } 4212 } 4213 } 4214 } else { 4215 if (leaf_space_used(l, 0, mid) + data_size > 4216 BTRFS_LEAF_DATA_SIZE(fs_info)) { 4217 if (!extend && data_size && slot == 0) { 4218 split = 0; 4219 } else if ((extend || !data_size) && slot == 0) { 4220 mid = 1; 4221 } else { 4222 mid = slot; 4223 if (mid != nritems && 4224 leaf_space_used(l, mid, nritems - mid) + 4225 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) { 4226 if (data_size && !tried_avoid_double) 4227 goto push_for_double; 4228 split = 2; 4229 } 4230 } 4231 } 4232 } 4233 4234 if (split == 0) 4235 btrfs_cpu_key_to_disk(&disk_key, ins_key); 4236 else 4237 btrfs_item_key(l, &disk_key, mid); 4238 4239 /* 4240 * We have to use BTRFS_NESTING_NEW_ROOT here if we've done a double 4241 * split, because we're only allowed to have MAX_LOCKDEP_SUBCLASSES 4242 * subclasses, which is 8 at the time of this patch, and we've maxed it 4243 * out. In the future we could add a 4244 * BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just 4245 * use BTRFS_NESTING_NEW_ROOT. 4246 */ 4247 right = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, 0, 4248 l->start, 0, num_doubles ? 4249 BTRFS_NESTING_NEW_ROOT : 4250 BTRFS_NESTING_SPLIT); 4251 if (IS_ERR(right)) 4252 return PTR_ERR(right); 4253 4254 root_add_used(root, fs_info->nodesize); 4255 4256 if (split == 0) { 4257 if (mid <= slot) { 4258 btrfs_set_header_nritems(right, 0); 4259 insert_ptr(trans, path, &disk_key, 4260 right->start, path->slots[1] + 1, 1); 4261 btrfs_tree_unlock(path->nodes[0]); 4262 free_extent_buffer(path->nodes[0]); 4263 path->nodes[0] = right; 4264 path->slots[0] = 0; 4265 path->slots[1] += 1; 4266 } else { 4267 btrfs_set_header_nritems(right, 0); 4268 insert_ptr(trans, path, &disk_key, 4269 right->start, path->slots[1], 1); 4270 btrfs_tree_unlock(path->nodes[0]); 4271 free_extent_buffer(path->nodes[0]); 4272 path->nodes[0] = right; 4273 path->slots[0] = 0; 4274 if (path->slots[1] == 0) 4275 fixup_low_keys(path, &disk_key, 1); 4276 } 4277 /* 4278 * We create a new leaf 'right' for the required ins_len and 4279 * we'll do btrfs_mark_buffer_dirty() on this leaf after copying 4280 * the new item (of size ins_len) into 'right'.
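* The copy itself is done later by the caller that asked for the insertion (for example via setup_items_for_insert()), once the search returns with the path pointing into 'right'.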
4281 */ 4282 return ret; 4283 } 4284 4285 copy_for_split(trans, path, l, right, slot, mid, nritems); 4286 4287 if (split == 2) { 4288 BUG_ON(num_doubles != 0); 4289 num_doubles++; 4290 goto again; 4291 } 4292 4293 return 0; 4294 4295 push_for_double: 4296 push_for_double_split(trans, root, path, data_size); 4297 tried_avoid_double = 1; 4298 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size) 4299 return 0; 4300 goto again; 4301 } 4302 4303 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans, 4304 struct btrfs_root *root, 4305 struct btrfs_path *path, int ins_len) 4306 { 4307 struct btrfs_key key; 4308 struct extent_buffer *leaf; 4309 struct btrfs_file_extent_item *fi; 4310 u64 extent_len = 0; 4311 u32 item_size; 4312 int ret; 4313 4314 leaf = path->nodes[0]; 4315 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 4316 4317 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY && 4318 key.type != BTRFS_EXTENT_CSUM_KEY); 4319 4320 if (btrfs_leaf_free_space(leaf) >= ins_len) 4321 return 0; 4322 4323 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 4324 if (key.type == BTRFS_EXTENT_DATA_KEY) { 4325 fi = btrfs_item_ptr(leaf, path->slots[0], 4326 struct btrfs_file_extent_item); 4327 extent_len = btrfs_file_extent_num_bytes(leaf, fi); 4328 } 4329 btrfs_release_path(path); 4330 4331 path->keep_locks = 1; 4332 path->search_for_split = 1; 4333 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 4334 path->search_for_split = 0; 4335 if (ret > 0) 4336 ret = -EAGAIN; 4337 if (ret < 0) 4338 goto err; 4339 4340 ret = -EAGAIN; 4341 leaf = path->nodes[0]; 4342 /* if our item isn't there, return now */ 4343 if (item_size != btrfs_item_size_nr(leaf, path->slots[0])) 4344 goto err; 4345 4346 /* the leaf has changed, it now has room. return now */ 4347 if (btrfs_leaf_free_space(path->nodes[0]) >= ins_len) 4348 goto err; 4349 4350 if (key.type == BTRFS_EXTENT_DATA_KEY) { 4351 fi = btrfs_item_ptr(leaf, path->slots[0], 4352 struct btrfs_file_extent_item); 4353 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi)) 4354 goto err; 4355 } 4356 4357 ret = split_leaf(trans, root, &key, path, ins_len, 1); 4358 if (ret) 4359 goto err; 4360 4361 path->keep_locks = 0; 4362 btrfs_unlock_up_safe(path, 1); 4363 return 0; 4364 err: 4365 path->keep_locks = 0; 4366 return ret; 4367 } 4368 4369 static noinline int split_item(struct btrfs_path *path, 4370 const struct btrfs_key *new_key, 4371 unsigned long split_offset) 4372 { 4373 struct extent_buffer *leaf; 4374 struct btrfs_item *item; 4375 struct btrfs_item *new_item; 4376 int slot; 4377 char *buf; 4378 u32 nritems; 4379 u32 item_size; 4380 u32 orig_offset; 4381 struct btrfs_disk_key disk_key; 4382 4383 leaf = path->nodes[0]; 4384 BUG_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item)); 4385 4386 item = btrfs_item_nr(path->slots[0]); 4387 orig_offset = btrfs_item_offset(leaf, item); 4388 item_size = btrfs_item_size(leaf, item); 4389 4390 buf = kmalloc(item_size, GFP_NOFS); 4391 if (!buf) 4392 return -ENOMEM; 4393 4394 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, 4395 path->slots[0]), item_size); 4396 4397 slot = path->slots[0] + 1; 4398 nritems = btrfs_header_nritems(leaf); 4399 if (slot != nritems) { 4400 /* shift the items */ 4401 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1), 4402 btrfs_item_nr_offset(slot), 4403 (nritems - slot) * sizeof(struct btrfs_item)); 4404 } 4405 4406 btrfs_cpu_key_to_disk(&disk_key, new_key); 4407 btrfs_set_item_key(leaf, &disk_key, slot); 4408 4409 new_item = btrfs_item_nr(slot); 4410 4411 
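/* The new item at 'slot' takes over the low end of the original data area and will receive the bytes from split_offset onward; the original item keeps the first split_offset bytes at the high end. */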
btrfs_set_item_offset(leaf, new_item, orig_offset); 4412 btrfs_set_item_size(leaf, new_item, item_size - split_offset); 4413 4414 btrfs_set_item_offset(leaf, item, 4415 orig_offset + item_size - split_offset); 4416 btrfs_set_item_size(leaf, item, split_offset); 4417 4418 btrfs_set_header_nritems(leaf, nritems + 1); 4419 4420 /* write the data for the start of the original item */ 4421 write_extent_buffer(leaf, buf, 4422 btrfs_item_ptr_offset(leaf, path->slots[0]), 4423 split_offset); 4424 4425 /* write the data for the new item */ 4426 write_extent_buffer(leaf, buf + split_offset, 4427 btrfs_item_ptr_offset(leaf, slot), 4428 item_size - split_offset); 4429 btrfs_mark_buffer_dirty(leaf); 4430 4431 BUG_ON(btrfs_leaf_free_space(leaf) < 0); 4432 kfree(buf); 4433 return 0; 4434 } 4435 4436 /* 4437 * This function splits a single item into two items, 4438 * giving 'new_key' to the new item and splitting the 4439 * old one at split_offset (from the start of the item). 4440 * 4441 * The path may be released by this operation. After 4442 * the split, the path is pointing to the old item. The 4443 * new item is going to be in the same node as the old one. 4444 * 4445 * Note, the item being split must be small enough to live alone on 4446 * a tree block with room for one extra struct btrfs_item 4447 * 4448 * This allows us to split the item in place, keeping a lock on the 4449 * leaf the entire time. 4450 */ 4451 int btrfs_split_item(struct btrfs_trans_handle *trans, 4452 struct btrfs_root *root, 4453 struct btrfs_path *path, 4454 const struct btrfs_key *new_key, 4455 unsigned long split_offset) 4456 { 4457 int ret; 4458 ret = setup_leaf_for_split(trans, root, path, 4459 sizeof(struct btrfs_item)); 4460 if (ret) 4461 return ret; 4462 4463 ret = split_item(path, new_key, split_offset); 4464 return ret; 4465 } 4466 4467 /* 4468 * This function duplicates an item, giving 'new_key' to the new item. 4469 * It guarantees both items live in the same tree leaf and the new item 4470 * is contiguous with the original item. 4471 * 4472 * This allows us to split a file extent in place, keeping a lock on the 4473 * leaf the entire time. 4474 */ 4475 int btrfs_duplicate_item(struct btrfs_trans_handle *trans, 4476 struct btrfs_root *root, 4477 struct btrfs_path *path, 4478 const struct btrfs_key *new_key) 4479 { 4480 struct extent_buffer *leaf; 4481 int ret; 4482 u32 item_size; 4483 4484 leaf = path->nodes[0]; 4485 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 4486 ret = setup_leaf_for_split(trans, root, path, 4487 item_size + sizeof(struct btrfs_item)); 4488 if (ret) 4489 return ret; 4490 4491 path->slots[0]++; 4492 setup_items_for_insert(root, path, new_key, &item_size, 1); 4493 leaf = path->nodes[0]; 4494 memcpy_extent_buffer(leaf, 4495 btrfs_item_ptr_offset(leaf, path->slots[0]), 4496 btrfs_item_ptr_offset(leaf, path->slots[0] - 1), 4497 item_size); 4498 return 0; 4499 } 4500 4501 /* 4502 * make the item pointed to by the path smaller. new_size indicates 4503 * how small to make it, and from_end tells us if we just chop bytes 4504 * off the end of the item or if we shift the item to chop bytes off 4505 * the front.
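* A minimal usage sketch (illustrative only, not taken from a real caller): once btrfs_search_slot() has positioned the path at the item, dropping the last 16 bytes of its payload is roughly btrfs_truncate_item(path, btrfs_item_size_nr(path->nodes[0], path->slots[0]) - 16, 1); passing from_end == 0 with the same new_size instead removes bytes from the front, shifting the data and bumping the item's key offset.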
4506 */ 4507 void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end) 4508 { 4509 int slot; 4510 struct extent_buffer *leaf; 4511 struct btrfs_item *item; 4512 u32 nritems; 4513 unsigned int data_end; 4514 unsigned int old_data_start; 4515 unsigned int old_size; 4516 unsigned int size_diff; 4517 int i; 4518 struct btrfs_map_token token; 4519 4520 leaf = path->nodes[0]; 4521 slot = path->slots[0]; 4522 4523 old_size = btrfs_item_size_nr(leaf, slot); 4524 if (old_size == new_size) 4525 return; 4526 4527 nritems = btrfs_header_nritems(leaf); 4528 data_end = leaf_data_end(leaf); 4529 4530 old_data_start = btrfs_item_offset_nr(leaf, slot); 4531 4532 size_diff = old_size - new_size; 4533 4534 BUG_ON(slot < 0); 4535 BUG_ON(slot >= nritems); 4536 4537 /* 4538 * item0..itemN ... dataN.offset..dataN.size .. data0.size 4539 */ 4540 /* first correct the data pointers */ 4541 btrfs_init_map_token(&token, leaf); 4542 for (i = slot; i < nritems; i++) { 4543 u32 ioff; 4544 item = btrfs_item_nr(i); 4545 4546 ioff = btrfs_token_item_offset(&token, item); 4547 btrfs_set_token_item_offset(&token, item, ioff + size_diff); 4548 } 4549 4550 /* shift the data */ 4551 if (from_end) { 4552 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET + 4553 data_end + size_diff, BTRFS_LEAF_DATA_OFFSET + 4554 data_end, old_data_start + new_size - data_end); 4555 } else { 4556 struct btrfs_disk_key disk_key; 4557 u64 offset; 4558 4559 btrfs_item_key(leaf, &disk_key, slot); 4560 4561 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) { 4562 unsigned long ptr; 4563 struct btrfs_file_extent_item *fi; 4564 4565 fi = btrfs_item_ptr(leaf, slot, 4566 struct btrfs_file_extent_item); 4567 fi = (struct btrfs_file_extent_item *)( 4568 (unsigned long)fi - size_diff); 4569 4570 if (btrfs_file_extent_type(leaf, fi) == 4571 BTRFS_FILE_EXTENT_INLINE) { 4572 ptr = btrfs_item_ptr_offset(leaf, slot); 4573 memmove_extent_buffer(leaf, ptr, 4574 (unsigned long)fi, 4575 BTRFS_FILE_EXTENT_INLINE_DATA_START); 4576 } 4577 } 4578 4579 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET + 4580 data_end + size_diff, BTRFS_LEAF_DATA_OFFSET + 4581 data_end, old_data_start - data_end); 4582 4583 offset = btrfs_disk_key_offset(&disk_key); 4584 btrfs_set_disk_key_offset(&disk_key, offset + size_diff); 4585 btrfs_set_item_key(leaf, &disk_key, slot); 4586 if (slot == 0) 4587 fixup_low_keys(path, &disk_key, 1); 4588 } 4589 4590 item = btrfs_item_nr(slot); 4591 btrfs_set_item_size(leaf, item, new_size); 4592 btrfs_mark_buffer_dirty(leaf); 4593 4594 if (btrfs_leaf_free_space(leaf) < 0) { 4595 btrfs_print_leaf(leaf); 4596 BUG(); 4597 } 4598 } 4599 4600 /* 4601 * make the item pointed to by the path bigger, data_size is the added size. 
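* As with btrfs_truncate_item() above, the caller must already hold a path to the item and the leaf must have enough free space. An illustrative sketch (hypothetical caller, not kernel code): to grow the current item by 8 bytes and fill the new tail, call btrfs_extend_item(path, 8); and then write_extent_buffer(path->nodes[0], &tail, btrfs_item_ptr_offset(path->nodes[0], path->slots[0]) + old_size, 8); where old_size is the item size recorded before extending and 'tail' holds the new bytes.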
4602 */ 4603 void btrfs_extend_item(struct btrfs_path *path, u32 data_size) 4604 { 4605 int slot; 4606 struct extent_buffer *leaf; 4607 struct btrfs_item *item; 4608 u32 nritems; 4609 unsigned int data_end; 4610 unsigned int old_data; 4611 unsigned int old_size; 4612 int i; 4613 struct btrfs_map_token token; 4614 4615 leaf = path->nodes[0]; 4616 4617 nritems = btrfs_header_nritems(leaf); 4618 data_end = leaf_data_end(leaf); 4619 4620 if (btrfs_leaf_free_space(leaf) < data_size) { 4621 btrfs_print_leaf(leaf); 4622 BUG(); 4623 } 4624 slot = path->slots[0]; 4625 old_data = btrfs_item_end_nr(leaf, slot); 4626 4627 BUG_ON(slot < 0); 4628 if (slot >= nritems) { 4629 btrfs_print_leaf(leaf); 4630 btrfs_crit(leaf->fs_info, "slot %d too large, nritems %d", 4631 slot, nritems); 4632 BUG(); 4633 } 4634 4635 /* 4636 * item0..itemN ... dataN.offset..dataN.size .. data0.size 4637 */ 4638 /* first correct the data pointers */ 4639 btrfs_init_map_token(&token, leaf); 4640 for (i = slot; i < nritems; i++) { 4641 u32 ioff; 4642 item = btrfs_item_nr(i); 4643 4644 ioff = btrfs_token_item_offset(&token, item); 4645 btrfs_set_token_item_offset(&token, item, ioff - data_size); 4646 } 4647 4648 /* shift the data */ 4649 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET + 4650 data_end - data_size, BTRFS_LEAF_DATA_OFFSET + 4651 data_end, old_data - data_end); 4652 4653 data_end = old_data; 4654 old_size = btrfs_item_size_nr(leaf, slot); 4655 item = btrfs_item_nr(slot); 4656 btrfs_set_item_size(leaf, item, old_size + data_size); 4657 btrfs_mark_buffer_dirty(leaf); 4658 4659 if (btrfs_leaf_free_space(leaf) < 0) { 4660 btrfs_print_leaf(leaf); 4661 BUG(); 4662 } 4663 } 4664 4665 /** 4666 * setup_items_for_insert - Helper called before inserting one or more items 4667 * to a leaf. 
Main purpose is to save stack depth by doing the bulk of the work 4668 * in a function that doesn't call btrfs_search_slot 4669 * 4670 * @root: root we are inserting items to 4671 * @path: points to the leaf/slot where we are going to insert new items 4672 * @cpu_key: array of keys for items to be inserted 4673 * @data_size: size of the body of each item we are going to insert 4674 * @nr: size of @cpu_key/@data_size arrays 4675 */ 4676 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path, 4677 const struct btrfs_key *cpu_key, u32 *data_size, 4678 int nr) 4679 { 4680 struct btrfs_fs_info *fs_info = root->fs_info; 4681 struct btrfs_item *item; 4682 int i; 4683 u32 nritems; 4684 unsigned int data_end; 4685 struct btrfs_disk_key disk_key; 4686 struct extent_buffer *leaf; 4687 int slot; 4688 struct btrfs_map_token token; 4689 u32 total_size; 4690 u32 total_data = 0; 4691 4692 for (i = 0; i < nr; i++) 4693 total_data += data_size[i]; 4694 total_size = total_data + (nr * sizeof(struct btrfs_item)); 4695 4696 if (path->slots[0] == 0) { 4697 btrfs_cpu_key_to_disk(&disk_key, cpu_key); 4698 fixup_low_keys(path, &disk_key, 1); 4699 } 4700 btrfs_unlock_up_safe(path, 1); 4701 4702 leaf = path->nodes[0]; 4703 slot = path->slots[0]; 4704 4705 nritems = btrfs_header_nritems(leaf); 4706 data_end = leaf_data_end(leaf); 4707 4708 if (btrfs_leaf_free_space(leaf) < total_size) { 4709 btrfs_print_leaf(leaf); 4710 btrfs_crit(fs_info, "not enough freespace need %u have %d", 4711 total_size, btrfs_leaf_free_space(leaf)); 4712 BUG(); 4713 } 4714 4715 btrfs_init_map_token(&token, leaf); 4716 if (slot != nritems) { 4717 unsigned int old_data = btrfs_item_end_nr(leaf, slot); 4718 4719 if (old_data < data_end) { 4720 btrfs_print_leaf(leaf); 4721 btrfs_crit(fs_info, 4722 "item at slot %d with data offset %u beyond data end of leaf %u", 4723 slot, old_data, data_end); 4724 BUG(); 4725 } 4726 /* 4727 * item0..itemN ... dataN.offset..dataN.size .. data0.size 4728 */ 4729 /* first correct the data pointers */ 4730 for (i = slot; i < nritems; i++) { 4731 u32 ioff; 4732 4733 item = btrfs_item_nr(i); 4734 ioff = btrfs_token_item_offset(&token, item); 4735 btrfs_set_token_item_offset(&token, item, 4736 ioff - total_data); 4737 } 4738 /* shift the items */ 4739 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr), 4740 btrfs_item_nr_offset(slot), 4741 (nritems - slot) * sizeof(struct btrfs_item)); 4742 4743 /* shift the data */ 4744 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET + 4745 data_end - total_data, BTRFS_LEAF_DATA_OFFSET + 4746 data_end, old_data - data_end); 4747 data_end = old_data; 4748 } 4749 4750 /* setup the item for the new data */ 4751 for (i = 0; i < nr; i++) { 4752 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i); 4753 btrfs_set_item_key(leaf, &disk_key, slot + i); 4754 item = btrfs_item_nr(slot + i); 4755 data_end -= data_size[i]; 4756 btrfs_set_token_item_offset(&token, item, data_end); 4757 btrfs_set_token_item_size(&token, item, data_size[i]); 4758 } 4759 4760 btrfs_set_header_nritems(leaf, nritems + nr); 4761 btrfs_mark_buffer_dirty(leaf); 4762 4763 if (btrfs_leaf_free_space(leaf) < 0) { 4764 btrfs_print_leaf(leaf); 4765 BUG(); 4766 } 4767 } 4768 4769 /* 4770 * Given a key and some data, insert items into the tree. 4771 * This does all the path init required, making room in the tree if needed. 
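* A typical caller reserves the space and then fills the item in place; an illustrative single-item sketch (error handling trimmed): ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(data)); then write_extent_buffer(path->nodes[0], &data, btrfs_item_ptr_offset(path->nodes[0], path->slots[0]), sizeof(data)); and finally btrfs_mark_buffer_dirty(path->nodes[0]); btrfs_insert_item() below wraps exactly this pattern.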
4772 */ 4773 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, 4774 struct btrfs_root *root, 4775 struct btrfs_path *path, 4776 const struct btrfs_key *cpu_key, u32 *data_size, 4777 int nr) 4778 { 4779 int ret = 0; 4780 int slot; 4781 int i; 4782 u32 total_size = 0; 4783 u32 total_data = 0; 4784 4785 for (i = 0; i < nr; i++) 4786 total_data += data_size[i]; 4787 4788 total_size = total_data + (nr * sizeof(struct btrfs_item)); 4789 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1); 4790 if (ret == 0) 4791 return -EEXIST; 4792 if (ret < 0) 4793 return ret; 4794 4795 slot = path->slots[0]; 4796 BUG_ON(slot < 0); 4797 4798 setup_items_for_insert(root, path, cpu_key, data_size, nr); 4799 return 0; 4800 } 4801 4802 /* 4803 * Given a key and some data, insert an item into the tree. 4804 * This does all the path init required, making room in the tree if needed. 4805 */ 4806 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, 4807 const struct btrfs_key *cpu_key, void *data, 4808 u32 data_size) 4809 { 4810 int ret = 0; 4811 struct btrfs_path *path; 4812 struct extent_buffer *leaf; 4813 unsigned long ptr; 4814 4815 path = btrfs_alloc_path(); 4816 if (!path) 4817 return -ENOMEM; 4818 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size); 4819 if (!ret) { 4820 leaf = path->nodes[0]; 4821 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 4822 write_extent_buffer(leaf, data, ptr, data_size); 4823 btrfs_mark_buffer_dirty(leaf); 4824 } 4825 btrfs_free_path(path); 4826 return ret; 4827 } 4828 4829 /* 4830 * delete the pointer from a given node. 4831 * 4832 * the tree should have been previously balanced so the deletion does not 4833 * empty a node. 4834 */ 4835 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path, 4836 int level, int slot) 4837 { 4838 struct extent_buffer *parent = path->nodes[level]; 4839 u32 nritems; 4840 int ret; 4841 4842 nritems = btrfs_header_nritems(parent); 4843 if (slot != nritems - 1) { 4844 if (level) { 4845 ret = tree_mod_log_insert_move(parent, slot, slot + 1, 4846 nritems - slot - 1); 4847 BUG_ON(ret < 0); 4848 } 4849 memmove_extent_buffer(parent, 4850 btrfs_node_key_ptr_offset(slot), 4851 btrfs_node_key_ptr_offset(slot + 1), 4852 sizeof(struct btrfs_key_ptr) * 4853 (nritems - slot - 1)); 4854 } else if (level) { 4855 ret = tree_mod_log_insert_key(parent, slot, MOD_LOG_KEY_REMOVE, 4856 GFP_NOFS); 4857 BUG_ON(ret < 0); 4858 } 4859 4860 nritems--; 4861 btrfs_set_header_nritems(parent, nritems); 4862 if (nritems == 0 && parent == root->node) { 4863 BUG_ON(btrfs_header_level(root->node) != 1); 4864 /* just turn the root into a leaf and break */ 4865 btrfs_set_header_level(root->node, 0); 4866 } else if (slot == 0) { 4867 struct btrfs_disk_key disk_key; 4868 4869 btrfs_node_key(parent, &disk_key, 0); 4870 fixup_low_keys(path, &disk_key, level + 1); 4871 } 4872 btrfs_mark_buffer_dirty(parent); 4873 } 4874 4875 /* 4876 * a helper function to delete the leaf pointed to by path->slots[1] and 4877 * path->nodes[1]. 4878 * 4879 * This deletes the pointer in path->nodes[1] and frees the leaf 4880 * block extent. zero is returned if it all worked out, < 0 otherwise. 4881 * 4882 * The path must have already been setup for deleting the leaf, including 4883 * all the proper balancing. path->nodes[1] must be locked. 
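* The leaf's extent is freed through btrfs_free_tree_block(); the extra reference taken below keeps the extent_buffer itself alive until free_extent_buffer_stale() drops it.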
4884 */ 4885 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans, 4886 struct btrfs_root *root, 4887 struct btrfs_path *path, 4888 struct extent_buffer *leaf) 4889 { 4890 WARN_ON(btrfs_header_generation(leaf) != trans->transid); 4891 del_ptr(root, path, 1, path->slots[1]); 4892 4893 /* 4894 * btrfs_free_extent is expensive, we want to make sure we 4895 * aren't holding any locks when we call it 4896 */ 4897 btrfs_unlock_up_safe(path, 0); 4898 4899 root_sub_used(root, leaf->len); 4900 4901 atomic_inc(&leaf->refs); 4902 btrfs_free_tree_block(trans, root, leaf, 0, 1); 4903 free_extent_buffer_stale(leaf); 4904 } 4905 /* 4906 * delete the item at the leaf level in path. If that empties 4907 * the leaf, remove it from the tree 4908 */ 4909 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, 4910 struct btrfs_path *path, int slot, int nr) 4911 { 4912 struct btrfs_fs_info *fs_info = root->fs_info; 4913 struct extent_buffer *leaf; 4914 struct btrfs_item *item; 4915 u32 last_off; 4916 u32 dsize = 0; 4917 int ret = 0; 4918 int wret; 4919 int i; 4920 u32 nritems; 4921 4922 leaf = path->nodes[0]; 4923 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1); 4924 4925 for (i = 0; i < nr; i++) 4926 dsize += btrfs_item_size_nr(leaf, slot + i); 4927 4928 nritems = btrfs_header_nritems(leaf); 4929 4930 if (slot + nr != nritems) { 4931 int data_end = leaf_data_end(leaf); 4932 struct btrfs_map_token token; 4933 4934 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET + 4935 data_end + dsize, 4936 BTRFS_LEAF_DATA_OFFSET + data_end, 4937 last_off - data_end); 4938 4939 btrfs_init_map_token(&token, leaf); 4940 for (i = slot + nr; i < nritems; i++) { 4941 u32 ioff; 4942 4943 item = btrfs_item_nr(i); 4944 ioff = btrfs_token_item_offset(&token, item); 4945 btrfs_set_token_item_offset(&token, item, ioff + dsize); 4946 } 4947 4948 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot), 4949 btrfs_item_nr_offset(slot + nr), 4950 sizeof(struct btrfs_item) * 4951 (nritems - slot - nr)); 4952 } 4953 btrfs_set_header_nritems(leaf, nritems - nr); 4954 nritems -= nr; 4955 4956 /* delete the leaf if we've emptied it */ 4957 if (nritems == 0) { 4958 if (leaf == root->node) { 4959 btrfs_set_header_level(leaf, 0); 4960 } else { 4961 btrfs_clean_tree_block(leaf); 4962 btrfs_del_leaf(trans, root, path, leaf); 4963 } 4964 } else { 4965 int used = leaf_space_used(leaf, 0, nritems); 4966 if (slot == 0) { 4967 struct btrfs_disk_key disk_key; 4968 4969 btrfs_item_key(leaf, &disk_key, 0); 4970 fixup_low_keys(path, &disk_key, 1); 4971 } 4972 4973 /* delete the leaf if it is mostly empty */ 4974 if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) { 4975 /* push_leaf_left fixes the path. 4976 * make sure the path still points to our leaf 4977 * for possible call to del_ptr below 4978 */ 4979 slot = path->slots[1]; 4980 atomic_inc(&leaf->refs); 4981 4982 wret = push_leaf_left(trans, root, path, 1, 1, 4983 1, (u32)-1); 4984 if (wret < 0 && wret != -ENOSPC) 4985 ret = wret; 4986 4987 if (path->nodes[0] == leaf && 4988 btrfs_header_nritems(leaf)) { 4989 wret = push_leaf_right(trans, root, path, 1, 4990 1, 1, 0); 4991 if (wret < 0 && wret != -ENOSPC) 4992 ret = wret; 4993 } 4994 4995 if (btrfs_header_nritems(leaf) == 0) { 4996 path->slots[1] = slot; 4997 btrfs_del_leaf(trans, root, path, leaf); 4998 free_extent_buffer(leaf); 4999 ret = 0; 5000 } else { 5001 /* if we're still in the path, make sure 5002 * we're dirty. 
Otherwise, one of the 5003 * push_leaf functions must have already 5004 * dirtied this buffer 5005 */ 5006 if (path->nodes[0] == leaf) 5007 btrfs_mark_buffer_dirty(leaf); 5008 free_extent_buffer(leaf); 5009 } 5010 } else { 5011 btrfs_mark_buffer_dirty(leaf); 5012 } 5013 } 5014 return ret; 5015 } 5016 5017 /* 5018 * search the tree again to find a leaf with lesser keys 5019 * returns 0 if it found something or 1 if there are no lesser leaves. 5020 * returns < 0 on io errors. 5021 * 5022 * This may release the path, and so you may lose any locks held at the 5023 * time you call it. 5024 */ 5025 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path) 5026 { 5027 struct btrfs_key key; 5028 struct btrfs_disk_key found_key; 5029 int ret; 5030 5031 btrfs_item_key_to_cpu(path->nodes[0], &key, 0); 5032 5033 if (key.offset > 0) { 5034 key.offset--; 5035 } else if (key.type > 0) { 5036 key.type--; 5037 key.offset = (u64)-1; 5038 } else if (key.objectid > 0) { 5039 key.objectid--; 5040 key.type = (u8)-1; 5041 key.offset = (u64)-1; 5042 } else { 5043 return 1; 5044 } 5045 5046 btrfs_release_path(path); 5047 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5048 if (ret < 0) 5049 return ret; 5050 btrfs_item_key(path->nodes[0], &found_key, 0); 5051 ret = comp_keys(&found_key, &key); 5052 /* 5053 * We might have had an item with the previous key in the tree right 5054 * before we released our path. And after we released our path, that 5055 * item might have been pushed to the first slot (0) of the leaf we 5056 * were holding due to a tree balance. Alternatively, an item with the 5057 * previous key can exist as the only element of a leaf (big fat item). 5058 * Therefore account for these 2 cases, so that our callers (like 5059 * btrfs_previous_item) don't miss an existing item with a key matching 5060 * the previous key we computed above. 5061 */ 5062 if (ret <= 0) 5063 return 0; 5064 return 1; 5065 } 5066 5067 /* 5068 * A helper function to walk down the tree starting at min_key, and looking 5069 * for nodes or leaves that have a minimum transaction id. 5070 * This is used by the btree defrag code, and tree logging 5071 * 5072 * This does not cow, but it does stuff the starting key it finds back 5073 * into min_key, so you can call btrfs_search_slot with cow=1 on the 5074 * key and get a writable path. 5075 * 5076 * This honors path->lowest_level to prevent descent past a given level 5077 * of the tree. 5078 * 5079 * min_trans indicates the oldest transaction that you are interested 5080 * in walking through. Any nodes or leaves older than min_trans are 5081 * skipped over (without reading them). 5082 * 5083 * returns zero if something useful was found, < 0 on error and 1 if there 5084 * was nothing in the tree that matched the search criteria.
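* A rough usage sketch (illustrative only, process_item() is a hypothetical helper): zero min_key, then while (btrfs_search_forward(root, &min_key, path, min_trans) == 0) { process_item(path->nodes[0], path->slots[0]); btrfs_release_path(path); min_key.offset++; } - note that real callers advance min_key more carefully than the bare offset++ shown here.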
5085 */ 5086 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key, 5087 struct btrfs_path *path, 5088 u64 min_trans) 5089 { 5090 struct extent_buffer *cur; 5091 struct btrfs_key found_key; 5092 int slot; 5093 int sret; 5094 u32 nritems; 5095 int level; 5096 int ret = 1; 5097 int keep_locks = path->keep_locks; 5098 5099 path->keep_locks = 1; 5100 again: 5101 cur = btrfs_read_lock_root_node(root); 5102 level = btrfs_header_level(cur); 5103 WARN_ON(path->nodes[level]); 5104 path->nodes[level] = cur; 5105 path->locks[level] = BTRFS_READ_LOCK; 5106 5107 if (btrfs_header_generation(cur) < min_trans) { 5108 ret = 1; 5109 goto out; 5110 } 5111 while (1) { 5112 nritems = btrfs_header_nritems(cur); 5113 level = btrfs_header_level(cur); 5114 sret = btrfs_bin_search(cur, min_key, &slot); 5115 if (sret < 0) { 5116 ret = sret; 5117 goto out; 5118 } 5119 5120 /* at the lowest level, we're done, setup the path and exit */ 5121 if (level == path->lowest_level) { 5122 if (slot >= nritems) 5123 goto find_next_key; 5124 ret = 0; 5125 path->slots[level] = slot; 5126 btrfs_item_key_to_cpu(cur, &found_key, slot); 5127 goto out; 5128 } 5129 if (sret && slot > 0) 5130 slot--; 5131 /* 5132 * check this node pointer against the min_trans parameters. 5133 * If it is too old, skip to the next one. 5134 */ 5135 while (slot < nritems) { 5136 u64 gen; 5137 5138 gen = btrfs_node_ptr_generation(cur, slot); 5139 if (gen < min_trans) { 5140 slot++; 5141 continue; 5142 } 5143 break; 5144 } 5145 find_next_key: 5146 /* 5147 * we didn't find a candidate key in this node, walk forward 5148 * and find another one 5149 */ 5150 if (slot >= nritems) { 5151 path->slots[level] = slot; 5152 sret = btrfs_find_next_key(root, path, min_key, level, 5153 min_trans); 5154 if (sret == 0) { 5155 btrfs_release_path(path); 5156 goto again; 5157 } else { 5158 goto out; 5159 } 5160 } 5161 /* save our key for returning back */ 5162 btrfs_node_key_to_cpu(cur, &found_key, slot); 5163 path->slots[level] = slot; 5164 if (level == path->lowest_level) { 5165 ret = 0; 5166 goto out; 5167 } 5168 cur = btrfs_read_node_slot(cur, slot); 5169 if (IS_ERR(cur)) { 5170 ret = PTR_ERR(cur); 5171 goto out; 5172 } 5173 5174 btrfs_tree_read_lock(cur); 5175 5176 path->locks[level - 1] = BTRFS_READ_LOCK; 5177 path->nodes[level - 1] = cur; 5178 unlock_up(path, level, 1, 0, NULL); 5179 } 5180 out: 5181 path->keep_locks = keep_locks; 5182 if (ret == 0) { 5183 btrfs_unlock_up_safe(path, path->lowest_level + 1); 5184 memcpy(min_key, &found_key, sizeof(found_key)); 5185 } 5186 return ret; 5187 } 5188 5189 /* 5190 * this is similar to btrfs_next_leaf, but does not try to preserve 5191 * and fixup the path. It looks for and returns the next key in the 5192 * tree based on the current path and the min_trans parameters. 5193 * 5194 * 0 is returned if another key is found, < 0 if there are any errors 5195 * and 1 is returned if there are no higher keys in the tree 5196 * 5197 * path->keep_locks should be set to 1 on the search made before 5198 * calling this function. 
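* One notable user is btrfs_search_forward() above: when its walk runs off the end of a node it uses this to find the key to restart from.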
5199 */ 5200 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, 5201 struct btrfs_key *key, int level, u64 min_trans) 5202 { 5203 int slot; 5204 struct extent_buffer *c; 5205 5206 WARN_ON(!path->keep_locks && !path->skip_locking); 5207 while (level < BTRFS_MAX_LEVEL) { 5208 if (!path->nodes[level]) 5209 return 1; 5210 5211 slot = path->slots[level] + 1; 5212 c = path->nodes[level]; 5213 next: 5214 if (slot >= btrfs_header_nritems(c)) { 5215 int ret; 5216 int orig_lowest; 5217 struct btrfs_key cur_key; 5218 if (level + 1 >= BTRFS_MAX_LEVEL || 5219 !path->nodes[level + 1]) 5220 return 1; 5221 5222 if (path->locks[level + 1] || path->skip_locking) { 5223 level++; 5224 continue; 5225 } 5226 5227 slot = btrfs_header_nritems(c) - 1; 5228 if (level == 0) 5229 btrfs_item_key_to_cpu(c, &cur_key, slot); 5230 else 5231 btrfs_node_key_to_cpu(c, &cur_key, slot); 5232 5233 orig_lowest = path->lowest_level; 5234 btrfs_release_path(path); 5235 path->lowest_level = level; 5236 ret = btrfs_search_slot(NULL, root, &cur_key, path, 5237 0, 0); 5238 path->lowest_level = orig_lowest; 5239 if (ret < 0) 5240 return ret; 5241 5242 c = path->nodes[level]; 5243 slot = path->slots[level]; 5244 if (ret == 0) 5245 slot++; 5246 goto next; 5247 } 5248 5249 if (level == 0) 5250 btrfs_item_key_to_cpu(c, key, slot); 5251 else { 5252 u64 gen = btrfs_node_ptr_generation(c, slot); 5253 5254 if (gen < min_trans) { 5255 slot++; 5256 goto next; 5257 } 5258 btrfs_node_key_to_cpu(c, key, slot); 5259 } 5260 return 0; 5261 } 5262 return 1; 5263 } 5264 5265 /* 5266 * search the tree again to find a leaf with greater keys 5267 * returns 0 if it found something or 1 if there are no greater leaves. 5268 * returns < 0 on io errors. 5269 */ 5270 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) 5271 { 5272 return btrfs_next_old_leaf(root, path, 0); 5273 } 5274 5275 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, 5276 u64 time_seq) 5277 { 5278 int slot; 5279 int level; 5280 struct extent_buffer *c; 5281 struct extent_buffer *next; 5282 struct btrfs_key key; 5283 u32 nritems; 5284 int ret; 5285 int i; 5286 5287 nritems = btrfs_header_nritems(path->nodes[0]); 5288 if (nritems == 0) 5289 return 1; 5290 5291 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1); 5292 again: 5293 level = 1; 5294 next = NULL; 5295 btrfs_release_path(path); 5296 5297 path->keep_locks = 1; 5298 5299 if (time_seq) 5300 ret = btrfs_search_old_slot(root, &key, path, time_seq); 5301 else 5302 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5303 path->keep_locks = 0; 5304 5305 if (ret < 0) 5306 return ret; 5307 5308 nritems = btrfs_header_nritems(path->nodes[0]); 5309 /* 5310 * by releasing the path above we dropped all our locks. A balance 5311 * could have added more items next to the key that used to be 5312 * at the very end of the block. So, check again here and 5313 * advance the path if there are now more items available. 5314 */ 5315 if (nritems > 0 && path->slots[0] < nritems - 1) { 5316 if (ret == 0) 5317 path->slots[0]++; 5318 ret = 0; 5319 goto done; 5320 } 5321 /* 5322 * So the above check misses one case: 5323 * - after releasing the path above, someone has removed the item that 5324 * used to be at the very end of the block, and balance between leafs 5325 * gets another one with bigger key.offset to replace it. 5326 * 5327 * This one should be returned as well, or we can get leaf corruption 5328 * later(esp. in __btrfs_drop_extents()). 
5329 * 5330 * And a bit more explanation about this check, 5331 * with ret > 0, the key isn't found, the path points to the slot 5332 * where it should be inserted, so the path->slots[0] item must be the 5333 * bigger one. 5334 */ 5335 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) { 5336 ret = 0; 5337 goto done; 5338 } 5339 5340 while (level < BTRFS_MAX_LEVEL) { 5341 if (!path->nodes[level]) { 5342 ret = 1; 5343 goto done; 5344 } 5345 5346 slot = path->slots[level] + 1; 5347 c = path->nodes[level]; 5348 if (slot >= btrfs_header_nritems(c)) { 5349 level++; 5350 if (level == BTRFS_MAX_LEVEL) { 5351 ret = 1; 5352 goto done; 5353 } 5354 continue; 5355 } 5356 5357 5358 /* 5359 * Our current level is where we're going to start from, and to 5360 * make sure lockdep doesn't complain we need to drop our locks 5361 * and nodes from 0 to our current level. 5362 */ 5363 for (i = 0; i < level; i++) { 5364 if (path->locks[level]) { 5365 btrfs_tree_read_unlock(path->nodes[i]); 5366 path->locks[i] = 0; 5367 } 5368 free_extent_buffer(path->nodes[i]); 5369 path->nodes[i] = NULL; 5370 } 5371 5372 next = c; 5373 ret = read_block_for_search(root, path, &next, level, 5374 slot, &key); 5375 if (ret == -EAGAIN) 5376 goto again; 5377 5378 if (ret < 0) { 5379 btrfs_release_path(path); 5380 goto done; 5381 } 5382 5383 if (!path->skip_locking) { 5384 ret = btrfs_try_tree_read_lock(next); 5385 if (!ret && time_seq) { 5386 /* 5387 * If we don't get the lock, we may be racing 5388 * with push_leaf_left, holding that lock while 5389 * itself waiting for the leaf we've currently 5390 * locked. To solve this situation, we give up 5391 * on our lock and cycle. 5392 */ 5393 free_extent_buffer(next); 5394 btrfs_release_path(path); 5395 cond_resched(); 5396 goto again; 5397 } 5398 if (!ret) 5399 btrfs_tree_read_lock(next); 5400 } 5401 break; 5402 } 5403 path->slots[level] = slot; 5404 while (1) { 5405 level--; 5406 path->nodes[level] = next; 5407 path->slots[level] = 0; 5408 if (!path->skip_locking) 5409 path->locks[level] = BTRFS_READ_LOCK; 5410 if (!level) 5411 break; 5412 5413 ret = read_block_for_search(root, path, &next, level, 5414 0, &key); 5415 if (ret == -EAGAIN) 5416 goto again; 5417 5418 if (ret < 0) { 5419 btrfs_release_path(path); 5420 goto done; 5421 } 5422 5423 if (!path->skip_locking) 5424 btrfs_tree_read_lock(next); 5425 } 5426 ret = 0; 5427 done: 5428 unlock_up(path, 0, 1, 0, NULL); 5429 5430 return ret; 5431 } 5432 5433 /* 5434 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps 5435 * searching until it gets past min_objectid or finds an item of 'type' 5436 * 5437 * returns 0 if something is found, 1 if nothing was found and < 0 on error 5438 */ 5439 int btrfs_previous_item(struct btrfs_root *root, 5440 struct btrfs_path *path, u64 min_objectid, 5441 int type) 5442 { 5443 struct btrfs_key found_key; 5444 struct extent_buffer *leaf; 5445 u32 nritems; 5446 int ret; 5447 5448 while (1) { 5449 if (path->slots[0] == 0) { 5450 ret = btrfs_prev_leaf(root, path); 5451 if (ret != 0) 5452 return ret; 5453 } else { 5454 path->slots[0]--; 5455 } 5456 leaf = path->nodes[0]; 5457 nritems = btrfs_header_nritems(leaf); 5458 if (nritems == 0) 5459 return 1; 5460 if (path->slots[0] == nritems) 5461 path->slots[0]--; 5462 5463 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 5464 if (found_key.objectid < min_objectid) 5465 break; 5466 if (found_key.type == type) 5467 return 0; 5468 if (found_key.objectid == min_objectid && 5469 found_key.type < type) 5470 break; 5471 } 5472 return 1; 
5473 } 5474 5475 /* 5476 * search in extent tree to find a previous Metadata/Data extent item with 5477 * min objecitd. 5478 * 5479 * returns 0 if something is found, 1 if nothing was found and < 0 on error 5480 */ 5481 int btrfs_previous_extent_item(struct btrfs_root *root, 5482 struct btrfs_path *path, u64 min_objectid) 5483 { 5484 struct btrfs_key found_key; 5485 struct extent_buffer *leaf; 5486 u32 nritems; 5487 int ret; 5488 5489 while (1) { 5490 if (path->slots[0] == 0) { 5491 ret = btrfs_prev_leaf(root, path); 5492 if (ret != 0) 5493 return ret; 5494 } else { 5495 path->slots[0]--; 5496 } 5497 leaf = path->nodes[0]; 5498 nritems = btrfs_header_nritems(leaf); 5499 if (nritems == 0) 5500 return 1; 5501 if (path->slots[0] == nritems) 5502 path->slots[0]--; 5503 5504 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 5505 if (found_key.objectid < min_objectid) 5506 break; 5507 if (found_key.type == BTRFS_EXTENT_ITEM_KEY || 5508 found_key.type == BTRFS_METADATA_ITEM_KEY) 5509 return 0; 5510 if (found_key.objectid == min_objectid && 5511 found_key.type < BTRFS_EXTENT_ITEM_KEY) 5512 break; 5513 } 5514 return 1; 5515 } 5516