1 /* 2 * Copyright (C) 2007,2008 Oracle. All rights reserved. 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of the GNU General Public 6 * License v2 as published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope that it will be useful, 9 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * General Public License for more details. 12 * 13 * You should have received a copy of the GNU General Public 14 * License along with this program; if not, write to the 15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 16 * Boston, MA 021110-1307, USA. 17 */ 18 19 #include <linux/sched.h> 20 #include <linux/slab.h> 21 #include <linux/rbtree.h> 22 #include "ctree.h" 23 #include "disk-io.h" 24 #include "transaction.h" 25 #include "print-tree.h" 26 #include "locking.h" 27 28 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root 29 *root, struct btrfs_path *path, int level); 30 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root 31 *root, struct btrfs_key *ins_key, 32 struct btrfs_path *path, int data_size, int extend); 33 static int push_node_left(struct btrfs_trans_handle *trans, 34 struct btrfs_root *root, struct extent_buffer *dst, 35 struct extent_buffer *src, int empty); 36 static int balance_node_right(struct btrfs_trans_handle *trans, 37 struct btrfs_root *root, 38 struct extent_buffer *dst_buf, 39 struct extent_buffer *src_buf); 40 static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root, 41 struct btrfs_path *path, int level, int slot); 42 static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, 43 struct extent_buffer *eb); 44 struct extent_buffer *read_old_tree_block(struct btrfs_root *root, u64 bytenr, 45 u32 blocksize, u64 parent_transid, 46 u64 time_seq); 47 struct extent_buffer *btrfs_find_old_tree_block(struct btrfs_root *root, 48 u64 bytenr, u32 blocksize, 49 u64 time_seq); 50 51 struct btrfs_path *btrfs_alloc_path(void) 52 { 53 struct btrfs_path *path; 54 path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS); 55 return path; 56 } 57 58 /* 59 * set all locked nodes in the path to blocking locks. This should 60 * be done before scheduling 61 */ 62 noinline void btrfs_set_path_blocking(struct btrfs_path *p) 63 { 64 int i; 65 for (i = 0; i < BTRFS_MAX_LEVEL; i++) { 66 if (!p->nodes[i] || !p->locks[i]) 67 continue; 68 btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]); 69 if (p->locks[i] == BTRFS_READ_LOCK) 70 p->locks[i] = BTRFS_READ_LOCK_BLOCKING; 71 else if (p->locks[i] == BTRFS_WRITE_LOCK) 72 p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING; 73 } 74 } 75 76 /* 77 * reset all the locked nodes in the patch to spinning locks. 78 * 79 * held is used to keep lockdep happy, when lockdep is enabled 80 * we set held to a blocking lock before we go around and 81 * retake all the spinlocks in the path. You can safely use NULL 82 * for held 83 */ 84 noinline void btrfs_clear_path_blocking(struct btrfs_path *p, 85 struct extent_buffer *held, int held_rw) 86 { 87 int i; 88 89 #ifdef CONFIG_DEBUG_LOCK_ALLOC 90 /* lockdep really cares that we take all of these spinlocks 91 * in the right order. If any of the locks in the path are not 92 * currently blocking, it is going to complain. So, make really 93 * really sure by forcing the path to blocking before we clear 94 * the path blocking. 
95 */ 96 if (held) { 97 btrfs_set_lock_blocking_rw(held, held_rw); 98 if (held_rw == BTRFS_WRITE_LOCK) 99 held_rw = BTRFS_WRITE_LOCK_BLOCKING; 100 else if (held_rw == BTRFS_READ_LOCK) 101 held_rw = BTRFS_READ_LOCK_BLOCKING; 102 } 103 btrfs_set_path_blocking(p); 104 #endif 105 106 for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) { 107 if (p->nodes[i] && p->locks[i]) { 108 btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]); 109 if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING) 110 p->locks[i] = BTRFS_WRITE_LOCK; 111 else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING) 112 p->locks[i] = BTRFS_READ_LOCK; 113 } 114 } 115 116 #ifdef CONFIG_DEBUG_LOCK_ALLOC 117 if (held) 118 btrfs_clear_lock_blocking_rw(held, held_rw); 119 #endif 120 } 121 122 /* this also releases the path */ 123 void btrfs_free_path(struct btrfs_path *p) 124 { 125 if (!p) 126 return; 127 btrfs_release_path(p); 128 kmem_cache_free(btrfs_path_cachep, p); 129 } 130 131 /* 132 * path release drops references on the extent buffers in the path 133 * and it drops any locks held by this path 134 * 135 * It is safe to call this on paths that no locks or extent buffers held. 136 */ 137 noinline void btrfs_release_path(struct btrfs_path *p) 138 { 139 int i; 140 141 for (i = 0; i < BTRFS_MAX_LEVEL; i++) { 142 p->slots[i] = 0; 143 if (!p->nodes[i]) 144 continue; 145 if (p->locks[i]) { 146 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]); 147 p->locks[i] = 0; 148 } 149 free_extent_buffer(p->nodes[i]); 150 p->nodes[i] = NULL; 151 } 152 } 153 154 /* 155 * safely gets a reference on the root node of a tree. A lock 156 * is not taken, so a concurrent writer may put a different node 157 * at the root of the tree. See btrfs_lock_root_node for the 158 * looping required. 159 * 160 * The extent buffer returned by this has a reference taken, so 161 * it won't disappear. It may stop being the root of the tree 162 * at any time because there are no locks held. 163 */ 164 struct extent_buffer *btrfs_root_node(struct btrfs_root *root) 165 { 166 struct extent_buffer *eb; 167 168 while (1) { 169 rcu_read_lock(); 170 eb = rcu_dereference(root->node); 171 172 /* 173 * RCU really hurts here, we could free up the root node because 174 * it was cow'ed but we may not get the new root node yet so do 175 * the inc_not_zero dance and if it doesn't work then 176 * synchronize_rcu and try again. 177 */ 178 if (atomic_inc_not_zero(&eb->refs)) { 179 rcu_read_unlock(); 180 break; 181 } 182 rcu_read_unlock(); 183 synchronize_rcu(); 184 } 185 return eb; 186 } 187 188 /* loop around taking references on and locking the root node of the 189 * tree until you end up with a lock on the root. A locked buffer 190 * is returned, with a reference held. 191 */ 192 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root) 193 { 194 struct extent_buffer *eb; 195 196 while (1) { 197 eb = btrfs_root_node(root); 198 btrfs_tree_lock(eb); 199 if (eb == root->node) 200 break; 201 btrfs_tree_unlock(eb); 202 free_extent_buffer(eb); 203 } 204 return eb; 205 } 206 207 /* loop around taking references on and locking the root node of the 208 * tree until you end up with a lock on the root. A locked buffer 209 * is returned, with a reference held. 
210 */ 211 struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root) 212 { 213 struct extent_buffer *eb; 214 215 while (1) { 216 eb = btrfs_root_node(root); 217 btrfs_tree_read_lock(eb); 218 if (eb == root->node) 219 break; 220 btrfs_tree_read_unlock(eb); 221 free_extent_buffer(eb); 222 } 223 return eb; 224 } 225 226 /* cowonly root (everything not a reference counted cow subvolume), just get 227 * put onto a simple dirty list. transaction.c walks this to make sure they 228 * get properly updated on disk. 229 */ 230 static void add_root_to_dirty_list(struct btrfs_root *root) 231 { 232 spin_lock(&root->fs_info->trans_lock); 233 if (root->track_dirty && list_empty(&root->dirty_list)) { 234 list_add(&root->dirty_list, 235 &root->fs_info->dirty_cowonly_roots); 236 } 237 spin_unlock(&root->fs_info->trans_lock); 238 } 239 240 /* 241 * used by snapshot creation to make a copy of a root for a tree with 242 * a given objectid. The buffer with the new root node is returned in 243 * cow_ret, and this func returns zero on success or a negative error code. 244 */ 245 int btrfs_copy_root(struct btrfs_trans_handle *trans, 246 struct btrfs_root *root, 247 struct extent_buffer *buf, 248 struct extent_buffer **cow_ret, u64 new_root_objectid) 249 { 250 struct extent_buffer *cow; 251 int ret = 0; 252 int level; 253 struct btrfs_disk_key disk_key; 254 255 WARN_ON(root->ref_cows && trans->transid != 256 root->fs_info->running_transaction->transid); 257 WARN_ON(root->ref_cows && trans->transid != root->last_trans); 258 259 level = btrfs_header_level(buf); 260 if (level == 0) 261 btrfs_item_key(buf, &disk_key, 0); 262 else 263 btrfs_node_key(buf, &disk_key, 0); 264 265 cow = btrfs_alloc_free_block(trans, root, buf->len, 0, 266 new_root_objectid, &disk_key, level, 267 buf->start, 0); 268 if (IS_ERR(cow)) 269 return PTR_ERR(cow); 270 271 copy_extent_buffer(cow, buf, 0, 0, cow->len); 272 btrfs_set_header_bytenr(cow, cow->start); 273 btrfs_set_header_generation(cow, trans->transid); 274 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV); 275 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN | 276 BTRFS_HEADER_FLAG_RELOC); 277 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID) 278 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC); 279 else 280 btrfs_set_header_owner(cow, new_root_objectid); 281 282 write_extent_buffer(cow, root->fs_info->fsid, 283 (unsigned long)btrfs_header_fsid(cow), 284 BTRFS_FSID_SIZE); 285 286 WARN_ON(btrfs_header_generation(buf) > trans->transid); 287 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID) 288 ret = btrfs_inc_ref(trans, root, cow, 1, 1); 289 else 290 ret = btrfs_inc_ref(trans, root, cow, 0, 1); 291 292 if (ret) 293 return ret; 294 295 btrfs_mark_buffer_dirty(cow); 296 *cow_ret = cow; 297 return 0; 298 } 299 300 enum mod_log_op { 301 MOD_LOG_KEY_REPLACE, 302 MOD_LOG_KEY_ADD, 303 MOD_LOG_KEY_REMOVE, 304 MOD_LOG_KEY_REMOVE_WHILE_FREEING, 305 MOD_LOG_KEY_REMOVE_WHILE_MOVING, 306 MOD_LOG_MOVE_KEYS, 307 MOD_LOG_ROOT_REPLACE, 308 }; 309 310 struct tree_mod_move { 311 int dst_slot; 312 int nr_items; 313 }; 314 315 struct tree_mod_root { 316 u64 logical; 317 u8 level; 318 }; 319 320 struct tree_mod_elem { 321 struct rb_node node; 322 u64 index; /* shifted logical */ 323 u64 seq; 324 enum mod_log_op op; 325 326 /* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */ 327 int slot; 328 329 /* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */ 330 u64 generation; 331 332 /* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */ 333 struct 
btrfs_disk_key key; 334 u64 blockptr; 335 336 /* this is used for op == MOD_LOG_MOVE_KEYS */ 337 struct tree_mod_move move; 338 339 /* this is used for op == MOD_LOG_ROOT_REPLACE */ 340 struct tree_mod_root old_root; 341 }; 342 343 static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info) 344 { 345 read_lock(&fs_info->tree_mod_log_lock); 346 } 347 348 static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info) 349 { 350 read_unlock(&fs_info->tree_mod_log_lock); 351 } 352 353 static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info) 354 { 355 write_lock(&fs_info->tree_mod_log_lock); 356 } 357 358 static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info) 359 { 360 write_unlock(&fs_info->tree_mod_log_lock); 361 } 362 363 /* 364 * This adds a new blocker to the tree mod log's blocker list if the @elem 365 * passed does not already have a sequence number set. So when a caller expects 366 * to record tree modifications, it should ensure to set elem->seq to zero 367 * before calling btrfs_get_tree_mod_seq. 368 * Returns a fresh, unused tree log modification sequence number, even if no new 369 * blocker was added. 370 */ 371 u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info, 372 struct seq_list *elem) 373 { 374 u64 seq; 375 376 tree_mod_log_write_lock(fs_info); 377 spin_lock(&fs_info->tree_mod_seq_lock); 378 if (!elem->seq) { 379 elem->seq = btrfs_inc_tree_mod_seq(fs_info); 380 list_add_tail(&elem->list, &fs_info->tree_mod_seq_list); 381 } 382 seq = btrfs_inc_tree_mod_seq(fs_info); 383 spin_unlock(&fs_info->tree_mod_seq_lock); 384 tree_mod_log_write_unlock(fs_info); 385 386 return seq; 387 } 388 389 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info, 390 struct seq_list *elem) 391 { 392 struct rb_root *tm_root; 393 struct rb_node *node; 394 struct rb_node *next; 395 struct seq_list *cur_elem; 396 struct tree_mod_elem *tm; 397 u64 min_seq = (u64)-1; 398 u64 seq_putting = elem->seq; 399 400 if (!seq_putting) 401 return; 402 403 spin_lock(&fs_info->tree_mod_seq_lock); 404 list_del(&elem->list); 405 elem->seq = 0; 406 407 list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) { 408 if (cur_elem->seq < min_seq) { 409 if (seq_putting > cur_elem->seq) { 410 /* 411 * blocker with lower sequence number exists, we 412 * cannot remove anything from the log 413 */ 414 spin_unlock(&fs_info->tree_mod_seq_lock); 415 return; 416 } 417 min_seq = cur_elem->seq; 418 } 419 } 420 spin_unlock(&fs_info->tree_mod_seq_lock); 421 422 /* 423 * anything that's lower than the lowest existing (read: blocked) 424 * sequence number can be removed from the tree. 425 */ 426 tree_mod_log_write_lock(fs_info); 427 tm_root = &fs_info->tree_mod_log; 428 for (node = rb_first(tm_root); node; node = next) { 429 next = rb_next(node); 430 tm = container_of(node, struct tree_mod_elem, node); 431 if (tm->seq > min_seq) 432 continue; 433 rb_erase(node, tm_root); 434 kfree(tm); 435 } 436 tree_mod_log_write_unlock(fs_info); 437 } 438 439 /* 440 * key order of the log: 441 * index -> sequence 442 * 443 * the index is the shifted logical of the *new* root node for root replace 444 * operations, or the shifted logical of the affected block for all other 445 * operations. 
446 */ 447 static noinline int 448 __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm) 449 { 450 struct rb_root *tm_root; 451 struct rb_node **new; 452 struct rb_node *parent = NULL; 453 struct tree_mod_elem *cur; 454 455 BUG_ON(!tm || !tm->seq); 456 457 tm_root = &fs_info->tree_mod_log; 458 new = &tm_root->rb_node; 459 while (*new) { 460 cur = container_of(*new, struct tree_mod_elem, node); 461 parent = *new; 462 if (cur->index < tm->index) 463 new = &((*new)->rb_left); 464 else if (cur->index > tm->index) 465 new = &((*new)->rb_right); 466 else if (cur->seq < tm->seq) 467 new = &((*new)->rb_left); 468 else if (cur->seq > tm->seq) 469 new = &((*new)->rb_right); 470 else { 471 kfree(tm); 472 return -EEXIST; 473 } 474 } 475 476 rb_link_node(&tm->node, parent, new); 477 rb_insert_color(&tm->node, tm_root); 478 return 0; 479 } 480 481 /* 482 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it 483 * returns zero with the tree_mod_log_lock acquired. The caller must hold 484 * this until all tree mod log insertions are recorded in the rb tree and then 485 * call tree_mod_log_write_unlock() to release. 486 */ 487 static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info, 488 struct extent_buffer *eb) { 489 smp_mb(); 490 if (list_empty(&(fs_info)->tree_mod_seq_list)) 491 return 1; 492 if (eb && btrfs_header_level(eb) == 0) 493 return 1; 494 495 tree_mod_log_write_lock(fs_info); 496 if (list_empty(&fs_info->tree_mod_seq_list)) { 497 /* 498 * someone emptied the list while we were waiting for the lock. 499 * we must not add to the list when no blocker exists. 500 */ 501 tree_mod_log_write_unlock(fs_info); 502 return 1; 503 } 504 505 return 0; 506 } 507 508 /* 509 * This allocates memory and gets a tree modification sequence number. 510 * 511 * Returns <0 on error. 512 * Returns >0 (the added sequence number) on success. 513 */ 514 static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags, 515 struct tree_mod_elem **tm_ret) 516 { 517 struct tree_mod_elem *tm; 518 519 /* 520 * once we switch from spin locks to something different, we should 521 * honor the flags parameter here. 
522 */ 523 tm = *tm_ret = kzalloc(sizeof(*tm), GFP_ATOMIC); 524 if (!tm) 525 return -ENOMEM; 526 527 tm->seq = btrfs_inc_tree_mod_seq(fs_info); 528 return tm->seq; 529 } 530 531 static inline int 532 __tree_mod_log_insert_key(struct btrfs_fs_info *fs_info, 533 struct extent_buffer *eb, int slot, 534 enum mod_log_op op, gfp_t flags) 535 { 536 int ret; 537 struct tree_mod_elem *tm; 538 539 ret = tree_mod_alloc(fs_info, flags, &tm); 540 if (ret < 0) 541 return ret; 542 543 tm->index = eb->start >> PAGE_CACHE_SHIFT; 544 if (op != MOD_LOG_KEY_ADD) { 545 btrfs_node_key(eb, &tm->key, slot); 546 tm->blockptr = btrfs_node_blockptr(eb, slot); 547 } 548 tm->op = op; 549 tm->slot = slot; 550 tm->generation = btrfs_node_ptr_generation(eb, slot); 551 552 return __tree_mod_log_insert(fs_info, tm); 553 } 554 555 static noinline int 556 tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info, 557 struct extent_buffer *eb, int slot, 558 enum mod_log_op op, gfp_t flags) 559 { 560 int ret; 561 562 if (tree_mod_dont_log(fs_info, eb)) 563 return 0; 564 565 ret = __tree_mod_log_insert_key(fs_info, eb, slot, op, flags); 566 567 tree_mod_log_write_unlock(fs_info); 568 return ret; 569 } 570 571 static noinline int 572 tree_mod_log_insert_key(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, 573 int slot, enum mod_log_op op) 574 { 575 return tree_mod_log_insert_key_mask(fs_info, eb, slot, op, GFP_NOFS); 576 } 577 578 static noinline int 579 tree_mod_log_insert_key_locked(struct btrfs_fs_info *fs_info, 580 struct extent_buffer *eb, int slot, 581 enum mod_log_op op) 582 { 583 return __tree_mod_log_insert_key(fs_info, eb, slot, op, GFP_NOFS); 584 } 585 586 static noinline int 587 tree_mod_log_insert_move(struct btrfs_fs_info *fs_info, 588 struct extent_buffer *eb, int dst_slot, int src_slot, 589 int nr_items, gfp_t flags) 590 { 591 struct tree_mod_elem *tm; 592 int ret; 593 int i; 594 595 if (tree_mod_dont_log(fs_info, eb)) 596 return 0; 597 598 /* 599 * When we override something during the move, we log these removals. 600 * This can only happen when we move towards the beginning of the 601 * buffer, i.e. dst_slot < src_slot. 
602 */ 603 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) { 604 ret = tree_mod_log_insert_key_locked(fs_info, eb, i + dst_slot, 605 MOD_LOG_KEY_REMOVE_WHILE_MOVING); 606 BUG_ON(ret < 0); 607 } 608 609 ret = tree_mod_alloc(fs_info, flags, &tm); 610 if (ret < 0) 611 goto out; 612 613 tm->index = eb->start >> PAGE_CACHE_SHIFT; 614 tm->slot = src_slot; 615 tm->move.dst_slot = dst_slot; 616 tm->move.nr_items = nr_items; 617 tm->op = MOD_LOG_MOVE_KEYS; 618 619 ret = __tree_mod_log_insert(fs_info, tm); 620 out: 621 tree_mod_log_write_unlock(fs_info); 622 return ret; 623 } 624 625 static inline void 626 __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb) 627 { 628 int i; 629 u32 nritems; 630 int ret; 631 632 if (btrfs_header_level(eb) == 0) 633 return; 634 635 nritems = btrfs_header_nritems(eb); 636 for (i = nritems - 1; i >= 0; i--) { 637 ret = tree_mod_log_insert_key_locked(fs_info, eb, i, 638 MOD_LOG_KEY_REMOVE_WHILE_FREEING); 639 BUG_ON(ret < 0); 640 } 641 } 642 643 static noinline int 644 tree_mod_log_insert_root(struct btrfs_fs_info *fs_info, 645 struct extent_buffer *old_root, 646 struct extent_buffer *new_root, gfp_t flags) 647 { 648 struct tree_mod_elem *tm; 649 int ret; 650 651 if (tree_mod_dont_log(fs_info, NULL)) 652 return 0; 653 654 ret = tree_mod_alloc(fs_info, flags, &tm); 655 if (ret < 0) 656 goto out; 657 658 tm->index = new_root->start >> PAGE_CACHE_SHIFT; 659 tm->old_root.logical = old_root->start; 660 tm->old_root.level = btrfs_header_level(old_root); 661 tm->generation = btrfs_header_generation(old_root); 662 tm->op = MOD_LOG_ROOT_REPLACE; 663 664 ret = __tree_mod_log_insert(fs_info, tm); 665 out: 666 tree_mod_log_write_unlock(fs_info); 667 return ret; 668 } 669 670 static struct tree_mod_elem * 671 __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq, 672 int smallest) 673 { 674 struct rb_root *tm_root; 675 struct rb_node *node; 676 struct tree_mod_elem *cur = NULL; 677 struct tree_mod_elem *found = NULL; 678 u64 index = start >> PAGE_CACHE_SHIFT; 679 680 tree_mod_log_read_lock(fs_info); 681 tm_root = &fs_info->tree_mod_log; 682 node = tm_root->rb_node; 683 while (node) { 684 cur = container_of(node, struct tree_mod_elem, node); 685 if (cur->index < index) { 686 node = node->rb_left; 687 } else if (cur->index > index) { 688 node = node->rb_right; 689 } else if (cur->seq < min_seq) { 690 node = node->rb_left; 691 } else if (!smallest) { 692 /* we want the node with the highest seq */ 693 if (found) 694 BUG_ON(found->seq > cur->seq); 695 found = cur; 696 node = node->rb_left; 697 } else if (cur->seq > min_seq) { 698 /* we want the node with the smallest seq */ 699 if (found) 700 BUG_ON(found->seq < cur->seq); 701 found = cur; 702 node = node->rb_right; 703 } else { 704 found = cur; 705 break; 706 } 707 } 708 tree_mod_log_read_unlock(fs_info); 709 710 return found; 711 } 712 713 /* 714 * this returns the element from the log with the smallest time sequence 715 * value that's in the log (the oldest log item). any element with a time 716 * sequence lower than min_seq will be ignored. 717 */ 718 static struct tree_mod_elem * 719 tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start, 720 u64 min_seq) 721 { 722 return __tree_mod_log_search(fs_info, start, min_seq, 1); 723 } 724 725 /* 726 * this returns the element from the log with the largest time sequence 727 * value that's in the log (the most recent log item). any element with 728 * a time sequence lower than min_seq will be ignored. 
729 */ 730 static struct tree_mod_elem * 731 tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq) 732 { 733 return __tree_mod_log_search(fs_info, start, min_seq, 0); 734 } 735 736 static noinline void 737 tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst, 738 struct extent_buffer *src, unsigned long dst_offset, 739 unsigned long src_offset, int nr_items) 740 { 741 int ret; 742 int i; 743 744 if (tree_mod_dont_log(fs_info, NULL)) 745 return; 746 747 if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0) { 748 tree_mod_log_write_unlock(fs_info); 749 return; 750 } 751 752 for (i = 0; i < nr_items; i++) { 753 ret = tree_mod_log_insert_key_locked(fs_info, src, 754 i + src_offset, 755 MOD_LOG_KEY_REMOVE); 756 BUG_ON(ret < 0); 757 ret = tree_mod_log_insert_key_locked(fs_info, dst, 758 i + dst_offset, 759 MOD_LOG_KEY_ADD); 760 BUG_ON(ret < 0); 761 } 762 763 tree_mod_log_write_unlock(fs_info); 764 } 765 766 static inline void 767 tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst, 768 int dst_offset, int src_offset, int nr_items) 769 { 770 int ret; 771 ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset, 772 nr_items, GFP_NOFS); 773 BUG_ON(ret < 0); 774 } 775 776 static noinline void 777 tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info, 778 struct extent_buffer *eb, int slot, int atomic) 779 { 780 int ret; 781 782 ret = tree_mod_log_insert_key_mask(fs_info, eb, slot, 783 MOD_LOG_KEY_REPLACE, 784 atomic ? GFP_ATOMIC : GFP_NOFS); 785 BUG_ON(ret < 0); 786 } 787 788 static noinline void 789 tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb) 790 { 791 if (tree_mod_dont_log(fs_info, eb)) 792 return; 793 794 __tree_mod_log_free_eb(fs_info, eb); 795 796 tree_mod_log_write_unlock(fs_info); 797 } 798 799 static noinline void 800 tree_mod_log_set_root_pointer(struct btrfs_root *root, 801 struct extent_buffer *new_root_node) 802 { 803 int ret; 804 ret = tree_mod_log_insert_root(root->fs_info, root->node, 805 new_root_node, GFP_NOFS); 806 BUG_ON(ret < 0); 807 } 808 809 /* 810 * check if the tree block can be shared by multiple trees 811 */ 812 int btrfs_block_can_be_shared(struct btrfs_root *root, 813 struct extent_buffer *buf) 814 { 815 /* 816 * Tree blocks not in refernece counted trees and tree roots 817 * are never shared. If a block was allocated after the last 818 * snapshot and the block was not allocated by tree relocation, 819 * we know the block is not shared. 820 */ 821 if (root->ref_cows && 822 buf != root->node && buf != root->commit_root && 823 (btrfs_header_generation(buf) <= 824 btrfs_root_last_snapshot(&root->root_item) || 825 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))) 826 return 1; 827 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 828 if (root->ref_cows && 829 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV) 830 return 1; 831 #endif 832 return 0; 833 } 834 835 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans, 836 struct btrfs_root *root, 837 struct extent_buffer *buf, 838 struct extent_buffer *cow, 839 int *last_ref) 840 { 841 u64 refs; 842 u64 owner; 843 u64 flags; 844 u64 new_flags = 0; 845 int ret; 846 847 /* 848 * Backrefs update rules: 849 * 850 * Always use full backrefs for extent pointers in tree block 851 * allocated by tree relocation. 
852 * 853 * If a shared tree block is no longer referenced by its owner 854 * tree (btrfs_header_owner(buf) == root->root_key.objectid), 855 * use full backrefs for extent pointers in tree block. 856 * 857 * If a tree block is been relocating 858 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID), 859 * use full backrefs for extent pointers in tree block. 860 * The reason for this is some operations (such as drop tree) 861 * are only allowed for blocks use full backrefs. 862 */ 863 864 if (btrfs_block_can_be_shared(root, buf)) { 865 ret = btrfs_lookup_extent_info(trans, root, buf->start, 866 buf->len, &refs, &flags); 867 if (ret) 868 return ret; 869 if (refs == 0) { 870 ret = -EROFS; 871 btrfs_std_error(root->fs_info, ret); 872 return ret; 873 } 874 } else { 875 refs = 1; 876 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID || 877 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV) 878 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF; 879 else 880 flags = 0; 881 } 882 883 owner = btrfs_header_owner(buf); 884 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID && 885 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)); 886 887 if (refs > 1) { 888 if ((owner == root->root_key.objectid || 889 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && 890 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) { 891 ret = btrfs_inc_ref(trans, root, buf, 1, 1); 892 BUG_ON(ret); /* -ENOMEM */ 893 894 if (root->root_key.objectid == 895 BTRFS_TREE_RELOC_OBJECTID) { 896 ret = btrfs_dec_ref(trans, root, buf, 0, 1); 897 BUG_ON(ret); /* -ENOMEM */ 898 ret = btrfs_inc_ref(trans, root, cow, 1, 1); 899 BUG_ON(ret); /* -ENOMEM */ 900 } 901 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF; 902 } else { 903 904 if (root->root_key.objectid == 905 BTRFS_TREE_RELOC_OBJECTID) 906 ret = btrfs_inc_ref(trans, root, cow, 1, 1); 907 else 908 ret = btrfs_inc_ref(trans, root, cow, 0, 1); 909 BUG_ON(ret); /* -ENOMEM */ 910 } 911 if (new_flags != 0) { 912 ret = btrfs_set_disk_extent_flags(trans, root, 913 buf->start, 914 buf->len, 915 new_flags, 0); 916 if (ret) 917 return ret; 918 } 919 } else { 920 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) { 921 if (root->root_key.objectid == 922 BTRFS_TREE_RELOC_OBJECTID) 923 ret = btrfs_inc_ref(trans, root, cow, 1, 1); 924 else 925 ret = btrfs_inc_ref(trans, root, cow, 0, 1); 926 BUG_ON(ret); /* -ENOMEM */ 927 ret = btrfs_dec_ref(trans, root, buf, 1, 1); 928 BUG_ON(ret); /* -ENOMEM */ 929 } 930 tree_mod_log_free_eb(root->fs_info, buf); 931 clean_tree_block(trans, root, buf); 932 *last_ref = 1; 933 } 934 return 0; 935 } 936 937 /* 938 * does the dirty work in cow of a single block. The parent block (if 939 * supplied) is updated to point to the new cow copy. The new buffer is marked 940 * dirty and returned locked. If you modify the block it needs to be marked 941 * dirty again. 942 * 943 * search_start -- an allocation hint for the new block 944 * 945 * empty_size -- a hint that you plan on doing more cow. This is the size in 946 * bytes the allocator should try to find free next to the block it returns. 947 * This is just a hint and may be ignored by the allocator. 
948 */ 949 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, 950 struct btrfs_root *root, 951 struct extent_buffer *buf, 952 struct extent_buffer *parent, int parent_slot, 953 struct extent_buffer **cow_ret, 954 u64 search_start, u64 empty_size) 955 { 956 struct btrfs_disk_key disk_key; 957 struct extent_buffer *cow; 958 int level, ret; 959 int last_ref = 0; 960 int unlock_orig = 0; 961 u64 parent_start; 962 963 if (*cow_ret == buf) 964 unlock_orig = 1; 965 966 btrfs_assert_tree_locked(buf); 967 968 WARN_ON(root->ref_cows && trans->transid != 969 root->fs_info->running_transaction->transid); 970 WARN_ON(root->ref_cows && trans->transid != root->last_trans); 971 972 level = btrfs_header_level(buf); 973 974 if (level == 0) 975 btrfs_item_key(buf, &disk_key, 0); 976 else 977 btrfs_node_key(buf, &disk_key, 0); 978 979 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) { 980 if (parent) 981 parent_start = parent->start; 982 else 983 parent_start = 0; 984 } else 985 parent_start = 0; 986 987 cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start, 988 root->root_key.objectid, &disk_key, 989 level, search_start, empty_size); 990 if (IS_ERR(cow)) 991 return PTR_ERR(cow); 992 993 /* cow is set to blocking by btrfs_init_new_buffer */ 994 995 copy_extent_buffer(cow, buf, 0, 0, cow->len); 996 btrfs_set_header_bytenr(cow, cow->start); 997 btrfs_set_header_generation(cow, trans->transid); 998 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV); 999 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN | 1000 BTRFS_HEADER_FLAG_RELOC); 1001 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) 1002 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC); 1003 else 1004 btrfs_set_header_owner(cow, root->root_key.objectid); 1005 1006 write_extent_buffer(cow, root->fs_info->fsid, 1007 (unsigned long)btrfs_header_fsid(cow), 1008 BTRFS_FSID_SIZE); 1009 1010 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref); 1011 if (ret) { 1012 btrfs_abort_transaction(trans, root, ret); 1013 return ret; 1014 } 1015 1016 if (root->ref_cows) 1017 btrfs_reloc_cow_block(trans, root, buf, cow); 1018 1019 if (buf == root->node) { 1020 WARN_ON(parent && parent != buf); 1021 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID || 1022 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV) 1023 parent_start = buf->start; 1024 else 1025 parent_start = 0; 1026 1027 extent_buffer_get(cow); 1028 tree_mod_log_set_root_pointer(root, cow); 1029 rcu_assign_pointer(root->node, cow); 1030 1031 btrfs_free_tree_block(trans, root, buf, parent_start, 1032 last_ref); 1033 free_extent_buffer(buf); 1034 add_root_to_dirty_list(root); 1035 } else { 1036 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) 1037 parent_start = parent->start; 1038 else 1039 parent_start = 0; 1040 1041 WARN_ON(trans->transid != btrfs_header_generation(parent)); 1042 tree_mod_log_insert_key(root->fs_info, parent, parent_slot, 1043 MOD_LOG_KEY_REPLACE); 1044 btrfs_set_node_blockptr(parent, parent_slot, 1045 cow->start); 1046 btrfs_set_node_ptr_generation(parent, parent_slot, 1047 trans->transid); 1048 btrfs_mark_buffer_dirty(parent); 1049 btrfs_free_tree_block(trans, root, buf, parent_start, 1050 last_ref); 1051 } 1052 if (unlock_orig) 1053 btrfs_tree_unlock(buf); 1054 free_extent_buffer_stale(buf); 1055 btrfs_mark_buffer_dirty(cow); 1056 *cow_ret = cow; 1057 return 0; 1058 } 1059 1060 /* 1061 * returns the logical address of the oldest predecessor of the given root. 
1062 * entries older than time_seq are ignored. 1063 */ 1064 static struct tree_mod_elem * 1065 __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info, 1066 struct btrfs_root *root, u64 time_seq) 1067 { 1068 struct tree_mod_elem *tm; 1069 struct tree_mod_elem *found = NULL; 1070 u64 root_logical = root->node->start; 1071 int looped = 0; 1072 1073 if (!time_seq) 1074 return 0; 1075 1076 /* 1077 * the very last operation that's logged for a root is the replacement 1078 * operation (if it is replaced at all). this has the index of the *new* 1079 * root, making it the very first operation that's logged for this root. 1080 */ 1081 while (1) { 1082 tm = tree_mod_log_search_oldest(fs_info, root_logical, 1083 time_seq); 1084 if (!looped && !tm) 1085 return 0; 1086 /* 1087 * if there are no tree operation for the oldest root, we simply 1088 * return it. this should only happen if that (old) root is at 1089 * level 0. 1090 */ 1091 if (!tm) 1092 break; 1093 1094 /* 1095 * if there's an operation that's not a root replacement, we 1096 * found the oldest version of our root. normally, we'll find a 1097 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here. 1098 */ 1099 if (tm->op != MOD_LOG_ROOT_REPLACE) 1100 break; 1101 1102 found = tm; 1103 root_logical = tm->old_root.logical; 1104 BUG_ON(root_logical == root->node->start); 1105 looped = 1; 1106 } 1107 1108 /* if there's no old root to return, return what we found instead */ 1109 if (!found) 1110 found = tm; 1111 1112 return found; 1113 } 1114 1115 /* 1116 * tm is a pointer to the first operation to rewind within eb. then, all 1117 * previous operations will be rewinded (until we reach something older than 1118 * time_seq). 1119 */ 1120 static void 1121 __tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq, 1122 struct tree_mod_elem *first_tm) 1123 { 1124 u32 n; 1125 struct rb_node *next; 1126 struct tree_mod_elem *tm = first_tm; 1127 unsigned long o_dst; 1128 unsigned long o_src; 1129 unsigned long p_size = sizeof(struct btrfs_key_ptr); 1130 1131 n = btrfs_header_nritems(eb); 1132 while (tm && tm->seq >= time_seq) { 1133 /* 1134 * all the operations are recorded with the operator used for 1135 * the modification. as we're going backwards, we do the 1136 * opposite of each operation here. 1137 */ 1138 switch (tm->op) { 1139 case MOD_LOG_KEY_REMOVE_WHILE_FREEING: 1140 BUG_ON(tm->slot < n); 1141 /* Fallthrough */ 1142 case MOD_LOG_KEY_REMOVE_WHILE_MOVING: 1143 case MOD_LOG_KEY_REMOVE: 1144 btrfs_set_node_key(eb, &tm->key, tm->slot); 1145 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr); 1146 btrfs_set_node_ptr_generation(eb, tm->slot, 1147 tm->generation); 1148 n++; 1149 break; 1150 case MOD_LOG_KEY_REPLACE: 1151 BUG_ON(tm->slot >= n); 1152 btrfs_set_node_key(eb, &tm->key, tm->slot); 1153 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr); 1154 btrfs_set_node_ptr_generation(eb, tm->slot, 1155 tm->generation); 1156 break; 1157 case MOD_LOG_KEY_ADD: 1158 /* if a move operation is needed it's in the log */ 1159 n--; 1160 break; 1161 case MOD_LOG_MOVE_KEYS: 1162 o_dst = btrfs_node_key_ptr_offset(tm->slot); 1163 o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot); 1164 memmove_extent_buffer(eb, o_dst, o_src, 1165 tm->move.nr_items * p_size); 1166 break; 1167 case MOD_LOG_ROOT_REPLACE: 1168 /* 1169 * this operation is special. for roots, this must be 1170 * handled explicitly before rewinding. 1171 * for non-roots, this operation may exist if the node 1172 * was a root: root A -> child B; then A gets empty and 1173 * B is promoted to the new root. 
in the mod log, we'll 1174 * have a root-replace operation for B, a tree block 1175 * that is no root. we simply ignore that operation. 1176 */ 1177 break; 1178 } 1179 next = rb_next(&tm->node); 1180 if (!next) 1181 break; 1182 tm = container_of(next, struct tree_mod_elem, node); 1183 if (tm->index != first_tm->index) 1184 break; 1185 } 1186 btrfs_set_header_nritems(eb, n); 1187 } 1188 1189 static struct extent_buffer * 1190 tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, 1191 u64 time_seq) 1192 { 1193 struct extent_buffer *eb_rewin; 1194 struct tree_mod_elem *tm; 1195 1196 if (!time_seq) 1197 return eb; 1198 1199 if (btrfs_header_level(eb) == 0) 1200 return eb; 1201 1202 tm = tree_mod_log_search(fs_info, eb->start, time_seq); 1203 if (!tm) 1204 return eb; 1205 1206 if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) { 1207 BUG_ON(tm->slot != 0); 1208 eb_rewin = alloc_dummy_extent_buffer(eb->start, 1209 fs_info->tree_root->nodesize); 1210 BUG_ON(!eb_rewin); 1211 btrfs_set_header_bytenr(eb_rewin, eb->start); 1212 btrfs_set_header_backref_rev(eb_rewin, 1213 btrfs_header_backref_rev(eb)); 1214 btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb)); 1215 btrfs_set_header_level(eb_rewin, btrfs_header_level(eb)); 1216 } else { 1217 eb_rewin = btrfs_clone_extent_buffer(eb); 1218 BUG_ON(!eb_rewin); 1219 } 1220 1221 extent_buffer_get(eb_rewin); 1222 free_extent_buffer(eb); 1223 1224 __tree_mod_log_rewind(eb_rewin, time_seq, tm); 1225 WARN_ON(btrfs_header_nritems(eb_rewin) > 1226 BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root)); 1227 1228 return eb_rewin; 1229 } 1230 1231 /* 1232 * get_old_root() rewinds the state of @root's root node to the given @time_seq 1233 * value. If there are no changes, the current root->root_node is returned. If 1234 * anything changed in between, there's a fresh buffer allocated on which the 1235 * rewind operations are done. In any case, the returned buffer is read locked. 1236 * Returns NULL on error (with no locks held). 
1237 */ 1238 static inline struct extent_buffer * 1239 get_old_root(struct btrfs_root *root, u64 time_seq) 1240 { 1241 struct tree_mod_elem *tm; 1242 struct extent_buffer *eb; 1243 struct extent_buffer *old; 1244 struct tree_mod_root *old_root = NULL; 1245 u64 old_generation = 0; 1246 u64 logical; 1247 u32 blocksize; 1248 1249 eb = btrfs_read_lock_root_node(root); 1250 tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq); 1251 if (!tm) 1252 return root->node; 1253 1254 if (tm->op == MOD_LOG_ROOT_REPLACE) { 1255 old_root = &tm->old_root; 1256 old_generation = tm->generation; 1257 logical = old_root->logical; 1258 } else { 1259 logical = root->node->start; 1260 } 1261 1262 tm = tree_mod_log_search(root->fs_info, logical, time_seq); 1263 if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) { 1264 btrfs_tree_read_unlock(root->node); 1265 free_extent_buffer(root->node); 1266 blocksize = btrfs_level_size(root, old_root->level); 1267 old = read_tree_block(root, logical, blocksize, 0); 1268 if (!old) { 1269 pr_warn("btrfs: failed to read tree block %llu from get_old_root\n", 1270 logical); 1271 WARN_ON(1); 1272 } else { 1273 eb = btrfs_clone_extent_buffer(old); 1274 free_extent_buffer(old); 1275 } 1276 } else if (old_root) { 1277 btrfs_tree_read_unlock(root->node); 1278 free_extent_buffer(root->node); 1279 eb = alloc_dummy_extent_buffer(logical, root->nodesize); 1280 } else { 1281 eb = btrfs_clone_extent_buffer(root->node); 1282 btrfs_tree_read_unlock(root->node); 1283 free_extent_buffer(root->node); 1284 } 1285 1286 if (!eb) 1287 return NULL; 1288 extent_buffer_get(eb); 1289 btrfs_tree_read_lock(eb); 1290 if (old_root) { 1291 btrfs_set_header_bytenr(eb, eb->start); 1292 btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV); 1293 btrfs_set_header_owner(eb, root->root_key.objectid); 1294 btrfs_set_header_level(eb, old_root->level); 1295 btrfs_set_header_generation(eb, old_generation); 1296 } 1297 if (tm) 1298 __tree_mod_log_rewind(eb, time_seq, tm); 1299 else 1300 WARN_ON(btrfs_header_level(eb) != 0); 1301 WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root)); 1302 1303 return eb; 1304 } 1305 1306 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq) 1307 { 1308 struct tree_mod_elem *tm; 1309 int level; 1310 1311 tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq); 1312 if (tm && tm->op == MOD_LOG_ROOT_REPLACE) { 1313 level = tm->old_root.level; 1314 } else { 1315 rcu_read_lock(); 1316 level = btrfs_header_level(root->node); 1317 rcu_read_unlock(); 1318 } 1319 1320 return level; 1321 } 1322 1323 static inline int should_cow_block(struct btrfs_trans_handle *trans, 1324 struct btrfs_root *root, 1325 struct extent_buffer *buf) 1326 { 1327 /* ensure we can see the force_cow */ 1328 smp_rmb(); 1329 1330 /* 1331 * We do not need to cow a block if 1332 * 1) this block is not created or changed in this transaction; 1333 * 2) this block does not belong to TREE_RELOC tree; 1334 * 3) the root is not forced COW. 1335 * 1336 * What is forced COW: 1337 * when we create snapshot during commiting the transaction, 1338 * after we've finished coping src root, we must COW the shared 1339 * block to ensure the metadata consistency. 
1340 */ 1341 if (btrfs_header_generation(buf) == trans->transid && 1342 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) && 1343 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID && 1344 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) && 1345 !root->force_cow) 1346 return 0; 1347 return 1; 1348 } 1349 1350 /* 1351 * cows a single block, see __btrfs_cow_block for the real work. 1352 * This version of it has extra checks so that a block isn't cow'd more than 1353 * once per transaction, as long as it hasn't been written yet 1354 */ 1355 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans, 1356 struct btrfs_root *root, struct extent_buffer *buf, 1357 struct extent_buffer *parent, int parent_slot, 1358 struct extent_buffer **cow_ret) 1359 { 1360 u64 search_start; 1361 int ret; 1362 1363 if (trans->transaction != root->fs_info->running_transaction) 1364 WARN(1, KERN_CRIT "trans %llu running %llu\n", 1365 (unsigned long long)trans->transid, 1366 (unsigned long long) 1367 root->fs_info->running_transaction->transid); 1368 1369 if (trans->transid != root->fs_info->generation) 1370 WARN(1, KERN_CRIT "trans %llu running %llu\n", 1371 (unsigned long long)trans->transid, 1372 (unsigned long long)root->fs_info->generation); 1373 1374 if (!should_cow_block(trans, root, buf)) { 1375 *cow_ret = buf; 1376 return 0; 1377 } 1378 1379 search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1); 1380 1381 if (parent) 1382 btrfs_set_lock_blocking(parent); 1383 btrfs_set_lock_blocking(buf); 1384 1385 ret = __btrfs_cow_block(trans, root, buf, parent, 1386 parent_slot, cow_ret, search_start, 0); 1387 1388 trace_btrfs_cow_block(root, buf, *cow_ret); 1389 1390 return ret; 1391 } 1392 1393 /* 1394 * helper function for defrag to decide if two blocks pointed to by a 1395 * node are actually close by 1396 */ 1397 static int close_blocks(u64 blocknr, u64 other, u32 blocksize) 1398 { 1399 if (blocknr < other && other - (blocknr + blocksize) < 32768) 1400 return 1; 1401 if (blocknr > other && blocknr - (other + blocksize) < 32768) 1402 return 1; 1403 return 0; 1404 } 1405 1406 /* 1407 * compare two keys in a memcmp fashion 1408 */ 1409 static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2) 1410 { 1411 struct btrfs_key k1; 1412 1413 btrfs_disk_key_to_cpu(&k1, disk); 1414 1415 return btrfs_comp_cpu_keys(&k1, k2); 1416 } 1417 1418 /* 1419 * same as comp_keys only with two btrfs_key's 1420 */ 1421 int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2) 1422 { 1423 if (k1->objectid > k2->objectid) 1424 return 1; 1425 if (k1->objectid < k2->objectid) 1426 return -1; 1427 if (k1->type > k2->type) 1428 return 1; 1429 if (k1->type < k2->type) 1430 return -1; 1431 if (k1->offset > k2->offset) 1432 return 1; 1433 if (k1->offset < k2->offset) 1434 return -1; 1435 return 0; 1436 } 1437 1438 /* 1439 * this is used by the defrag code to go through all the 1440 * leaves pointed to by a node and reallocate them so that 1441 * disk order is close to key order 1442 */ 1443 int btrfs_realloc_node(struct btrfs_trans_handle *trans, 1444 struct btrfs_root *root, struct extent_buffer *parent, 1445 int start_slot, u64 *last_ret, 1446 struct btrfs_key *progress) 1447 { 1448 struct extent_buffer *cur; 1449 u64 blocknr; 1450 u64 gen; 1451 u64 search_start = *last_ret; 1452 u64 last_block = 0; 1453 u64 other; 1454 u32 parent_nritems; 1455 int end_slot; 1456 int i; 1457 int err = 0; 1458 int parent_level; 1459 int uptodate; 1460 u32 blocksize; 1461 int progress_passed = 0; 1462 struct btrfs_disk_key 
disk_key; 1463 1464 parent_level = btrfs_header_level(parent); 1465 1466 WARN_ON(trans->transaction != root->fs_info->running_transaction); 1467 WARN_ON(trans->transid != root->fs_info->generation); 1468 1469 parent_nritems = btrfs_header_nritems(parent); 1470 blocksize = btrfs_level_size(root, parent_level - 1); 1471 end_slot = parent_nritems; 1472 1473 if (parent_nritems == 1) 1474 return 0; 1475 1476 btrfs_set_lock_blocking(parent); 1477 1478 for (i = start_slot; i < end_slot; i++) { 1479 int close = 1; 1480 1481 btrfs_node_key(parent, &disk_key, i); 1482 if (!progress_passed && comp_keys(&disk_key, progress) < 0) 1483 continue; 1484 1485 progress_passed = 1; 1486 blocknr = btrfs_node_blockptr(parent, i); 1487 gen = btrfs_node_ptr_generation(parent, i); 1488 if (last_block == 0) 1489 last_block = blocknr; 1490 1491 if (i > 0) { 1492 other = btrfs_node_blockptr(parent, i - 1); 1493 close = close_blocks(blocknr, other, blocksize); 1494 } 1495 if (!close && i < end_slot - 2) { 1496 other = btrfs_node_blockptr(parent, i + 1); 1497 close = close_blocks(blocknr, other, blocksize); 1498 } 1499 if (close) { 1500 last_block = blocknr; 1501 continue; 1502 } 1503 1504 cur = btrfs_find_tree_block(root, blocknr, blocksize); 1505 if (cur) 1506 uptodate = btrfs_buffer_uptodate(cur, gen, 0); 1507 else 1508 uptodate = 0; 1509 if (!cur || !uptodate) { 1510 if (!cur) { 1511 cur = read_tree_block(root, blocknr, 1512 blocksize, gen); 1513 if (!cur) 1514 return -EIO; 1515 } else if (!uptodate) { 1516 err = btrfs_read_buffer(cur, gen); 1517 if (err) { 1518 free_extent_buffer(cur); 1519 return err; 1520 } 1521 } 1522 } 1523 if (search_start == 0) 1524 search_start = last_block; 1525 1526 btrfs_tree_lock(cur); 1527 btrfs_set_lock_blocking(cur); 1528 err = __btrfs_cow_block(trans, root, cur, parent, i, 1529 &cur, search_start, 1530 min(16 * blocksize, 1531 (end_slot - i) * blocksize)); 1532 if (err) { 1533 btrfs_tree_unlock(cur); 1534 free_extent_buffer(cur); 1535 break; 1536 } 1537 search_start = cur->start; 1538 last_block = cur->start; 1539 *last_ret = search_start; 1540 btrfs_tree_unlock(cur); 1541 free_extent_buffer(cur); 1542 } 1543 return err; 1544 } 1545 1546 /* 1547 * The leaf data grows from end-to-front in the node. 1548 * this returns the address of the start of the last item, 1549 * which is the stop of the leaf data stack 1550 */ 1551 static inline unsigned int leaf_data_end(struct btrfs_root *root, 1552 struct extent_buffer *leaf) 1553 { 1554 u32 nr = btrfs_header_nritems(leaf); 1555 if (nr == 0) 1556 return BTRFS_LEAF_DATA_SIZE(root); 1557 return btrfs_item_offset_nr(leaf, nr - 1); 1558 } 1559 1560 1561 /* 1562 * search for key in the extent_buffer. The items start at offset p, 1563 * and they are item_size apart. There are 'max' items in p. 1564 * 1565 * the slot in the array is returned via slot, and it points to 1566 * the place where you would insert key if it is not found in 1567 * the array. 
1568 * 1569 * slot may point to max if the key is bigger than all of the keys 1570 */ 1571 static noinline int generic_bin_search(struct extent_buffer *eb, 1572 unsigned long p, 1573 int item_size, struct btrfs_key *key, 1574 int max, int *slot) 1575 { 1576 int low = 0; 1577 int high = max; 1578 int mid; 1579 int ret; 1580 struct btrfs_disk_key *tmp = NULL; 1581 struct btrfs_disk_key unaligned; 1582 unsigned long offset; 1583 char *kaddr = NULL; 1584 unsigned long map_start = 0; 1585 unsigned long map_len = 0; 1586 int err; 1587 1588 while (low < high) { 1589 mid = (low + high) / 2; 1590 offset = p + mid * item_size; 1591 1592 if (!kaddr || offset < map_start || 1593 (offset + sizeof(struct btrfs_disk_key)) > 1594 map_start + map_len) { 1595 1596 err = map_private_extent_buffer(eb, offset, 1597 sizeof(struct btrfs_disk_key), 1598 &kaddr, &map_start, &map_len); 1599 1600 if (!err) { 1601 tmp = (struct btrfs_disk_key *)(kaddr + offset - 1602 map_start); 1603 } else { 1604 read_extent_buffer(eb, &unaligned, 1605 offset, sizeof(unaligned)); 1606 tmp = &unaligned; 1607 } 1608 1609 } else { 1610 tmp = (struct btrfs_disk_key *)(kaddr + offset - 1611 map_start); 1612 } 1613 ret = comp_keys(tmp, key); 1614 1615 if (ret < 0) 1616 low = mid + 1; 1617 else if (ret > 0) 1618 high = mid; 1619 else { 1620 *slot = mid; 1621 return 0; 1622 } 1623 } 1624 *slot = low; 1625 return 1; 1626 } 1627 1628 /* 1629 * simple bin_search frontend that does the right thing for 1630 * leaves vs nodes 1631 */ 1632 static int bin_search(struct extent_buffer *eb, struct btrfs_key *key, 1633 int level, int *slot) 1634 { 1635 if (level == 0) 1636 return generic_bin_search(eb, 1637 offsetof(struct btrfs_leaf, items), 1638 sizeof(struct btrfs_item), 1639 key, btrfs_header_nritems(eb), 1640 slot); 1641 else 1642 return generic_bin_search(eb, 1643 offsetof(struct btrfs_node, ptrs), 1644 sizeof(struct btrfs_key_ptr), 1645 key, btrfs_header_nritems(eb), 1646 slot); 1647 } 1648 1649 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, 1650 int level, int *slot) 1651 { 1652 return bin_search(eb, key, level, slot); 1653 } 1654 1655 static void root_add_used(struct btrfs_root *root, u32 size) 1656 { 1657 spin_lock(&root->accounting_lock); 1658 btrfs_set_root_used(&root->root_item, 1659 btrfs_root_used(&root->root_item) + size); 1660 spin_unlock(&root->accounting_lock); 1661 } 1662 1663 static void root_sub_used(struct btrfs_root *root, u32 size) 1664 { 1665 spin_lock(&root->accounting_lock); 1666 btrfs_set_root_used(&root->root_item, 1667 btrfs_root_used(&root->root_item) - size); 1668 spin_unlock(&root->accounting_lock); 1669 } 1670 1671 /* given a node and slot number, this reads the blocks it points to. The 1672 * extent buffer is returned with a reference taken (but unlocked). 1673 * NULL is returned on error. 1674 */ 1675 static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root, 1676 struct extent_buffer *parent, int slot) 1677 { 1678 int level = btrfs_header_level(parent); 1679 if (slot < 0) 1680 return NULL; 1681 if (slot >= btrfs_header_nritems(parent)) 1682 return NULL; 1683 1684 BUG_ON(level == 0); 1685 1686 return read_tree_block(root, btrfs_node_blockptr(parent, slot), 1687 btrfs_level_size(root, level - 1), 1688 btrfs_node_ptr_generation(parent, slot)); 1689 } 1690 1691 /* 1692 * node level balancing, used to make sure nodes are in proper order for 1693 * item deletion. 
We balance from the top down, so we have to make sure 1694 * that a deletion won't leave an node completely empty later on. 1695 */ 1696 static noinline int balance_level(struct btrfs_trans_handle *trans, 1697 struct btrfs_root *root, 1698 struct btrfs_path *path, int level) 1699 { 1700 struct extent_buffer *right = NULL; 1701 struct extent_buffer *mid; 1702 struct extent_buffer *left = NULL; 1703 struct extent_buffer *parent = NULL; 1704 int ret = 0; 1705 int wret; 1706 int pslot; 1707 int orig_slot = path->slots[level]; 1708 u64 orig_ptr; 1709 1710 if (level == 0) 1711 return 0; 1712 1713 mid = path->nodes[level]; 1714 1715 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK && 1716 path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING); 1717 WARN_ON(btrfs_header_generation(mid) != trans->transid); 1718 1719 orig_ptr = btrfs_node_blockptr(mid, orig_slot); 1720 1721 if (level < BTRFS_MAX_LEVEL - 1) { 1722 parent = path->nodes[level + 1]; 1723 pslot = path->slots[level + 1]; 1724 } 1725 1726 /* 1727 * deal with the case where there is only one pointer in the root 1728 * by promoting the node below to a root 1729 */ 1730 if (!parent) { 1731 struct extent_buffer *child; 1732 1733 if (btrfs_header_nritems(mid) != 1) 1734 return 0; 1735 1736 /* promote the child to a root */ 1737 child = read_node_slot(root, mid, 0); 1738 if (!child) { 1739 ret = -EROFS; 1740 btrfs_std_error(root->fs_info, ret); 1741 goto enospc; 1742 } 1743 1744 btrfs_tree_lock(child); 1745 btrfs_set_lock_blocking(child); 1746 ret = btrfs_cow_block(trans, root, child, mid, 0, &child); 1747 if (ret) { 1748 btrfs_tree_unlock(child); 1749 free_extent_buffer(child); 1750 goto enospc; 1751 } 1752 1753 tree_mod_log_free_eb(root->fs_info, root->node); 1754 tree_mod_log_set_root_pointer(root, child); 1755 rcu_assign_pointer(root->node, child); 1756 1757 add_root_to_dirty_list(root); 1758 btrfs_tree_unlock(child); 1759 1760 path->locks[level] = 0; 1761 path->nodes[level] = NULL; 1762 clean_tree_block(trans, root, mid); 1763 btrfs_tree_unlock(mid); 1764 /* once for the path */ 1765 free_extent_buffer(mid); 1766 1767 root_sub_used(root, mid->len); 1768 btrfs_free_tree_block(trans, root, mid, 0, 1); 1769 /* once for the root ptr */ 1770 free_extent_buffer_stale(mid); 1771 return 0; 1772 } 1773 if (btrfs_header_nritems(mid) > 1774 BTRFS_NODEPTRS_PER_BLOCK(root) / 4) 1775 return 0; 1776 1777 left = read_node_slot(root, parent, pslot - 1); 1778 if (left) { 1779 btrfs_tree_lock(left); 1780 btrfs_set_lock_blocking(left); 1781 wret = btrfs_cow_block(trans, root, left, 1782 parent, pslot - 1, &left); 1783 if (wret) { 1784 ret = wret; 1785 goto enospc; 1786 } 1787 } 1788 right = read_node_slot(root, parent, pslot + 1); 1789 if (right) { 1790 btrfs_tree_lock(right); 1791 btrfs_set_lock_blocking(right); 1792 wret = btrfs_cow_block(trans, root, right, 1793 parent, pslot + 1, &right); 1794 if (wret) { 1795 ret = wret; 1796 goto enospc; 1797 } 1798 } 1799 1800 /* first, try to make some room in the middle buffer */ 1801 if (left) { 1802 orig_slot += btrfs_header_nritems(left); 1803 wret = push_node_left(trans, root, left, mid, 1); 1804 if (wret < 0) 1805 ret = wret; 1806 } 1807 1808 /* 1809 * then try to empty the right most buffer into the middle 1810 */ 1811 if (right) { 1812 wret = push_node_left(trans, root, mid, right, 1); 1813 if (wret < 0 && wret != -ENOSPC) 1814 ret = wret; 1815 if (btrfs_header_nritems(right) == 0) { 1816 clean_tree_block(trans, root, right); 1817 btrfs_tree_unlock(right); 1818 del_ptr(trans, root, path, level + 1, pslot + 1); 1819 
root_sub_used(root, right->len); 1820 btrfs_free_tree_block(trans, root, right, 0, 1); 1821 free_extent_buffer_stale(right); 1822 right = NULL; 1823 } else { 1824 struct btrfs_disk_key right_key; 1825 btrfs_node_key(right, &right_key, 0); 1826 tree_mod_log_set_node_key(root->fs_info, parent, 1827 pslot + 1, 0); 1828 btrfs_set_node_key(parent, &right_key, pslot + 1); 1829 btrfs_mark_buffer_dirty(parent); 1830 } 1831 } 1832 if (btrfs_header_nritems(mid) == 1) { 1833 /* 1834 * we're not allowed to leave a node with one item in the 1835 * tree during a delete. A deletion from lower in the tree 1836 * could try to delete the only pointer in this node. 1837 * So, pull some keys from the left. 1838 * There has to be a left pointer at this point because 1839 * otherwise we would have pulled some pointers from the 1840 * right 1841 */ 1842 if (!left) { 1843 ret = -EROFS; 1844 btrfs_std_error(root->fs_info, ret); 1845 goto enospc; 1846 } 1847 wret = balance_node_right(trans, root, mid, left); 1848 if (wret < 0) { 1849 ret = wret; 1850 goto enospc; 1851 } 1852 if (wret == 1) { 1853 wret = push_node_left(trans, root, left, mid, 1); 1854 if (wret < 0) 1855 ret = wret; 1856 } 1857 BUG_ON(wret == 1); 1858 } 1859 if (btrfs_header_nritems(mid) == 0) { 1860 clean_tree_block(trans, root, mid); 1861 btrfs_tree_unlock(mid); 1862 del_ptr(trans, root, path, level + 1, pslot); 1863 root_sub_used(root, mid->len); 1864 btrfs_free_tree_block(trans, root, mid, 0, 1); 1865 free_extent_buffer_stale(mid); 1866 mid = NULL; 1867 } else { 1868 /* update the parent key to reflect our changes */ 1869 struct btrfs_disk_key mid_key; 1870 btrfs_node_key(mid, &mid_key, 0); 1871 tree_mod_log_set_node_key(root->fs_info, parent, 1872 pslot, 0); 1873 btrfs_set_node_key(parent, &mid_key, pslot); 1874 btrfs_mark_buffer_dirty(parent); 1875 } 1876 1877 /* update the path */ 1878 if (left) { 1879 if (btrfs_header_nritems(left) > orig_slot) { 1880 extent_buffer_get(left); 1881 /* left was locked after cow */ 1882 path->nodes[level] = left; 1883 path->slots[level + 1] -= 1; 1884 path->slots[level] = orig_slot; 1885 if (mid) { 1886 btrfs_tree_unlock(mid); 1887 free_extent_buffer(mid); 1888 } 1889 } else { 1890 orig_slot -= btrfs_header_nritems(left); 1891 path->slots[level] = orig_slot; 1892 } 1893 } 1894 /* double check we haven't messed things up */ 1895 if (orig_ptr != 1896 btrfs_node_blockptr(path->nodes[level], path->slots[level])) 1897 BUG(); 1898 enospc: 1899 if (right) { 1900 btrfs_tree_unlock(right); 1901 free_extent_buffer(right); 1902 } 1903 if (left) { 1904 if (path->nodes[level] != left) 1905 btrfs_tree_unlock(left); 1906 free_extent_buffer(left); 1907 } 1908 return ret; 1909 } 1910 1911 /* Node balancing for insertion. Here we only split or push nodes around 1912 * when they are completely full. This is also done top down, so we 1913 * have to be pessimistic. 
1914 */ 1915 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans, 1916 struct btrfs_root *root, 1917 struct btrfs_path *path, int level) 1918 { 1919 struct extent_buffer *right = NULL; 1920 struct extent_buffer *mid; 1921 struct extent_buffer *left = NULL; 1922 struct extent_buffer *parent = NULL; 1923 int ret = 0; 1924 int wret; 1925 int pslot; 1926 int orig_slot = path->slots[level]; 1927 1928 if (level == 0) 1929 return 1; 1930 1931 mid = path->nodes[level]; 1932 WARN_ON(btrfs_header_generation(mid) != trans->transid); 1933 1934 if (level < BTRFS_MAX_LEVEL - 1) { 1935 parent = path->nodes[level + 1]; 1936 pslot = path->slots[level + 1]; 1937 } 1938 1939 if (!parent) 1940 return 1; 1941 1942 left = read_node_slot(root, parent, pslot - 1); 1943 1944 /* first, try to make some room in the middle buffer */ 1945 if (left) { 1946 u32 left_nr; 1947 1948 btrfs_tree_lock(left); 1949 btrfs_set_lock_blocking(left); 1950 1951 left_nr = btrfs_header_nritems(left); 1952 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) { 1953 wret = 1; 1954 } else { 1955 ret = btrfs_cow_block(trans, root, left, parent, 1956 pslot - 1, &left); 1957 if (ret) 1958 wret = 1; 1959 else { 1960 wret = push_node_left(trans, root, 1961 left, mid, 0); 1962 } 1963 } 1964 if (wret < 0) 1965 ret = wret; 1966 if (wret == 0) { 1967 struct btrfs_disk_key disk_key; 1968 orig_slot += left_nr; 1969 btrfs_node_key(mid, &disk_key, 0); 1970 tree_mod_log_set_node_key(root->fs_info, parent, 1971 pslot, 0); 1972 btrfs_set_node_key(parent, &disk_key, pslot); 1973 btrfs_mark_buffer_dirty(parent); 1974 if (btrfs_header_nritems(left) > orig_slot) { 1975 path->nodes[level] = left; 1976 path->slots[level + 1] -= 1; 1977 path->slots[level] = orig_slot; 1978 btrfs_tree_unlock(mid); 1979 free_extent_buffer(mid); 1980 } else { 1981 orig_slot -= 1982 btrfs_header_nritems(left); 1983 path->slots[level] = orig_slot; 1984 btrfs_tree_unlock(left); 1985 free_extent_buffer(left); 1986 } 1987 return 0; 1988 } 1989 btrfs_tree_unlock(left); 1990 free_extent_buffer(left); 1991 } 1992 right = read_node_slot(root, parent, pslot + 1); 1993 1994 /* 1995 * then try to empty the right most buffer into the middle 1996 */ 1997 if (right) { 1998 u32 right_nr; 1999 2000 btrfs_tree_lock(right); 2001 btrfs_set_lock_blocking(right); 2002 2003 right_nr = btrfs_header_nritems(right); 2004 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) { 2005 wret = 1; 2006 } else { 2007 ret = btrfs_cow_block(trans, root, right, 2008 parent, pslot + 1, 2009 &right); 2010 if (ret) 2011 wret = 1; 2012 else { 2013 wret = balance_node_right(trans, root, 2014 right, mid); 2015 } 2016 } 2017 if (wret < 0) 2018 ret = wret; 2019 if (wret == 0) { 2020 struct btrfs_disk_key disk_key; 2021 2022 btrfs_node_key(right, &disk_key, 0); 2023 tree_mod_log_set_node_key(root->fs_info, parent, 2024 pslot + 1, 0); 2025 btrfs_set_node_key(parent, &disk_key, pslot + 1); 2026 btrfs_mark_buffer_dirty(parent); 2027 2028 if (btrfs_header_nritems(mid) <= orig_slot) { 2029 path->nodes[level] = right; 2030 path->slots[level + 1] += 1; 2031 path->slots[level] = orig_slot - 2032 btrfs_header_nritems(mid); 2033 btrfs_tree_unlock(mid); 2034 free_extent_buffer(mid); 2035 } else { 2036 btrfs_tree_unlock(right); 2037 free_extent_buffer(right); 2038 } 2039 return 0; 2040 } 2041 btrfs_tree_unlock(right); 2042 free_extent_buffer(right); 2043 } 2044 return 1; 2045 } 2046 2047 /* 2048 * readahead one full node of leaves, finding things that are close 2049 * to the block in 'slot', and triggering ra on them. 
2050 */ 2051 static void reada_for_search(struct btrfs_root *root, 2052 struct btrfs_path *path, 2053 int level, int slot, u64 objectid) 2054 { 2055 struct extent_buffer *node; 2056 struct btrfs_disk_key disk_key; 2057 u32 nritems; 2058 u64 search; 2059 u64 target; 2060 u64 nread = 0; 2061 u64 gen; 2062 int direction = path->reada; 2063 struct extent_buffer *eb; 2064 u32 nr; 2065 u32 blocksize; 2066 u32 nscan = 0; 2067 2068 if (level != 1) 2069 return; 2070 2071 if (!path->nodes[level]) 2072 return; 2073 2074 node = path->nodes[level]; 2075 2076 search = btrfs_node_blockptr(node, slot); 2077 blocksize = btrfs_level_size(root, level - 1); 2078 eb = btrfs_find_tree_block(root, search, blocksize); 2079 if (eb) { 2080 free_extent_buffer(eb); 2081 return; 2082 } 2083 2084 target = search; 2085 2086 nritems = btrfs_header_nritems(node); 2087 nr = slot; 2088 2089 while (1) { 2090 if (direction < 0) { 2091 if (nr == 0) 2092 break; 2093 nr--; 2094 } else if (direction > 0) { 2095 nr++; 2096 if (nr >= nritems) 2097 break; 2098 } 2099 if (path->reada < 0 && objectid) { 2100 btrfs_node_key(node, &disk_key, nr); 2101 if (btrfs_disk_key_objectid(&disk_key) != objectid) 2102 break; 2103 } 2104 search = btrfs_node_blockptr(node, nr); 2105 if ((search <= target && target - search <= 65536) || 2106 (search > target && search - target <= 65536)) { 2107 gen = btrfs_node_ptr_generation(node, nr); 2108 readahead_tree_block(root, search, blocksize, gen); 2109 nread += blocksize; 2110 } 2111 nscan++; 2112 if ((nread > 65536 || nscan > 32)) 2113 break; 2114 } 2115 } 2116 2117 /* 2118 * returns -EAGAIN if it had to drop the path, or zero if everything was in 2119 * cache 2120 */ 2121 static noinline int reada_for_balance(struct btrfs_root *root, 2122 struct btrfs_path *path, int level) 2123 { 2124 int slot; 2125 int nritems; 2126 struct extent_buffer *parent; 2127 struct extent_buffer *eb; 2128 u64 gen; 2129 u64 block1 = 0; 2130 u64 block2 = 0; 2131 int ret = 0; 2132 int blocksize; 2133 2134 parent = path->nodes[level + 1]; 2135 if (!parent) 2136 return 0; 2137 2138 nritems = btrfs_header_nritems(parent); 2139 slot = path->slots[level + 1]; 2140 blocksize = btrfs_level_size(root, level); 2141 2142 if (slot > 0) { 2143 block1 = btrfs_node_blockptr(parent, slot - 1); 2144 gen = btrfs_node_ptr_generation(parent, slot - 1); 2145 eb = btrfs_find_tree_block(root, block1, blocksize); 2146 /* 2147 * if we get -eagain from btrfs_buffer_uptodate, we 2148 * don't want to return eagain here. 
That will loop 2149 * forever 2150 */ 2151 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0) 2152 block1 = 0; 2153 free_extent_buffer(eb); 2154 } 2155 if (slot + 1 < nritems) { 2156 block2 = btrfs_node_blockptr(parent, slot + 1); 2157 gen = btrfs_node_ptr_generation(parent, slot + 1); 2158 eb = btrfs_find_tree_block(root, block2, blocksize); 2159 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0) 2160 block2 = 0; 2161 free_extent_buffer(eb); 2162 } 2163 if (block1 || block2) { 2164 ret = -EAGAIN; 2165 2166 /* release the whole path */ 2167 btrfs_release_path(path); 2168 2169 /* read the blocks */ 2170 if (block1) 2171 readahead_tree_block(root, block1, blocksize, 0); 2172 if (block2) 2173 readahead_tree_block(root, block2, blocksize, 0); 2174 2175 if (block1) { 2176 eb = read_tree_block(root, block1, blocksize, 0); 2177 free_extent_buffer(eb); 2178 } 2179 if (block2) { 2180 eb = read_tree_block(root, block2, blocksize, 0); 2181 free_extent_buffer(eb); 2182 } 2183 } 2184 return ret; 2185 } 2186 2187 2188 /* 2189 * when we walk down the tree, it is usually safe to unlock the higher layers 2190 * in the tree. The exceptions are when our path goes through slot 0, because 2191 * operations on the tree might require changing key pointers higher up in the 2192 * tree. 2193 * 2194 * callers might also have set path->keep_locks, which tells this code to keep 2195 * the lock if the path points to the last slot in the block. This is part of 2196 * walking through the tree, and selecting the next slot in the higher block. 2197 * 2198 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so 2199 * if lowest_unlock is 1, level 0 won't be unlocked 2200 */ 2201 static noinline void unlock_up(struct btrfs_path *path, int level, 2202 int lowest_unlock, int min_write_lock_level, 2203 int *write_lock_level) 2204 { 2205 int i; 2206 int skip_level = level; 2207 int no_skips = 0; 2208 struct extent_buffer *t; 2209 2210 if (path->really_keep_locks) 2211 return; 2212 2213 for (i = level; i < BTRFS_MAX_LEVEL; i++) { 2214 if (!path->nodes[i]) 2215 break; 2216 if (!path->locks[i]) 2217 break; 2218 if (!no_skips && path->slots[i] == 0) { 2219 skip_level = i + 1; 2220 continue; 2221 } 2222 if (!no_skips && path->keep_locks) { 2223 u32 nritems; 2224 t = path->nodes[i]; 2225 nritems = btrfs_header_nritems(t); 2226 if (nritems < 1 || path->slots[i] >= nritems - 1) { 2227 skip_level = i + 1; 2228 continue; 2229 } 2230 } 2231 if (skip_level < i && i >= lowest_unlock) 2232 no_skips = 1; 2233 2234 t = path->nodes[i]; 2235 if (i >= lowest_unlock && i > skip_level && path->locks[i]) { 2236 btrfs_tree_unlock_rw(t, path->locks[i]); 2237 path->locks[i] = 0; 2238 if (write_lock_level && 2239 i > min_write_lock_level && 2240 i <= *write_lock_level) { 2241 *write_lock_level = i - 1; 2242 } 2243 } 2244 } 2245 } 2246 2247 /* 2248 * This releases any locks held in the path starting at level and 2249 * going all the way up to the root. 2250 * 2251 * btrfs_search_slot will keep the lock held on higher nodes in a few 2252 * corner cases, such as COW of the block at slot zero in the node. This 2253 * ignores those rules, and it should only be called when there are no 2254 * more updates to be done higher up in the tree. 
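* Paths that have keep_locks or really_keep_locks set are left untouched here.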
2255 */ 2256 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level) 2257 { 2258 int i; 2259 2260 if (path->keep_locks || path->really_keep_locks) 2261 return; 2262 2263 for (i = level; i < BTRFS_MAX_LEVEL; i++) { 2264 if (!path->nodes[i]) 2265 continue; 2266 if (!path->locks[i]) 2267 continue; 2268 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]); 2269 path->locks[i] = 0; 2270 } 2271 } 2272 2273 /* 2274 * helper function for btrfs_search_slot. The goal is to find a block 2275 * in cache without setting the path to blocking. If we find the block 2276 * we return zero and the path is unchanged. 2277 * 2278 * If we can't find the block, we set the path blocking and do some 2279 * reada. -EAGAIN is returned and the search must be repeated. 2280 */ 2281 static int 2282 read_block_for_search(struct btrfs_trans_handle *trans, 2283 struct btrfs_root *root, struct btrfs_path *p, 2284 struct extent_buffer **eb_ret, int level, int slot, 2285 struct btrfs_key *key, u64 time_seq) 2286 { 2287 u64 blocknr; 2288 u64 gen; 2289 u32 blocksize; 2290 struct extent_buffer *b = *eb_ret; 2291 struct extent_buffer *tmp; 2292 int ret; 2293 2294 blocknr = btrfs_node_blockptr(b, slot); 2295 gen = btrfs_node_ptr_generation(b, slot); 2296 blocksize = btrfs_level_size(root, level - 1); 2297 2298 tmp = btrfs_find_tree_block(root, blocknr, blocksize); 2299 if (tmp) { 2300 /* first we do an atomic uptodate check */ 2301 if (btrfs_buffer_uptodate(tmp, 0, 1) > 0) { 2302 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) { 2303 /* 2304 * we found an up to date block without 2305 * sleeping, return 2306 * right away 2307 */ 2308 *eb_ret = tmp; 2309 return 0; 2310 } 2311 /* the pages were up to date, but we failed 2312 * the generation number check. Do a full 2313 * read for the generation number that is correct. 2314 * We must do this without dropping locks so 2315 * we can trust our generation number 2316 */ 2317 free_extent_buffer(tmp); 2318 btrfs_set_path_blocking(p); 2319 2320 /* now we're allowed to do a blocking uptodate check */ 2321 tmp = read_tree_block(root, blocknr, blocksize, gen); 2322 if (tmp && btrfs_buffer_uptodate(tmp, gen, 0) > 0) { 2323 *eb_ret = tmp; 2324 return 0; 2325 } 2326 free_extent_buffer(tmp); 2327 btrfs_release_path(p); 2328 return -EIO; 2329 } 2330 } 2331 2332 /* 2333 * reduce lock contention at high levels 2334 * of the btree by dropping locks before 2335 * we read. Don't release the lock on the current 2336 * level because we need to walk this node to figure 2337 * out which blocks to read. 2338 */ 2339 btrfs_unlock_up_safe(p, level + 1); 2340 btrfs_set_path_blocking(p); 2341 2342 free_extent_buffer(tmp); 2343 if (p->reada) 2344 reada_for_search(root, p, level, slot, key->objectid); 2345 2346 btrfs_release_path(p); 2347 2348 ret = -EAGAIN; 2349 tmp = read_tree_block(root, blocknr, blocksize, 0); 2350 if (tmp) { 2351 /* 2352 * If the read above didn't mark this buffer up to date, 2353 * it will never end up being up to date. Set ret to EIO now 2354 * and give up so that our caller doesn't loop forever 2355 * on our EAGAINs. 2356 */ 2357 if (!btrfs_buffer_uptodate(tmp, 0, 0)) 2358 ret = -EIO; 2359 free_extent_buffer(tmp); 2360 } 2361 return ret; 2362 } 2363 2364 /* 2365 * helper function for btrfs_search_slot. This does all of the checks 2366 * for node-level blocks and does any balancing required based on 2367 * the ins_len. 2368 * 2369 * If no extra work was required, zero is returned. 
If we had to 2370 * drop the path, -EAGAIN is returned and btrfs_search_slot must 2371 * start over 2372 */ 2373 static int 2374 setup_nodes_for_search(struct btrfs_trans_handle *trans, 2375 struct btrfs_root *root, struct btrfs_path *p, 2376 struct extent_buffer *b, int level, int ins_len, 2377 int *write_lock_level) 2378 { 2379 int ret; 2380 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >= 2381 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) { 2382 int sret; 2383 2384 if (*write_lock_level < level + 1) { 2385 *write_lock_level = level + 1; 2386 btrfs_release_path(p); 2387 goto again; 2388 } 2389 2390 sret = reada_for_balance(root, p, level); 2391 if (sret) 2392 goto again; 2393 2394 btrfs_set_path_blocking(p); 2395 sret = split_node(trans, root, p, level); 2396 btrfs_clear_path_blocking(p, NULL, 0); 2397 2398 BUG_ON(sret > 0); 2399 if (sret) { 2400 ret = sret; 2401 goto done; 2402 } 2403 b = p->nodes[level]; 2404 } else if (ins_len < 0 && btrfs_header_nritems(b) < 2405 BTRFS_NODEPTRS_PER_BLOCK(root) / 2) { 2406 int sret; 2407 2408 if (*write_lock_level < level + 1) { 2409 *write_lock_level = level + 1; 2410 btrfs_release_path(p); 2411 goto again; 2412 } 2413 2414 sret = reada_for_balance(root, p, level); 2415 if (sret) 2416 goto again; 2417 2418 btrfs_set_path_blocking(p); 2419 sret = balance_level(trans, root, p, level); 2420 btrfs_clear_path_blocking(p, NULL, 0); 2421 2422 if (sret) { 2423 ret = sret; 2424 goto done; 2425 } 2426 b = p->nodes[level]; 2427 if (!b) { 2428 btrfs_release_path(p); 2429 goto again; 2430 } 2431 BUG_ON(btrfs_header_nritems(b) == 1); 2432 } 2433 return 0; 2434 2435 again: 2436 ret = -EAGAIN; 2437 done: 2438 return ret; 2439 } 2440 2441 /* 2442 * look for key in the tree. path is filled in with nodes along the way 2443 * if key is found, we return zero and you can find the item in the leaf 2444 * level of the path (level 0) 2445 * 2446 * If the key isn't found, the path points to the slot where it should 2447 * be inserted, and 1 is returned. If there are other errors during the 2448 * search a negative error number is returned. 2449 * 2450 * if ins_len > 0, nodes and leaves will be split as we walk down the 2451 * tree. 
if ins_len < 0, nodes will be merged as we walk down the tree (if 2452 * possible) 2453 */ 2454 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root 2455 *root, struct btrfs_key *key, struct btrfs_path *p, int 2456 ins_len, int cow) 2457 { 2458 struct extent_buffer *b; 2459 int slot; 2460 int ret; 2461 int err; 2462 int level; 2463 int lowest_unlock = 1; 2464 int root_lock; 2465 /* everything at write_lock_level or lower must be write locked */ 2466 int write_lock_level = 0; 2467 u8 lowest_level = 0; 2468 int min_write_lock_level; 2469 2470 lowest_level = p->lowest_level; 2471 WARN_ON(lowest_level && ins_len > 0); 2472 WARN_ON(p->nodes[0] != NULL); 2473 2474 if (ins_len < 0) { 2475 lowest_unlock = 2; 2476 2477 /* when we are removing items, we might have to go up to level 2478 * two as we update tree pointers Make sure we keep write 2479 * for those levels as well 2480 */ 2481 write_lock_level = 2; 2482 } else if (ins_len > 0) { 2483 /* 2484 * for inserting items, make sure we have a write lock on 2485 * level 1 so we can update keys 2486 */ 2487 write_lock_level = 1; 2488 } 2489 2490 if (!cow) 2491 write_lock_level = -1; 2492 2493 if (cow && (p->really_keep_locks || p->keep_locks || p->lowest_level)) 2494 write_lock_level = BTRFS_MAX_LEVEL; 2495 2496 min_write_lock_level = write_lock_level; 2497 2498 again: 2499 /* 2500 * we try very hard to do read locks on the root 2501 */ 2502 root_lock = BTRFS_READ_LOCK; 2503 level = 0; 2504 if (p->search_commit_root) { 2505 /* 2506 * the commit roots are read only 2507 * so we always do read locks 2508 */ 2509 b = root->commit_root; 2510 extent_buffer_get(b); 2511 level = btrfs_header_level(b); 2512 if (!p->skip_locking) 2513 btrfs_tree_read_lock(b); 2514 } else { 2515 if (p->skip_locking) { 2516 b = btrfs_root_node(root); 2517 level = btrfs_header_level(b); 2518 } else { 2519 /* we don't know the level of the root node 2520 * until we actually have it read locked 2521 */ 2522 b = btrfs_read_lock_root_node(root); 2523 level = btrfs_header_level(b); 2524 if (level <= write_lock_level) { 2525 /* whoops, must trade for write lock */ 2526 btrfs_tree_read_unlock(b); 2527 free_extent_buffer(b); 2528 b = btrfs_lock_root_node(root); 2529 root_lock = BTRFS_WRITE_LOCK; 2530 2531 /* the level might have changed, check again */ 2532 level = btrfs_header_level(b); 2533 } 2534 } 2535 } 2536 p->nodes[level] = b; 2537 if (!p->skip_locking) 2538 p->locks[level] = root_lock; 2539 2540 while (b) { 2541 level = btrfs_header_level(b); 2542 2543 /* 2544 * setup the path here so we can release it under lock 2545 * contention with the cow code 2546 */ 2547 if (cow) { 2548 /* 2549 * if we don't really need to cow this block 2550 * then we don't want to set the path blocking, 2551 * so we test it here 2552 */ 2553 if (!should_cow_block(trans, root, b)) 2554 goto cow_done; 2555 2556 btrfs_set_path_blocking(p); 2557 2558 /* 2559 * must have write locks on this node and the 2560 * parent 2561 */ 2562 if (level > write_lock_level || 2563 (level + 1 > write_lock_level && 2564 level + 1 < BTRFS_MAX_LEVEL && 2565 p->nodes[level + 1])) { 2566 write_lock_level = level + 1; 2567 btrfs_release_path(p); 2568 goto again; 2569 } 2570 2571 err = btrfs_cow_block(trans, root, b, 2572 p->nodes[level + 1], 2573 p->slots[level + 1], &b); 2574 if (err) { 2575 ret = err; 2576 goto done; 2577 } 2578 } 2579 cow_done: 2580 BUG_ON(!cow && ins_len); 2581 2582 p->nodes[level] = b; 2583 btrfs_clear_path_blocking(p, NULL, 0); 2584 2585 /* 2586 * we have a lock on b and as long as we 
aren't changing 2587 * the tree, there is no way for the items in b to change. 2588 * It is safe to drop the lock on our parent before we 2589 * go through the expensive btree search on b. 2590 * 2591 * If cow is true, then we might be changing slot zero, 2592 * which may require changing the parent. So, we can't 2593 * drop the lock until after we know which slot we're 2594 * operating on. 2595 */ 2596 if (!cow) 2597 btrfs_unlock_up_safe(p, level + 1); 2598 2599 ret = bin_search(b, key, level, &slot); 2600 2601 if (level != 0) { 2602 int dec = 0; 2603 if (ret && slot > 0) { 2604 dec = 1; 2605 slot -= 1; 2606 } 2607 p->slots[level] = slot; 2608 err = setup_nodes_for_search(trans, root, p, b, level, 2609 ins_len, &write_lock_level); 2610 if (err == -EAGAIN) 2611 goto again; 2612 if (err) { 2613 ret = err; 2614 goto done; 2615 } 2616 b = p->nodes[level]; 2617 slot = p->slots[level]; 2618 2619 /* 2620 * slot 0 is special, if we change the key 2621 * we have to update the parent pointer 2622 * which means we must have a write lock 2623 * on the parent 2624 */ 2625 if (slot == 0 && cow && 2626 write_lock_level < level + 1) { 2627 write_lock_level = level + 1; 2628 btrfs_release_path(p); 2629 goto again; 2630 } 2631 2632 unlock_up(p, level, lowest_unlock, 2633 min_write_lock_level, &write_lock_level); 2634 2635 if (level == lowest_level) { 2636 if (dec) 2637 p->slots[level]++; 2638 goto done; 2639 } 2640 2641 err = read_block_for_search(trans, root, p, 2642 &b, level, slot, key, 0); 2643 if (err == -EAGAIN) 2644 goto again; 2645 if (err) { 2646 ret = err; 2647 goto done; 2648 } 2649 2650 if (!p->skip_locking) { 2651 level = btrfs_header_level(b); 2652 if (level <= write_lock_level) { 2653 err = btrfs_try_tree_write_lock(b); 2654 if (!err) { 2655 btrfs_set_path_blocking(p); 2656 btrfs_tree_lock(b); 2657 btrfs_clear_path_blocking(p, b, 2658 BTRFS_WRITE_LOCK); 2659 } 2660 p->locks[level] = BTRFS_WRITE_LOCK; 2661 } else { 2662 err = btrfs_try_tree_read_lock(b); 2663 if (!err) { 2664 btrfs_set_path_blocking(p); 2665 btrfs_tree_read_lock(b); 2666 btrfs_clear_path_blocking(p, b, 2667 BTRFS_READ_LOCK); 2668 } 2669 p->locks[level] = BTRFS_READ_LOCK; 2670 } 2671 p->nodes[level] = b; 2672 } 2673 } else { 2674 p->slots[level] = slot; 2675 if (ins_len > 0 && 2676 btrfs_leaf_free_space(root, b) < ins_len) { 2677 if (write_lock_level < 1) { 2678 write_lock_level = 1; 2679 btrfs_release_path(p); 2680 goto again; 2681 } 2682 2683 btrfs_set_path_blocking(p); 2684 err = split_leaf(trans, root, key, 2685 p, ins_len, ret == 0); 2686 btrfs_clear_path_blocking(p, NULL, 0); 2687 2688 BUG_ON(err > 0); 2689 if (err) { 2690 ret = err; 2691 goto done; 2692 } 2693 } 2694 if (!p->search_for_split) 2695 unlock_up(p, level, lowest_unlock, 2696 min_write_lock_level, &write_lock_level); 2697 goto done; 2698 } 2699 } 2700 ret = 1; 2701 done: 2702 /* 2703 * we don't really know what they plan on doing with the path 2704 * from here on, so for now just mark it as blocking 2705 */ 2706 if (!p->leave_spinning) 2707 btrfs_set_path_blocking(p); 2708 if (ret < 0) 2709 btrfs_release_path(p); 2710 return ret; 2711 } 2712 2713 /* 2714 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the 2715 * current state of the tree together with the operations recorded in the tree 2716 * modification log to search for the key in a previous version of this tree, as 2717 * denoted by the time_seq parameter. 2718 * 2719 * Naturally, there is no support for insert, delete or cow operations.
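* Only read locks are taken: the starting root comes from get_old_root() and blocks further down are rewound with tree_mod_log_rewind() as we walk.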
2720 * 2721 * The resulting path and return value will be set up as if we called 2722 * btrfs_search_slot at that point in time with ins_len and cow both set to 0. 2723 */ 2724 int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key, 2725 struct btrfs_path *p, u64 time_seq) 2726 { 2727 struct extent_buffer *b; 2728 int slot; 2729 int ret; 2730 int err; 2731 int level; 2732 int lowest_unlock = 1; 2733 u8 lowest_level = 0; 2734 2735 lowest_level = p->lowest_level; 2736 WARN_ON(p->nodes[0] != NULL); 2737 2738 if (p->search_commit_root) { 2739 BUG_ON(time_seq); 2740 return btrfs_search_slot(NULL, root, key, p, 0, 0); 2741 } 2742 2743 again: 2744 b = get_old_root(root, time_seq); 2745 level = btrfs_header_level(b); 2746 p->locks[level] = BTRFS_READ_LOCK; 2747 2748 while (b) { 2749 level = btrfs_header_level(b); 2750 p->nodes[level] = b; 2751 btrfs_clear_path_blocking(p, NULL, 0); 2752 2753 /* 2754 * we have a lock on b and as long as we aren't changing 2755 * the tree, there is no way for the items in b to change. 2756 * It is safe to drop the lock on our parent before we 2757 * go through the expensive btree search on b. 2758 */ 2759 btrfs_unlock_up_safe(p, level + 1); 2760 2761 ret = bin_search(b, key, level, &slot); 2762 2763 if (level != 0) { 2764 int dec = 0; 2765 if (ret && slot > 0) { 2766 dec = 1; 2767 slot -= 1; 2768 } 2769 p->slots[level] = slot; 2770 unlock_up(p, level, lowest_unlock, 0, NULL); 2771 2772 if (level == lowest_level) { 2773 if (dec) 2774 p->slots[level]++; 2775 goto done; 2776 } 2777 2778 err = read_block_for_search(NULL, root, p, &b, level, 2779 slot, key, time_seq); 2780 if (err == -EAGAIN) 2781 goto again; 2782 if (err) { 2783 ret = err; 2784 goto done; 2785 } 2786 2787 level = btrfs_header_level(b); 2788 err = btrfs_try_tree_read_lock(b); 2789 if (!err) { 2790 btrfs_set_path_blocking(p); 2791 btrfs_tree_read_lock(b); 2792 btrfs_clear_path_blocking(p, b, 2793 BTRFS_READ_LOCK); 2794 } 2795 p->locks[level] = BTRFS_READ_LOCK; 2796 p->nodes[level] = b; 2797 b = tree_mod_log_rewind(root->fs_info, b, time_seq); 2798 if (b != p->nodes[level]) { 2799 btrfs_tree_unlock_rw(p->nodes[level], 2800 p->locks[level]); 2801 p->locks[level] = 0; 2802 p->nodes[level] = b; 2803 } 2804 } else { 2805 p->slots[level] = slot; 2806 unlock_up(p, level, lowest_unlock, 0, NULL); 2807 goto done; 2808 } 2809 } 2810 ret = 1; 2811 done: 2812 if (!p->leave_spinning) 2813 btrfs_set_path_blocking(p); 2814 if (ret < 0) 2815 btrfs_release_path(p); 2816 2817 return ret; 2818 } 2819 2820 /* 2821 * helper to use instead of search slot if no exact match is needed but 2822 * instead the next or previous item should be returned. 2823 * When find_higher is true, the next higher item is returned, the next lower 2824 * otherwise. 2825 * When return_any and find_higher are both true, and no higher item is found, 2826 * return the next lower instead. 2827 * When return_any is true and find_higher is false, and no lower item is found, 2828 * return the next higher instead. 2829 * It returns 0 if any item is found, 1 if none is found (tree empty), and 2830 * < 0 on error 2831 */ 2832 int btrfs_search_slot_for_read(struct btrfs_root *root, 2833 struct btrfs_key *key, struct btrfs_path *p, 2834 int find_higher, int return_any) 2835 { 2836 int ret; 2837 struct extent_buffer *leaf; 2838 2839 again: 2840 ret = btrfs_search_slot(NULL, root, key, p, 0, 0); 2841 if (ret <= 0) 2842 return ret; 2843 /* 2844 * a return value of 1 means the path is at the position where the 2845 * item should be inserted.
Normally this is the next bigger item, 2846 * but in case the previous item is the last in a leaf, path points 2847 * to the first free slot in the previous leaf, i.e. at an invalid 2848 * item. 2849 */ 2850 leaf = p->nodes[0]; 2851 2852 if (find_higher) { 2853 if (p->slots[0] >= btrfs_header_nritems(leaf)) { 2854 ret = btrfs_next_leaf(root, p); 2855 if (ret <= 0) 2856 return ret; 2857 if (!return_any) 2858 return 1; 2859 /* 2860 * no higher item found, return the next 2861 * lower instead 2862 */ 2863 return_any = 0; 2864 find_higher = 0; 2865 btrfs_release_path(p); 2866 goto again; 2867 } 2868 } else { 2869 if (p->slots[0] == 0) { 2870 ret = btrfs_prev_leaf(root, p); 2871 if (ret < 0) 2872 return ret; 2873 if (!ret) { 2874 p->slots[0] = btrfs_header_nritems(leaf) - 1; 2875 return 0; 2876 } 2877 if (!return_any) 2878 return 1; 2879 /* 2880 * no lower item found, return the next 2881 * higher instead 2882 */ 2883 return_any = 0; 2884 find_higher = 1; 2885 btrfs_release_path(p); 2886 goto again; 2887 } else { 2888 --p->slots[0]; 2889 } 2890 } 2891 return 0; 2892 } 2893 2894 /* 2895 * adjust the pointers going up the tree, starting at level 2896 * making sure the right key of each node points to 'key'. 2897 * This is used after shifting pointers to the left, so it stops 2898 * fixing up pointers when a given leaf/node is not in slot 0 of the 2899 * higher levels 2900 * 2901 */ 2902 static void fixup_low_keys(struct btrfs_trans_handle *trans, 2903 struct btrfs_root *root, struct btrfs_path *path, 2904 struct btrfs_disk_key *key, int level) 2905 { 2906 int i; 2907 struct extent_buffer *t; 2908 2909 for (i = level; i < BTRFS_MAX_LEVEL; i++) { 2910 int tslot = path->slots[i]; 2911 if (!path->nodes[i]) 2912 break; 2913 t = path->nodes[i]; 2914 tree_mod_log_set_node_key(root->fs_info, t, tslot, 1); 2915 btrfs_set_node_key(t, key, tslot); 2916 btrfs_mark_buffer_dirty(path->nodes[i]); 2917 if (tslot != 0) 2918 break; 2919 } 2920 } 2921 2922 /* 2923 * update item key. 2924 * 2925 * This function isn't completely safe. It's the caller's responsibility 2926 * that the new key won't break the order 2927 */ 2928 void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans, 2929 struct btrfs_root *root, struct btrfs_path *path, 2930 struct btrfs_key *new_key) 2931 { 2932 struct btrfs_disk_key disk_key; 2933 struct extent_buffer *eb; 2934 int slot; 2935 2936 eb = path->nodes[0]; 2937 slot = path->slots[0]; 2938 if (slot > 0) { 2939 btrfs_item_key(eb, &disk_key, slot - 1); 2940 BUG_ON(comp_keys(&disk_key, new_key) >= 0); 2941 } 2942 if (slot < btrfs_header_nritems(eb) - 1) { 2943 btrfs_item_key(eb, &disk_key, slot + 1); 2944 BUG_ON(comp_keys(&disk_key, new_key) <= 0); 2945 } 2946 2947 btrfs_cpu_key_to_disk(&disk_key, new_key); 2948 btrfs_set_item_key(eb, &disk_key, slot); 2949 btrfs_mark_buffer_dirty(eb); 2950 if (slot == 0) 2951 fixup_low_keys(trans, root, path, &disk_key, 1); 2952 } 2953 2954 /* 2955 * try to push data from one node into the next node left in the 2956 * tree. 2957 * 2958 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible 2959 * error, and > 0 if there was no room in the left hand block.
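* Both src and dst must have been cowed in the current transaction (the WARN_ONs below check their generations), and unless 'empty' is set at least 8 pointers are left behind in src.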
2960 */ 2961 static int push_node_left(struct btrfs_trans_handle *trans, 2962 struct btrfs_root *root, struct extent_buffer *dst, 2963 struct extent_buffer *src, int empty) 2964 { 2965 int push_items = 0; 2966 int src_nritems; 2967 int dst_nritems; 2968 int ret = 0; 2969 2970 src_nritems = btrfs_header_nritems(src); 2971 dst_nritems = btrfs_header_nritems(dst); 2972 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems; 2973 WARN_ON(btrfs_header_generation(src) != trans->transid); 2974 WARN_ON(btrfs_header_generation(dst) != trans->transid); 2975 2976 if (!empty && src_nritems <= 8) 2977 return 1; 2978 2979 if (push_items <= 0) 2980 return 1; 2981 2982 if (empty) { 2983 push_items = min(src_nritems, push_items); 2984 if (push_items < src_nritems) { 2985 /* leave at least 8 pointers in the node if 2986 * we aren't going to empty it 2987 */ 2988 if (src_nritems - push_items < 8) { 2989 if (push_items <= 8) 2990 return 1; 2991 push_items -= 8; 2992 } 2993 } 2994 } else 2995 push_items = min(src_nritems - 8, push_items); 2996 2997 tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0, 2998 push_items); 2999 copy_extent_buffer(dst, src, 3000 btrfs_node_key_ptr_offset(dst_nritems), 3001 btrfs_node_key_ptr_offset(0), 3002 push_items * sizeof(struct btrfs_key_ptr)); 3003 3004 if (push_items < src_nritems) { 3005 /* 3006 * don't call tree_mod_log_eb_move here, key removal was already 3007 * fully logged by tree_mod_log_eb_copy above. 3008 */ 3009 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0), 3010 btrfs_node_key_ptr_offset(push_items), 3011 (src_nritems - push_items) * 3012 sizeof(struct btrfs_key_ptr)); 3013 } 3014 btrfs_set_header_nritems(src, src_nritems - push_items); 3015 btrfs_set_header_nritems(dst, dst_nritems + push_items); 3016 btrfs_mark_buffer_dirty(src); 3017 btrfs_mark_buffer_dirty(dst); 3018 3019 return ret; 3020 } 3021 3022 /* 3023 * try to push data from one node into the next node right in the 3024 * tree. 3025 * 3026 * returns 0 if some ptrs were pushed, < 0 if there was some horrible 3027 * error, and > 0 if there was no room in the right hand block. 
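* As with push_node_left, both buffers must belong to the current transaction and every pointer move is recorded in the tree mod log.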
3028 * 3029 * this will only push up to 1/2 the contents of the left node over 3030 */ 3031 static int balance_node_right(struct btrfs_trans_handle *trans, 3032 struct btrfs_root *root, 3033 struct extent_buffer *dst, 3034 struct extent_buffer *src) 3035 { 3036 int push_items = 0; 3037 int max_push; 3038 int src_nritems; 3039 int dst_nritems; 3040 int ret = 0; 3041 3042 WARN_ON(btrfs_header_generation(src) != trans->transid); 3043 WARN_ON(btrfs_header_generation(dst) != trans->transid); 3044 3045 src_nritems = btrfs_header_nritems(src); 3046 dst_nritems = btrfs_header_nritems(dst); 3047 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems; 3048 if (push_items <= 0) 3049 return 1; 3050 3051 if (src_nritems < 4) 3052 return 1; 3053 3054 max_push = src_nritems / 2 + 1; 3055 /* don't try to empty the node */ 3056 if (max_push >= src_nritems) 3057 return 1; 3058 3059 if (max_push < push_items) 3060 push_items = max_push; 3061 3062 tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems); 3063 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items), 3064 btrfs_node_key_ptr_offset(0), 3065 (dst_nritems) * 3066 sizeof(struct btrfs_key_ptr)); 3067 3068 tree_mod_log_eb_copy(root->fs_info, dst, src, 0, 3069 src_nritems - push_items, push_items); 3070 copy_extent_buffer(dst, src, 3071 btrfs_node_key_ptr_offset(0), 3072 btrfs_node_key_ptr_offset(src_nritems - push_items), 3073 push_items * sizeof(struct btrfs_key_ptr)); 3074 3075 btrfs_set_header_nritems(src, src_nritems - push_items); 3076 btrfs_set_header_nritems(dst, dst_nritems + push_items); 3077 3078 btrfs_mark_buffer_dirty(src); 3079 btrfs_mark_buffer_dirty(dst); 3080 3081 return ret; 3082 } 3083 3084 /* 3085 * helper function to insert a new root level in the tree. 3086 * A new node is allocated, and a single item is inserted to 3087 * point to the existing root 3088 * 3089 * returns zero on success or < 0 on failure. 
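* The old root becomes the single child (slot 0) of the new node, and root->node is switched to the new block under RCU.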
3090 */ 3091 static noinline int insert_new_root(struct btrfs_trans_handle *trans, 3092 struct btrfs_root *root, 3093 struct btrfs_path *path, int level) 3094 { 3095 u64 lower_gen; 3096 struct extent_buffer *lower; 3097 struct extent_buffer *c; 3098 struct extent_buffer *old; 3099 struct btrfs_disk_key lower_key; 3100 3101 BUG_ON(path->nodes[level]); 3102 BUG_ON(path->nodes[level-1] != root->node); 3103 3104 lower = path->nodes[level-1]; 3105 if (level == 1) 3106 btrfs_item_key(lower, &lower_key, 0); 3107 else 3108 btrfs_node_key(lower, &lower_key, 0); 3109 3110 c = btrfs_alloc_free_block(trans, root, root->nodesize, 0, 3111 root->root_key.objectid, &lower_key, 3112 level, root->node->start, 0); 3113 if (IS_ERR(c)) 3114 return PTR_ERR(c); 3115 3116 root_add_used(root, root->nodesize); 3117 3118 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header)); 3119 btrfs_set_header_nritems(c, 1); 3120 btrfs_set_header_level(c, level); 3121 btrfs_set_header_bytenr(c, c->start); 3122 btrfs_set_header_generation(c, trans->transid); 3123 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV); 3124 btrfs_set_header_owner(c, root->root_key.objectid); 3125 3126 write_extent_buffer(c, root->fs_info->fsid, 3127 (unsigned long)btrfs_header_fsid(c), 3128 BTRFS_FSID_SIZE); 3129 3130 write_extent_buffer(c, root->fs_info->chunk_tree_uuid, 3131 (unsigned long)btrfs_header_chunk_tree_uuid(c), 3132 BTRFS_UUID_SIZE); 3133 3134 btrfs_set_node_key(c, &lower_key, 0); 3135 btrfs_set_node_blockptr(c, 0, lower->start); 3136 lower_gen = btrfs_header_generation(lower); 3137 WARN_ON(lower_gen != trans->transid); 3138 3139 btrfs_set_node_ptr_generation(c, 0, lower_gen); 3140 3141 btrfs_mark_buffer_dirty(c); 3142 3143 old = root->node; 3144 tree_mod_log_set_root_pointer(root, c); 3145 rcu_assign_pointer(root->node, c); 3146 3147 /* the super has an extra ref to root->node */ 3148 free_extent_buffer(old); 3149 3150 add_root_to_dirty_list(root); 3151 extent_buffer_get(c); 3152 path->nodes[level] = c; 3153 path->locks[level] = BTRFS_WRITE_LOCK; 3154 path->slots[level] = 0; 3155 return 0; 3156 } 3157 3158 /* 3159 * worker function to insert a single pointer in a node. 3160 * the node should have enough room for the pointer already 3161 * 3162 * slot and level indicate where you want the key to go, and 3163 * blocknr is the block the key points to. 
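* Pointers at and after 'slot' are shifted one position to the right, and for levels above zero the insertion is recorded in the tree mod log.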
3164 */ 3165 static void insert_ptr(struct btrfs_trans_handle *trans, 3166 struct btrfs_root *root, struct btrfs_path *path, 3167 struct btrfs_disk_key *key, u64 bytenr, 3168 int slot, int level) 3169 { 3170 struct extent_buffer *lower; 3171 int nritems; 3172 int ret; 3173 3174 BUG_ON(!path->nodes[level]); 3175 btrfs_assert_tree_locked(path->nodes[level]); 3176 lower = path->nodes[level]; 3177 nritems = btrfs_header_nritems(lower); 3178 BUG_ON(slot > nritems); 3179 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root)); 3180 if (slot != nritems) { 3181 if (level) 3182 tree_mod_log_eb_move(root->fs_info, lower, slot + 1, 3183 slot, nritems - slot); 3184 memmove_extent_buffer(lower, 3185 btrfs_node_key_ptr_offset(slot + 1), 3186 btrfs_node_key_ptr_offset(slot), 3187 (nritems - slot) * sizeof(struct btrfs_key_ptr)); 3188 } 3189 if (level) { 3190 ret = tree_mod_log_insert_key(root->fs_info, lower, slot, 3191 MOD_LOG_KEY_ADD); 3192 BUG_ON(ret < 0); 3193 } 3194 btrfs_set_node_key(lower, key, slot); 3195 btrfs_set_node_blockptr(lower, slot, bytenr); 3196 WARN_ON(trans->transid == 0); 3197 btrfs_set_node_ptr_generation(lower, slot, trans->transid); 3198 btrfs_set_header_nritems(lower, nritems + 1); 3199 btrfs_mark_buffer_dirty(lower); 3200 } 3201 3202 /* 3203 * split the node at the specified level in path in two. 3204 * The path is corrected to point to the appropriate node after the split 3205 * 3206 * Before splitting this tries to make some room in the node by pushing 3207 * left and right, if either one works, it returns right away. 3208 * 3209 * returns 0 on success and < 0 on failure 3210 */ 3211 static noinline int split_node(struct btrfs_trans_handle *trans, 3212 struct btrfs_root *root, 3213 struct btrfs_path *path, int level) 3214 { 3215 struct extent_buffer *c; 3216 struct extent_buffer *split; 3217 struct btrfs_disk_key disk_key; 3218 int mid; 3219 int ret; 3220 u32 c_nritems; 3221 3222 c = path->nodes[level]; 3223 WARN_ON(btrfs_header_generation(c) != trans->transid); 3224 if (c == root->node) { 3225 /* trying to split the root, lets make a new one */ 3226 ret = insert_new_root(trans, root, path, level + 1); 3227 if (ret) 3228 return ret; 3229 } else { 3230 ret = push_nodes_for_insert(trans, root, path, level); 3231 c = path->nodes[level]; 3232 if (!ret && btrfs_header_nritems(c) < 3233 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) 3234 return 0; 3235 if (ret < 0) 3236 return ret; 3237 } 3238 3239 c_nritems = btrfs_header_nritems(c); 3240 mid = (c_nritems + 1) / 2; 3241 btrfs_node_key(c, &disk_key, mid); 3242 3243 split = btrfs_alloc_free_block(trans, root, root->nodesize, 0, 3244 root->root_key.objectid, 3245 &disk_key, level, c->start, 0); 3246 if (IS_ERR(split)) 3247 return PTR_ERR(split); 3248 3249 root_add_used(root, root->nodesize); 3250 3251 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header)); 3252 btrfs_set_header_level(split, btrfs_header_level(c)); 3253 btrfs_set_header_bytenr(split, split->start); 3254 btrfs_set_header_generation(split, trans->transid); 3255 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV); 3256 btrfs_set_header_owner(split, root->root_key.objectid); 3257 write_extent_buffer(split, root->fs_info->fsid, 3258 (unsigned long)btrfs_header_fsid(split), 3259 BTRFS_FSID_SIZE); 3260 write_extent_buffer(split, root->fs_info->chunk_tree_uuid, 3261 (unsigned long)btrfs_header_chunk_tree_uuid(split), 3262 BTRFS_UUID_SIZE); 3263 3264 tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid); 3265 copy_extent_buffer(split, c, 3266 
btrfs_node_key_ptr_offset(0), 3267 btrfs_node_key_ptr_offset(mid), 3268 (c_nritems - mid) * sizeof(struct btrfs_key_ptr)); 3269 btrfs_set_header_nritems(split, c_nritems - mid); 3270 btrfs_set_header_nritems(c, mid); 3271 ret = 0; 3272 3273 btrfs_mark_buffer_dirty(c); 3274 btrfs_mark_buffer_dirty(split); 3275 3276 insert_ptr(trans, root, path, &disk_key, split->start, 3277 path->slots[level + 1] + 1, level + 1); 3278 3279 if (path->slots[level] >= mid) { 3280 path->slots[level] -= mid; 3281 btrfs_tree_unlock(c); 3282 free_extent_buffer(c); 3283 path->nodes[level] = split; 3284 path->slots[level + 1] += 1; 3285 } else { 3286 btrfs_tree_unlock(split); 3287 free_extent_buffer(split); 3288 } 3289 return ret; 3290 } 3291 3292 /* 3293 * how many bytes are required to store the items in a leaf. start 3294 * and nr indicate which items in the leaf to check. This totals up the 3295 * space used both by the item structs and the item data 3296 */ 3297 static int leaf_space_used(struct extent_buffer *l, int start, int nr) 3298 { 3299 struct btrfs_item *start_item; 3300 struct btrfs_item *end_item; 3301 struct btrfs_map_token token; 3302 int data_len; 3303 int nritems = btrfs_header_nritems(l); 3304 int end = min(nritems, start + nr) - 1; 3305 3306 if (!nr) 3307 return 0; 3308 btrfs_init_map_token(&token); 3309 start_item = btrfs_item_nr(l, start); 3310 end_item = btrfs_item_nr(l, end); 3311 data_len = btrfs_token_item_offset(l, start_item, &token) + 3312 btrfs_token_item_size(l, start_item, &token); 3313 data_len = data_len - btrfs_token_item_offset(l, end_item, &token); 3314 data_len += sizeof(struct btrfs_item) * nr; 3315 WARN_ON(data_len < 0); 3316 return data_len; 3317 } 3318 3319 /* 3320 * The space between the end of the leaf items and 3321 * the start of the leaf data. IOW, how much room 3322 * the leaf has left for both items and data 3323 */ 3324 noinline int btrfs_leaf_free_space(struct btrfs_root *root, 3325 struct extent_buffer *leaf) 3326 { 3327 int nritems = btrfs_header_nritems(leaf); 3328 int ret; 3329 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems); 3330 if (ret < 0) { 3331 printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, " 3332 "used %d nritems %d\n", 3333 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root), 3334 leaf_space_used(leaf, 0, nritems), nritems); 3335 } 3336 return ret; 3337 } 3338 3339 /* 3340 * min slot controls the lowest index we're willing to push to the 3341 * right. 
We'll push up to and including min_slot, but no lower 3342 */ 3343 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans, 3344 struct btrfs_root *root, 3345 struct btrfs_path *path, 3346 int data_size, int empty, 3347 struct extent_buffer *right, 3348 int free_space, u32 left_nritems, 3349 u32 min_slot) 3350 { 3351 struct extent_buffer *left = path->nodes[0]; 3352 struct extent_buffer *upper = path->nodes[1]; 3353 struct btrfs_map_token token; 3354 struct btrfs_disk_key disk_key; 3355 int slot; 3356 u32 i; 3357 int push_space = 0; 3358 int push_items = 0; 3359 struct btrfs_item *item; 3360 u32 nr; 3361 u32 right_nritems; 3362 u32 data_end; 3363 u32 this_item_size; 3364 3365 btrfs_init_map_token(&token); 3366 3367 if (empty) 3368 nr = 0; 3369 else 3370 nr = max_t(u32, 1, min_slot); 3371 3372 if (path->slots[0] >= left_nritems) 3373 push_space += data_size; 3374 3375 slot = path->slots[1]; 3376 i = left_nritems - 1; 3377 while (i >= nr) { 3378 item = btrfs_item_nr(left, i); 3379 3380 if (!empty && push_items > 0) { 3381 if (path->slots[0] > i) 3382 break; 3383 if (path->slots[0] == i) { 3384 int space = btrfs_leaf_free_space(root, left); 3385 if (space + push_space * 2 > free_space) 3386 break; 3387 } 3388 } 3389 3390 if (path->slots[0] == i) 3391 push_space += data_size; 3392 3393 this_item_size = btrfs_item_size(left, item); 3394 if (this_item_size + sizeof(*item) + push_space > free_space) 3395 break; 3396 3397 push_items++; 3398 push_space += this_item_size + sizeof(*item); 3399 if (i == 0) 3400 break; 3401 i--; 3402 } 3403 3404 if (push_items == 0) 3405 goto out_unlock; 3406 3407 WARN_ON(!empty && push_items == left_nritems); 3408 3409 /* push left to right */ 3410 right_nritems = btrfs_header_nritems(right); 3411 3412 push_space = btrfs_item_end_nr(left, left_nritems - push_items); 3413 push_space -= leaf_data_end(root, left); 3414 3415 /* make room in the right data area */ 3416 data_end = leaf_data_end(root, right); 3417 memmove_extent_buffer(right, 3418 btrfs_leaf_data(right) + data_end - push_space, 3419 btrfs_leaf_data(right) + data_end, 3420 BTRFS_LEAF_DATA_SIZE(root) - data_end); 3421 3422 /* copy from the left data area */ 3423 copy_extent_buffer(right, left, btrfs_leaf_data(right) + 3424 BTRFS_LEAF_DATA_SIZE(root) - push_space, 3425 btrfs_leaf_data(left) + leaf_data_end(root, left), 3426 push_space); 3427 3428 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items), 3429 btrfs_item_nr_offset(0), 3430 right_nritems * sizeof(struct btrfs_item)); 3431 3432 /* copy the items from left to right */ 3433 copy_extent_buffer(right, left, btrfs_item_nr_offset(0), 3434 btrfs_item_nr_offset(left_nritems - push_items), 3435 push_items * sizeof(struct btrfs_item)); 3436 3437 /* update the item pointers */ 3438 right_nritems += push_items; 3439 btrfs_set_header_nritems(right, right_nritems); 3440 push_space = BTRFS_LEAF_DATA_SIZE(root); 3441 for (i = 0; i < right_nritems; i++) { 3442 item = btrfs_item_nr(right, i); 3443 push_space -= btrfs_token_item_size(right, item, &token); 3444 btrfs_set_token_item_offset(right, item, push_space, &token); 3445 } 3446 3447 left_nritems -= push_items; 3448 btrfs_set_header_nritems(left, left_nritems); 3449 3450 if (left_nritems) 3451 btrfs_mark_buffer_dirty(left); 3452 else 3453 clean_tree_block(trans, root, left); 3454 3455 btrfs_mark_buffer_dirty(right); 3456 3457 btrfs_item_key(right, &disk_key, 0); 3458 btrfs_set_node_key(upper, &disk_key, slot + 1); 3459 btrfs_mark_buffer_dirty(upper); 3460 3461 /* then fixup the leaf pointer in 
the path */ 3462 if (path->slots[0] >= left_nritems) { 3463 path->slots[0] -= left_nritems; 3464 if (btrfs_header_nritems(path->nodes[0]) == 0) 3465 clean_tree_block(trans, root, path->nodes[0]); 3466 btrfs_tree_unlock(path->nodes[0]); 3467 free_extent_buffer(path->nodes[0]); 3468 path->nodes[0] = right; 3469 path->slots[1] += 1; 3470 } else { 3471 btrfs_tree_unlock(right); 3472 free_extent_buffer(right); 3473 } 3474 return 0; 3475 3476 out_unlock: 3477 btrfs_tree_unlock(right); 3478 free_extent_buffer(right); 3479 return 1; 3480 } 3481 3482 /* 3483 * push some data in the path leaf to the right, trying to free up at 3484 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3485 * 3486 * returns 1 if the push failed because the other node didn't have enough 3487 * room, 0 if everything worked out and < 0 if there were major errors. 3488 * 3489 * this will push starting from min_slot to the end of the leaf. It won't 3490 * push any slot lower than min_slot 3491 */ 3492 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root 3493 *root, struct btrfs_path *path, 3494 int min_data_size, int data_size, 3495 int empty, u32 min_slot) 3496 { 3497 struct extent_buffer *left = path->nodes[0]; 3498 struct extent_buffer *right; 3499 struct extent_buffer *upper; 3500 int slot; 3501 int free_space; 3502 u32 left_nritems; 3503 int ret; 3504 3505 if (!path->nodes[1]) 3506 return 1; 3507 3508 slot = path->slots[1]; 3509 upper = path->nodes[1]; 3510 if (slot >= btrfs_header_nritems(upper) - 1) 3511 return 1; 3512 3513 btrfs_assert_tree_locked(path->nodes[1]); 3514 3515 right = read_node_slot(root, upper, slot + 1); 3516 if (right == NULL) 3517 return 1; 3518 3519 btrfs_tree_lock(right); 3520 btrfs_set_lock_blocking(right); 3521 3522 free_space = btrfs_leaf_free_space(root, right); 3523 if (free_space < data_size) 3524 goto out_unlock; 3525 3526 /* cow and double check */ 3527 ret = btrfs_cow_block(trans, root, right, upper, 3528 slot + 1, &right); 3529 if (ret) 3530 goto out_unlock; 3531 3532 free_space = btrfs_leaf_free_space(root, right); 3533 if (free_space < data_size) 3534 goto out_unlock; 3535 3536 left_nritems = btrfs_header_nritems(left); 3537 if (left_nritems == 0) 3538 goto out_unlock; 3539 3540 return __push_leaf_right(trans, root, path, min_data_size, empty, 3541 right, free_space, left_nritems, min_slot); 3542 out_unlock: 3543 btrfs_tree_unlock(right); 3544 free_extent_buffer(right); 3545 return 1; 3546 } 3547 3548 /* 3549 * push some data in the path leaf to the left, trying to free up at 3550 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3551 * 3552 * max_slot can put a limit on how far into the leaf we'll push items. The 3553 * item at 'max_slot' won't be touched. 
Use (u32)-1 to make us do all the 3554 * items 3555 */ 3556 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans, 3557 struct btrfs_root *root, 3558 struct btrfs_path *path, int data_size, 3559 int empty, struct extent_buffer *left, 3560 int free_space, u32 right_nritems, 3561 u32 max_slot) 3562 { 3563 struct btrfs_disk_key disk_key; 3564 struct extent_buffer *right = path->nodes[0]; 3565 int i; 3566 int push_space = 0; 3567 int push_items = 0; 3568 struct btrfs_item *item; 3569 u32 old_left_nritems; 3570 u32 nr; 3571 int ret = 0; 3572 u32 this_item_size; 3573 u32 old_left_item_size; 3574 struct btrfs_map_token token; 3575 3576 btrfs_init_map_token(&token); 3577 3578 if (empty) 3579 nr = min(right_nritems, max_slot); 3580 else 3581 nr = min(right_nritems - 1, max_slot); 3582 3583 for (i = 0; i < nr; i++) { 3584 item = btrfs_item_nr(right, i); 3585 3586 if (!empty && push_items > 0) { 3587 if (path->slots[0] < i) 3588 break; 3589 if (path->slots[0] == i) { 3590 int space = btrfs_leaf_free_space(root, right); 3591 if (space + push_space * 2 > free_space) 3592 break; 3593 } 3594 } 3595 3596 if (path->slots[0] == i) 3597 push_space += data_size; 3598 3599 this_item_size = btrfs_item_size(right, item); 3600 if (this_item_size + sizeof(*item) + push_space > free_space) 3601 break; 3602 3603 push_items++; 3604 push_space += this_item_size + sizeof(*item); 3605 } 3606 3607 if (push_items == 0) { 3608 ret = 1; 3609 goto out; 3610 } 3611 if (!empty && push_items == btrfs_header_nritems(right)) 3612 WARN_ON(1); 3613 3614 /* push data from right to left */ 3615 copy_extent_buffer(left, right, 3616 btrfs_item_nr_offset(btrfs_header_nritems(left)), 3617 btrfs_item_nr_offset(0), 3618 push_items * sizeof(struct btrfs_item)); 3619 3620 push_space = BTRFS_LEAF_DATA_SIZE(root) - 3621 btrfs_item_offset_nr(right, push_items - 1); 3622 3623 copy_extent_buffer(left, right, btrfs_leaf_data(left) + 3624 leaf_data_end(root, left) - push_space, 3625 btrfs_leaf_data(right) + 3626 btrfs_item_offset_nr(right, push_items - 1), 3627 push_space); 3628 old_left_nritems = btrfs_header_nritems(left); 3629 BUG_ON(old_left_nritems <= 0); 3630 3631 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1); 3632 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) { 3633 u32 ioff; 3634 3635 item = btrfs_item_nr(left, i); 3636 3637 ioff = btrfs_token_item_offset(left, item, &token); 3638 btrfs_set_token_item_offset(left, item, 3639 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size), 3640 &token); 3641 } 3642 btrfs_set_header_nritems(left, old_left_nritems + push_items); 3643 3644 /* fixup right node */ 3645 if (push_items > right_nritems) 3646 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items, 3647 right_nritems); 3648 3649 if (push_items < right_nritems) { 3650 push_space = btrfs_item_offset_nr(right, push_items - 1) - 3651 leaf_data_end(root, right); 3652 memmove_extent_buffer(right, btrfs_leaf_data(right) + 3653 BTRFS_LEAF_DATA_SIZE(root) - push_space, 3654 btrfs_leaf_data(right) + 3655 leaf_data_end(root, right), push_space); 3656 3657 memmove_extent_buffer(right, btrfs_item_nr_offset(0), 3658 btrfs_item_nr_offset(push_items), 3659 (btrfs_header_nritems(right) - push_items) * 3660 sizeof(struct btrfs_item)); 3661 } 3662 right_nritems -= push_items; 3663 btrfs_set_header_nritems(right, right_nritems); 3664 push_space = BTRFS_LEAF_DATA_SIZE(root); 3665 for (i = 0; i < right_nritems; i++) { 3666 item = btrfs_item_nr(right, i); 3667 3668 push_space = push_space - 
btrfs_token_item_size(right, 3669 item, &token); 3670 btrfs_set_token_item_offset(right, item, push_space, &token); 3671 } 3672 3673 btrfs_mark_buffer_dirty(left); 3674 if (right_nritems) 3675 btrfs_mark_buffer_dirty(right); 3676 else 3677 clean_tree_block(trans, root, right); 3678 3679 btrfs_item_key(right, &disk_key, 0); 3680 fixup_low_keys(trans, root, path, &disk_key, 1); 3681 3682 /* then fixup the leaf pointer in the path */ 3683 if (path->slots[0] < push_items) { 3684 path->slots[0] += old_left_nritems; 3685 btrfs_tree_unlock(path->nodes[0]); 3686 free_extent_buffer(path->nodes[0]); 3687 path->nodes[0] = left; 3688 path->slots[1] -= 1; 3689 } else { 3690 btrfs_tree_unlock(left); 3691 free_extent_buffer(left); 3692 path->slots[0] -= push_items; 3693 } 3694 BUG_ON(path->slots[0] < 0); 3695 return ret; 3696 out: 3697 btrfs_tree_unlock(left); 3698 free_extent_buffer(left); 3699 return ret; 3700 } 3701 3702 /* 3703 * push some data in the path leaf to the left, trying to free up at 3704 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3705 * 3706 * max_slot can put a limit on how far into the leaf we'll push items. The 3707 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the 3708 * items 3709 */ 3710 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root 3711 *root, struct btrfs_path *path, int min_data_size, 3712 int data_size, int empty, u32 max_slot) 3713 { 3714 struct extent_buffer *right = path->nodes[0]; 3715 struct extent_buffer *left; 3716 int slot; 3717 int free_space; 3718 u32 right_nritems; 3719 int ret = 0; 3720 3721 slot = path->slots[1]; 3722 if (slot == 0) 3723 return 1; 3724 if (!path->nodes[1]) 3725 return 1; 3726 3727 right_nritems = btrfs_header_nritems(right); 3728 if (right_nritems == 0) 3729 return 1; 3730 3731 btrfs_assert_tree_locked(path->nodes[1]); 3732 3733 left = read_node_slot(root, path->nodes[1], slot - 1); 3734 if (left == NULL) 3735 return 1; 3736 3737 btrfs_tree_lock(left); 3738 btrfs_set_lock_blocking(left); 3739 3740 free_space = btrfs_leaf_free_space(root, left); 3741 if (free_space < data_size) { 3742 ret = 1; 3743 goto out; 3744 } 3745 3746 /* cow and double check */ 3747 ret = btrfs_cow_block(trans, root, left, 3748 path->nodes[1], slot - 1, &left); 3749 if (ret) { 3750 /* we hit -ENOSPC, but it isn't fatal here */ 3751 if (ret == -ENOSPC) 3752 ret = 1; 3753 goto out; 3754 } 3755 3756 free_space = btrfs_leaf_free_space(root, left); 3757 if (free_space < data_size) { 3758 ret = 1; 3759 goto out; 3760 } 3761 3762 return __push_leaf_left(trans, root, path, min_data_size, 3763 empty, left, free_space, right_nritems, 3764 max_slot); 3765 out: 3766 btrfs_tree_unlock(left); 3767 free_extent_buffer(left); 3768 return ret; 3769 } 3770 3771 /* 3772 * split the path's leaf in two, making sure there is at least data_size 3773 * available for the resulting leaf level of the path. 
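* copy_for_split() below does the actual item copy: everything from 'mid' onward is moved out of 'l' into the new leaf 'right', which is then linked into the parent with insert_ptr().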
3774 */ 3775 static noinline void copy_for_split(struct btrfs_trans_handle *trans, 3776 struct btrfs_root *root, 3777 struct btrfs_path *path, 3778 struct extent_buffer *l, 3779 struct extent_buffer *right, 3780 int slot, int mid, int nritems) 3781 { 3782 int data_copy_size; 3783 int rt_data_off; 3784 int i; 3785 struct btrfs_disk_key disk_key; 3786 struct btrfs_map_token token; 3787 3788 btrfs_init_map_token(&token); 3789 3790 nritems = nritems - mid; 3791 btrfs_set_header_nritems(right, nritems); 3792 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l); 3793 3794 copy_extent_buffer(right, l, btrfs_item_nr_offset(0), 3795 btrfs_item_nr_offset(mid), 3796 nritems * sizeof(struct btrfs_item)); 3797 3798 copy_extent_buffer(right, l, 3799 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) - 3800 data_copy_size, btrfs_leaf_data(l) + 3801 leaf_data_end(root, l), data_copy_size); 3802 3803 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) - 3804 btrfs_item_end_nr(l, mid); 3805 3806 for (i = 0; i < nritems; i++) { 3807 struct btrfs_item *item = btrfs_item_nr(right, i); 3808 u32 ioff; 3809 3810 ioff = btrfs_token_item_offset(right, item, &token); 3811 btrfs_set_token_item_offset(right, item, 3812 ioff + rt_data_off, &token); 3813 } 3814 3815 btrfs_set_header_nritems(l, mid); 3816 btrfs_item_key(right, &disk_key, 0); 3817 insert_ptr(trans, root, path, &disk_key, right->start, 3818 path->slots[1] + 1, 1); 3819 3820 btrfs_mark_buffer_dirty(right); 3821 btrfs_mark_buffer_dirty(l); 3822 BUG_ON(path->slots[0] != slot); 3823 3824 if (mid <= slot) { 3825 btrfs_tree_unlock(path->nodes[0]); 3826 free_extent_buffer(path->nodes[0]); 3827 path->nodes[0] = right; 3828 path->slots[0] -= mid; 3829 path->slots[1] += 1; 3830 } else { 3831 btrfs_tree_unlock(right); 3832 free_extent_buffer(right); 3833 } 3834 3835 BUG_ON(path->slots[0] < 0); 3836 } 3837 3838 /* 3839 * double splits happen when we need to insert a big item in the middle 3840 * of a leaf. A double split can leave us with 3 mostly empty leaves: 3841 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ] 3842 * A B C 3843 * 3844 * We avoid this by trying to push the items on either side of our target 3845 * into the adjacent leaves. If all goes well we can avoid the double split 3846 * completely. 3847 */ 3848 static noinline int push_for_double_split(struct btrfs_trans_handle *trans, 3849 struct btrfs_root *root, 3850 struct btrfs_path *path, 3851 int data_size) 3852 { 3853 int ret; 3854 int progress = 0; 3855 int slot; 3856 u32 nritems; 3857 3858 slot = path->slots[0]; 3859 3860 /* 3861 * try to push all the items after our slot into the 3862 * right leaf 3863 */ 3864 ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot); 3865 if (ret < 0) 3866 return ret; 3867 3868 if (ret == 0) 3869 progress++; 3870 3871 nritems = btrfs_header_nritems(path->nodes[0]); 3872 /* 3873 * our goal is to get our slot at the start or end of a leaf. 
If 3874 * we've done so we're done 3875 */ 3876 if (path->slots[0] == 0 || path->slots[0] == nritems) 3877 return 0; 3878 3879 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size) 3880 return 0; 3881 3882 /* try to push all the items before our slot into the next leaf */ 3883 slot = path->slots[0]; 3884 ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot); 3885 if (ret < 0) 3886 return ret; 3887 3888 if (ret == 0) 3889 progress++; 3890 3891 if (progress) 3892 return 0; 3893 return 1; 3894 } 3895 3896 /* 3897 * split the path's leaf in two, making sure there is at least data_size 3898 * available for the resulting leaf level of the path. 3899 * 3900 * returns 0 if all went well and < 0 on failure. 3901 */ 3902 static noinline int split_leaf(struct btrfs_trans_handle *trans, 3903 struct btrfs_root *root, 3904 struct btrfs_key *ins_key, 3905 struct btrfs_path *path, int data_size, 3906 int extend) 3907 { 3908 struct btrfs_disk_key disk_key; 3909 struct extent_buffer *l; 3910 u32 nritems; 3911 int mid; 3912 int slot; 3913 struct extent_buffer *right; 3914 int ret = 0; 3915 int wret; 3916 int split; 3917 int num_doubles = 0; 3918 int tried_avoid_double = 0; 3919 3920 l = path->nodes[0]; 3921 slot = path->slots[0]; 3922 if (extend && data_size + btrfs_item_size_nr(l, slot) + 3923 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root)) 3924 return -EOVERFLOW; 3925 3926 /* first try to make some room by pushing left and right */ 3927 if (data_size) { 3928 wret = push_leaf_right(trans, root, path, data_size, 3929 data_size, 0, 0); 3930 if (wret < 0) 3931 return wret; 3932 if (wret) { 3933 wret = push_leaf_left(trans, root, path, data_size, 3934 data_size, 0, (u32)-1); 3935 if (wret < 0) 3936 return wret; 3937 } 3938 l = path->nodes[0]; 3939 3940 /* did the pushes work? 
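* (the pushes may have freed enough room that no split is needed) */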
*/ 3941 if (btrfs_leaf_free_space(root, l) >= data_size) 3942 return 0; 3943 } 3944 3945 if (!path->nodes[1]) { 3946 ret = insert_new_root(trans, root, path, 1); 3947 if (ret) 3948 return ret; 3949 } 3950 again: 3951 split = 1; 3952 l = path->nodes[0]; 3953 slot = path->slots[0]; 3954 nritems = btrfs_header_nritems(l); 3955 mid = (nritems + 1) / 2; 3956 3957 if (mid <= slot) { 3958 if (nritems == 1 || 3959 leaf_space_used(l, mid, nritems - mid) + data_size > 3960 BTRFS_LEAF_DATA_SIZE(root)) { 3961 if (slot >= nritems) { 3962 split = 0; 3963 } else { 3964 mid = slot; 3965 if (mid != nritems && 3966 leaf_space_used(l, mid, nritems - mid) + 3967 data_size > BTRFS_LEAF_DATA_SIZE(root)) { 3968 if (data_size && !tried_avoid_double) 3969 goto push_for_double; 3970 split = 2; 3971 } 3972 } 3973 } 3974 } else { 3975 if (leaf_space_used(l, 0, mid) + data_size > 3976 BTRFS_LEAF_DATA_SIZE(root)) { 3977 if (!extend && data_size && slot == 0) { 3978 split = 0; 3979 } else if ((extend || !data_size) && slot == 0) { 3980 mid = 1; 3981 } else { 3982 mid = slot; 3983 if (mid != nritems && 3984 leaf_space_used(l, mid, nritems - mid) + 3985 data_size > BTRFS_LEAF_DATA_SIZE(root)) { 3986 if (data_size && !tried_avoid_double) 3987 goto push_for_double; 3988 split = 2 ; 3989 } 3990 } 3991 } 3992 } 3993 3994 if (split == 0) 3995 btrfs_cpu_key_to_disk(&disk_key, ins_key); 3996 else 3997 btrfs_item_key(l, &disk_key, mid); 3998 3999 right = btrfs_alloc_free_block(trans, root, root->leafsize, 0, 4000 root->root_key.objectid, 4001 &disk_key, 0, l->start, 0); 4002 if (IS_ERR(right)) 4003 return PTR_ERR(right); 4004 4005 root_add_used(root, root->leafsize); 4006 4007 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header)); 4008 btrfs_set_header_bytenr(right, right->start); 4009 btrfs_set_header_generation(right, trans->transid); 4010 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV); 4011 btrfs_set_header_owner(right, root->root_key.objectid); 4012 btrfs_set_header_level(right, 0); 4013 write_extent_buffer(right, root->fs_info->fsid, 4014 (unsigned long)btrfs_header_fsid(right), 4015 BTRFS_FSID_SIZE); 4016 4017 write_extent_buffer(right, root->fs_info->chunk_tree_uuid, 4018 (unsigned long)btrfs_header_chunk_tree_uuid(right), 4019 BTRFS_UUID_SIZE); 4020 4021 if (split == 0) { 4022 if (mid <= slot) { 4023 btrfs_set_header_nritems(right, 0); 4024 insert_ptr(trans, root, path, &disk_key, right->start, 4025 path->slots[1] + 1, 1); 4026 btrfs_tree_unlock(path->nodes[0]); 4027 free_extent_buffer(path->nodes[0]); 4028 path->nodes[0] = right; 4029 path->slots[0] = 0; 4030 path->slots[1] += 1; 4031 } else { 4032 btrfs_set_header_nritems(right, 0); 4033 insert_ptr(trans, root, path, &disk_key, right->start, 4034 path->slots[1], 1); 4035 btrfs_tree_unlock(path->nodes[0]); 4036 free_extent_buffer(path->nodes[0]); 4037 path->nodes[0] = right; 4038 path->slots[0] = 0; 4039 if (path->slots[1] == 0) 4040 fixup_low_keys(trans, root, path, 4041 &disk_key, 1); 4042 } 4043 btrfs_mark_buffer_dirty(right); 4044 return ret; 4045 } 4046 4047 copy_for_split(trans, root, path, l, right, slot, mid, nritems); 4048 4049 if (split == 2) { 4050 BUG_ON(num_doubles != 0); 4051 num_doubles++; 4052 goto again; 4053 } 4054 4055 return 0; 4056 4057 push_for_double: 4058 push_for_double_split(trans, root, path, data_size); 4059 tried_avoid_double = 1; 4060 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size) 4061 return 0; 4062 goto again; 4063 } 4064 4065 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans, 
4066 struct btrfs_root *root, 4067 struct btrfs_path *path, int ins_len) 4068 { 4069 struct btrfs_key key; 4070 struct extent_buffer *leaf; 4071 struct btrfs_file_extent_item *fi; 4072 u64 extent_len = 0; 4073 u32 item_size; 4074 int ret; 4075 4076 leaf = path->nodes[0]; 4077 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 4078 4079 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY && 4080 key.type != BTRFS_EXTENT_CSUM_KEY); 4081 4082 if (btrfs_leaf_free_space(root, leaf) >= ins_len) 4083 return 0; 4084 4085 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 4086 if (key.type == BTRFS_EXTENT_DATA_KEY) { 4087 fi = btrfs_item_ptr(leaf, path->slots[0], 4088 struct btrfs_file_extent_item); 4089 extent_len = btrfs_file_extent_num_bytes(leaf, fi); 4090 } 4091 btrfs_release_path(path); 4092 4093 path->keep_locks = 1; 4094 path->search_for_split = 1; 4095 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 4096 path->search_for_split = 0; 4097 if (ret < 0) 4098 goto err; 4099 4100 ret = -EAGAIN; 4101 leaf = path->nodes[0]; 4102 /* if our item isn't there or got smaller, return now */ 4103 if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0])) 4104 goto err; 4105 4106 /* the leaf has changed, it now has room. return now */ 4107 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len) 4108 goto err; 4109 4110 if (key.type == BTRFS_EXTENT_DATA_KEY) { 4111 fi = btrfs_item_ptr(leaf, path->slots[0], 4112 struct btrfs_file_extent_item); 4113 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi)) 4114 goto err; 4115 } 4116 4117 btrfs_set_path_blocking(path); 4118 ret = split_leaf(trans, root, &key, path, ins_len, 1); 4119 if (ret) 4120 goto err; 4121 4122 path->keep_locks = 0; 4123 btrfs_unlock_up_safe(path, 1); 4124 return 0; 4125 err: 4126 path->keep_locks = 0; 4127 return ret; 4128 } 4129 4130 static noinline int split_item(struct btrfs_trans_handle *trans, 4131 struct btrfs_root *root, 4132 struct btrfs_path *path, 4133 struct btrfs_key *new_key, 4134 unsigned long split_offset) 4135 { 4136 struct extent_buffer *leaf; 4137 struct btrfs_item *item; 4138 struct btrfs_item *new_item; 4139 int slot; 4140 char *buf; 4141 u32 nritems; 4142 u32 item_size; 4143 u32 orig_offset; 4144 struct btrfs_disk_key disk_key; 4145 4146 leaf = path->nodes[0]; 4147 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item)); 4148 4149 btrfs_set_path_blocking(path); 4150 4151 item = btrfs_item_nr(leaf, path->slots[0]); 4152 orig_offset = btrfs_item_offset(leaf, item); 4153 item_size = btrfs_item_size(leaf, item); 4154 4155 buf = kmalloc(item_size, GFP_NOFS); 4156 if (!buf) 4157 return -ENOMEM; 4158 4159 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, 4160 path->slots[0]), item_size); 4161 4162 slot = path->slots[0] + 1; 4163 nritems = btrfs_header_nritems(leaf); 4164 if (slot != nritems) { 4165 /* shift the items */ 4166 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1), 4167 btrfs_item_nr_offset(slot), 4168 (nritems - slot) * sizeof(struct btrfs_item)); 4169 } 4170 4171 btrfs_cpu_key_to_disk(&disk_key, new_key); 4172 btrfs_set_item_key(leaf, &disk_key, slot); 4173 4174 new_item = btrfs_item_nr(leaf, slot); 4175 4176 btrfs_set_item_offset(leaf, new_item, orig_offset); 4177 btrfs_set_item_size(leaf, new_item, item_size - split_offset); 4178 4179 btrfs_set_item_offset(leaf, item, 4180 orig_offset + item_size - split_offset); 4181 btrfs_set_item_size(leaf, item, split_offset); 4182 4183 btrfs_set_header_nritems(leaf, nritems + 1); 4184 4185 /* write the data for the start of 
the original item */ 4186 write_extent_buffer(leaf, buf, 4187 btrfs_item_ptr_offset(leaf, path->slots[0]), 4188 split_offset); 4189 4190 /* write the data for the new item */ 4191 write_extent_buffer(leaf, buf + split_offset, 4192 btrfs_item_ptr_offset(leaf, slot), 4193 item_size - split_offset); 4194 btrfs_mark_buffer_dirty(leaf); 4195 4196 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0); 4197 kfree(buf); 4198 return 0; 4199 } 4200 4201 /* 4202 * This function splits a single item into two items, 4203 * giving 'new_key' to the new item and splitting the 4204 * old one at split_offset (from the start of the item). 4205 * 4206 * The path may be released by this operation. After 4207 * the split, the path is pointing to the old item. The 4208 * new item is going to be in the same node as the old one. 4209 * 4210 * Note, the item being split must be small enough to live alone on 4211 * a tree block with room for one extra struct btrfs_item 4212 * 4213 * This allows us to split the item in place, keeping a lock on the 4214 * leaf the entire time. 4215 */ 4216 int btrfs_split_item(struct btrfs_trans_handle *trans, 4217 struct btrfs_root *root, 4218 struct btrfs_path *path, 4219 struct btrfs_key *new_key, 4220 unsigned long split_offset) 4221 { 4222 int ret; 4223 ret = setup_leaf_for_split(trans, root, path, 4224 sizeof(struct btrfs_item)); 4225 if (ret) 4226 return ret; 4227 4228 ret = split_item(trans, root, path, new_key, split_offset); 4229 return ret; 4230 } 4231 4232 /* 4233 * This function duplicates an item, giving 'new_key' to the new item. 4234 * It guarantees both items live in the same tree leaf and the new item 4235 * is contiguous with the original item. 4236 * 4237 * This allows us to split a file extent in place, keeping a lock on the 4238 * leaf the entire time. 4239 */ 4240 int btrfs_duplicate_item(struct btrfs_trans_handle *trans, 4241 struct btrfs_root *root, 4242 struct btrfs_path *path, 4243 struct btrfs_key *new_key) 4244 { 4245 struct extent_buffer *leaf; 4246 int ret; 4247 u32 item_size; 4248 4249 leaf = path->nodes[0]; 4250 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 4251 ret = setup_leaf_for_split(trans, root, path, 4252 item_size + sizeof(struct btrfs_item)); 4253 if (ret) 4254 return ret; 4255 4256 path->slots[0]++; 4257 setup_items_for_insert(trans, root, path, new_key, &item_size, 4258 item_size, item_size + 4259 sizeof(struct btrfs_item), 1); 4260 leaf = path->nodes[0]; 4261 memcpy_extent_buffer(leaf, 4262 btrfs_item_ptr_offset(leaf, path->slots[0]), 4263 btrfs_item_ptr_offset(leaf, path->slots[0] - 1), 4264 item_size); 4265 return 0; 4266 } 4267 4268 /* 4269 * make the item pointed to by the path smaller. new_size indicates 4270 * how small to make it, and from_end tells us if we just chop bytes 4271 * off the end of the item or if we shift the item to chop bytes off 4272 * the front.
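 *
 * A minimal usage sketch (the sizes are hypothetical; the path is assumed
 * to already point at the item): cutting a 100 byte item down to its first
 * 60 bytes would be
 *
 *	btrfs_truncate_item(trans, root, path, 60, 1);
 *
 * With from_end == 0 the last 60 bytes are kept instead, the item data is
 * shifted and the key offset grows by the 40 bytes chopped off the front.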
4273 */ 4274 void btrfs_truncate_item(struct btrfs_trans_handle *trans, 4275 struct btrfs_root *root, 4276 struct btrfs_path *path, 4277 u32 new_size, int from_end) 4278 { 4279 int slot; 4280 struct extent_buffer *leaf; 4281 struct btrfs_item *item; 4282 u32 nritems; 4283 unsigned int data_end; 4284 unsigned int old_data_start; 4285 unsigned int old_size; 4286 unsigned int size_diff; 4287 int i; 4288 struct btrfs_map_token token; 4289 4290 btrfs_init_map_token(&token); 4291 4292 leaf = path->nodes[0]; 4293 slot = path->slots[0]; 4294 4295 old_size = btrfs_item_size_nr(leaf, slot); 4296 if (old_size == new_size) 4297 return; 4298 4299 nritems = btrfs_header_nritems(leaf); 4300 data_end = leaf_data_end(root, leaf); 4301 4302 old_data_start = btrfs_item_offset_nr(leaf, slot); 4303 4304 size_diff = old_size - new_size; 4305 4306 BUG_ON(slot < 0); 4307 BUG_ON(slot >= nritems); 4308 4309 /* 4310 * item0..itemN ... dataN.offset..dataN.size .. data0.size 4311 */ 4312 /* first correct the data pointers */ 4313 for (i = slot; i < nritems; i++) { 4314 u32 ioff; 4315 item = btrfs_item_nr(leaf, i); 4316 4317 ioff = btrfs_token_item_offset(leaf, item, &token); 4318 btrfs_set_token_item_offset(leaf, item, 4319 ioff + size_diff, &token); 4320 } 4321 4322 /* shift the data */ 4323 if (from_end) { 4324 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) + 4325 data_end + size_diff, btrfs_leaf_data(leaf) + 4326 data_end, old_data_start + new_size - data_end); 4327 } else { 4328 struct btrfs_disk_key disk_key; 4329 u64 offset; 4330 4331 btrfs_item_key(leaf, &disk_key, slot); 4332 4333 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) { 4334 unsigned long ptr; 4335 struct btrfs_file_extent_item *fi; 4336 4337 fi = btrfs_item_ptr(leaf, slot, 4338 struct btrfs_file_extent_item); 4339 fi = (struct btrfs_file_extent_item *)( 4340 (unsigned long)fi - size_diff); 4341 4342 if (btrfs_file_extent_type(leaf, fi) == 4343 BTRFS_FILE_EXTENT_INLINE) { 4344 ptr = btrfs_item_ptr_offset(leaf, slot); 4345 memmove_extent_buffer(leaf, ptr, 4346 (unsigned long)fi, 4347 offsetof(struct btrfs_file_extent_item, 4348 disk_bytenr)); 4349 } 4350 } 4351 4352 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) + 4353 data_end + size_diff, btrfs_leaf_data(leaf) + 4354 data_end, old_data_start - data_end); 4355 4356 offset = btrfs_disk_key_offset(&disk_key); 4357 btrfs_set_disk_key_offset(&disk_key, offset + size_diff); 4358 btrfs_set_item_key(leaf, &disk_key, slot); 4359 if (slot == 0) 4360 fixup_low_keys(trans, root, path, &disk_key, 1); 4361 } 4362 4363 item = btrfs_item_nr(leaf, slot); 4364 btrfs_set_item_size(leaf, item, new_size); 4365 btrfs_mark_buffer_dirty(leaf); 4366 4367 if (btrfs_leaf_free_space(root, leaf) < 0) { 4368 btrfs_print_leaf(root, leaf); 4369 BUG(); 4370 } 4371 } 4372 4373 /* 4374 * make the item pointed to by the path bigger, data_size is the new size. 
4375 */ 4376 void btrfs_extend_item(struct btrfs_trans_handle *trans, 4377 struct btrfs_root *root, struct btrfs_path *path, 4378 u32 data_size) 4379 { 4380 int slot; 4381 struct extent_buffer *leaf; 4382 struct btrfs_item *item; 4383 u32 nritems; 4384 unsigned int data_end; 4385 unsigned int old_data; 4386 unsigned int old_size; 4387 int i; 4388 struct btrfs_map_token token; 4389 4390 btrfs_init_map_token(&token); 4391 4392 leaf = path->nodes[0]; 4393 4394 nritems = btrfs_header_nritems(leaf); 4395 data_end = leaf_data_end(root, leaf); 4396 4397 if (btrfs_leaf_free_space(root, leaf) < data_size) { 4398 btrfs_print_leaf(root, leaf); 4399 BUG(); 4400 } 4401 slot = path->slots[0]; 4402 old_data = btrfs_item_end_nr(leaf, slot); 4403 4404 BUG_ON(slot < 0); 4405 if (slot >= nritems) { 4406 btrfs_print_leaf(root, leaf); 4407 printk(KERN_CRIT "slot %d too large, nritems %d\n", 4408 slot, nritems); 4409 BUG_ON(1); 4410 } 4411 4412 /* 4413 * item0..itemN ... dataN.offset..dataN.size .. data0.size 4414 */ 4415 /* first correct the data pointers */ 4416 for (i = slot; i < nritems; i++) { 4417 u32 ioff; 4418 item = btrfs_item_nr(leaf, i); 4419 4420 ioff = btrfs_token_item_offset(leaf, item, &token); 4421 btrfs_set_token_item_offset(leaf, item, 4422 ioff - data_size, &token); 4423 } 4424 4425 /* shift the data */ 4426 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) + 4427 data_end - data_size, btrfs_leaf_data(leaf) + 4428 data_end, old_data - data_end); 4429 4430 data_end = old_data; 4431 old_size = btrfs_item_size_nr(leaf, slot); 4432 item = btrfs_item_nr(leaf, slot); 4433 btrfs_set_item_size(leaf, item, old_size + data_size); 4434 btrfs_mark_buffer_dirty(leaf); 4435 4436 if (btrfs_leaf_free_space(root, leaf) < 0) { 4437 btrfs_print_leaf(root, leaf); 4438 BUG(); 4439 } 4440 } 4441 4442 /* 4443 * this is a helper for btrfs_insert_empty_items, the main goal here is 4444 * to save stack depth by doing the bulk of the work in a function 4445 * that doesn't call btrfs_search_slot 4446 */ 4447 void setup_items_for_insert(struct btrfs_trans_handle *trans, 4448 struct btrfs_root *root, struct btrfs_path *path, 4449 struct btrfs_key *cpu_key, u32 *data_size, 4450 u32 total_data, u32 total_size, int nr) 4451 { 4452 struct btrfs_item *item; 4453 int i; 4454 u32 nritems; 4455 unsigned int data_end; 4456 struct btrfs_disk_key disk_key; 4457 struct extent_buffer *leaf; 4458 int slot; 4459 struct btrfs_map_token token; 4460 4461 btrfs_init_map_token(&token); 4462 4463 leaf = path->nodes[0]; 4464 slot = path->slots[0]; 4465 4466 nritems = btrfs_header_nritems(leaf); 4467 data_end = leaf_data_end(root, leaf); 4468 4469 if (btrfs_leaf_free_space(root, leaf) < total_size) { 4470 btrfs_print_leaf(root, leaf); 4471 printk(KERN_CRIT "not enough freespace need %u have %d\n", 4472 total_size, btrfs_leaf_free_space(root, leaf)); 4473 BUG(); 4474 } 4475 4476 if (slot != nritems) { 4477 unsigned int old_data = btrfs_item_end_nr(leaf, slot); 4478 4479 if (old_data < data_end) { 4480 btrfs_print_leaf(root, leaf); 4481 printk(KERN_CRIT "slot %d old_data %d data_end %d\n", 4482 slot, old_data, data_end); 4483 BUG_ON(1); 4484 } 4485 /* 4486 * item0..itemN ... dataN.offset..dataN.size .. 
data0.size 4487 */ 4488 /* first correct the data pointers */ 4489 for (i = slot; i < nritems; i++) { 4490 u32 ioff; 4491 4492 item = btrfs_item_nr(leaf, i); 4493 ioff = btrfs_token_item_offset(leaf, item, &token); 4494 btrfs_set_token_item_offset(leaf, item, 4495 ioff - total_data, &token); 4496 } 4497 /* shift the items */ 4498 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr), 4499 btrfs_item_nr_offset(slot), 4500 (nritems - slot) * sizeof(struct btrfs_item)); 4501 4502 /* shift the data */ 4503 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) + 4504 data_end - total_data, btrfs_leaf_data(leaf) + 4505 data_end, old_data - data_end); 4506 data_end = old_data; 4507 } 4508 4509 /* setup the item for the new data */ 4510 for (i = 0; i < nr; i++) { 4511 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i); 4512 btrfs_set_item_key(leaf, &disk_key, slot + i); 4513 item = btrfs_item_nr(leaf, slot + i); 4514 btrfs_set_token_item_offset(leaf, item, 4515 data_end - data_size[i], &token); 4516 data_end -= data_size[i]; 4517 btrfs_set_token_item_size(leaf, item, data_size[i], &token); 4518 } 4519 4520 btrfs_set_header_nritems(leaf, nritems + nr); 4521 4522 if (slot == 0) { 4523 btrfs_cpu_key_to_disk(&disk_key, cpu_key); 4524 fixup_low_keys(trans, root, path, &disk_key, 1); 4525 } 4526 btrfs_unlock_up_safe(path, 1); 4527 btrfs_mark_buffer_dirty(leaf); 4528 4529 if (btrfs_leaf_free_space(root, leaf) < 0) { 4530 btrfs_print_leaf(root, leaf); 4531 BUG(); 4532 } 4533 } 4534 4535 /* 4536 * Given a key and some data, insert items into the tree. 4537 * This does all the path init required, making room in the tree if needed. 4538 */ 4539 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, 4540 struct btrfs_root *root, 4541 struct btrfs_path *path, 4542 struct btrfs_key *cpu_key, u32 *data_size, 4543 int nr) 4544 { 4545 int ret = 0; 4546 int slot; 4547 int i; 4548 u32 total_size = 0; 4549 u32 total_data = 0; 4550 4551 for (i = 0; i < nr; i++) 4552 total_data += data_size[i]; 4553 4554 total_size = total_data + (nr * sizeof(struct btrfs_item)); 4555 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1); 4556 if (ret == 0) 4557 return -EEXIST; 4558 if (ret < 0) 4559 return ret; 4560 4561 slot = path->slots[0]; 4562 BUG_ON(slot < 0); 4563 4564 setup_items_for_insert(trans, root, path, cpu_key, data_size, 4565 total_data, total_size, nr); 4566 return 0; 4567 } 4568 4569 /* 4570 * Given a key and some data, insert an item into the tree. 4571 * This does all the path init required, making room in the tree if needed. 4572 */ 4573 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root 4574 *root, struct btrfs_key *cpu_key, void *data, u32 4575 data_size) 4576 { 4577 int ret = 0; 4578 struct btrfs_path *path; 4579 struct extent_buffer *leaf; 4580 unsigned long ptr; 4581 4582 path = btrfs_alloc_path(); 4583 if (!path) 4584 return -ENOMEM; 4585 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size); 4586 if (!ret) { 4587 leaf = path->nodes[0]; 4588 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 4589 write_extent_buffer(leaf, data, ptr, data_size); 4590 btrfs_mark_buffer_dirty(leaf); 4591 } 4592 btrfs_free_path(path); 4593 return ret; 4594 } 4595 4596 /* 4597 * delete the pointer from a given node. 4598 * 4599 * the tree should have been previously balanced so the deletion does not 4600 * empty a node. 
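 * (the root is the only exception: if removing the last pointer empties
 * the root node, del_ptr() simply turns it back into an empty leaf)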
4601 */ 4602 static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root, 4603 struct btrfs_path *path, int level, int slot) 4604 { 4605 struct extent_buffer *parent = path->nodes[level]; 4606 u32 nritems; 4607 int ret; 4608 4609 nritems = btrfs_header_nritems(parent); 4610 if (slot != nritems - 1) { 4611 if (level) 4612 tree_mod_log_eb_move(root->fs_info, parent, slot, 4613 slot + 1, nritems - slot - 1); 4614 memmove_extent_buffer(parent, 4615 btrfs_node_key_ptr_offset(slot), 4616 btrfs_node_key_ptr_offset(slot + 1), 4617 sizeof(struct btrfs_key_ptr) * 4618 (nritems - slot - 1)); 4619 } else if (level) { 4620 ret = tree_mod_log_insert_key(root->fs_info, parent, slot, 4621 MOD_LOG_KEY_REMOVE); 4622 BUG_ON(ret < 0); 4623 } 4624 4625 nritems--; 4626 btrfs_set_header_nritems(parent, nritems); 4627 if (nritems == 0 && parent == root->node) { 4628 BUG_ON(btrfs_header_level(root->node) != 1); 4629 /* just turn the root into a leaf and break */ 4630 btrfs_set_header_level(root->node, 0); 4631 } else if (slot == 0) { 4632 struct btrfs_disk_key disk_key; 4633 4634 btrfs_node_key(parent, &disk_key, 0); 4635 fixup_low_keys(trans, root, path, &disk_key, level + 1); 4636 } 4637 btrfs_mark_buffer_dirty(parent); 4638 } 4639 4640 /* 4641 * a helper function to delete the leaf pointed to by path->slots[1] and 4642 * path->nodes[1]. 4643 * 4644 * This deletes the pointer in path->nodes[1] and frees the leaf 4645 * block extent. zero is returned if it all worked out, < 0 otherwise. 4646 * 4647 * The path must have already been setup for deleting the leaf, including 4648 * all the proper balancing. path->nodes[1] must be locked. 4649 */ 4650 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans, 4651 struct btrfs_root *root, 4652 struct btrfs_path *path, 4653 struct extent_buffer *leaf) 4654 { 4655 WARN_ON(btrfs_header_generation(leaf) != trans->transid); 4656 del_ptr(trans, root, path, 1, path->slots[1]); 4657 4658 /* 4659 * btrfs_free_extent is expensive, we want to make sure we 4660 * aren't holding any locks when we call it 4661 */ 4662 btrfs_unlock_up_safe(path, 0); 4663 4664 root_sub_used(root, leaf->len); 4665 4666 extent_buffer_get(leaf); 4667 btrfs_free_tree_block(trans, root, leaf, 0, 1); 4668 free_extent_buffer_stale(leaf); 4669 } 4670 /* 4671 * delete the item at the leaf level in path. 
If that empties 4672 * the leaf, remove it from the tree 4673 */ 4674 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, 4675 struct btrfs_path *path, int slot, int nr) 4676 { 4677 struct extent_buffer *leaf; 4678 struct btrfs_item *item; 4679 int last_off; 4680 int dsize = 0; 4681 int ret = 0; 4682 int wret; 4683 int i; 4684 u32 nritems; 4685 struct btrfs_map_token token; 4686 4687 btrfs_init_map_token(&token); 4688 4689 leaf = path->nodes[0]; 4690 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1); 4691 4692 for (i = 0; i < nr; i++) 4693 dsize += btrfs_item_size_nr(leaf, slot + i); 4694 4695 nritems = btrfs_header_nritems(leaf); 4696 4697 if (slot + nr != nritems) { 4698 int data_end = leaf_data_end(root, leaf); 4699 4700 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) + 4701 data_end + dsize, 4702 btrfs_leaf_data(leaf) + data_end, 4703 last_off - data_end); 4704 4705 for (i = slot + nr; i < nritems; i++) { 4706 u32 ioff; 4707 4708 item = btrfs_item_nr(leaf, i); 4709 ioff = btrfs_token_item_offset(leaf, item, &token); 4710 btrfs_set_token_item_offset(leaf, item, 4711 ioff + dsize, &token); 4712 } 4713 4714 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot), 4715 btrfs_item_nr_offset(slot + nr), 4716 sizeof(struct btrfs_item) * 4717 (nritems - slot - nr)); 4718 } 4719 btrfs_set_header_nritems(leaf, nritems - nr); 4720 nritems -= nr; 4721 4722 /* delete the leaf if we've emptied it */ 4723 if (nritems == 0) { 4724 if (leaf == root->node) { 4725 btrfs_set_header_level(leaf, 0); 4726 } else { 4727 btrfs_set_path_blocking(path); 4728 clean_tree_block(trans, root, leaf); 4729 btrfs_del_leaf(trans, root, path, leaf); 4730 } 4731 } else { 4732 int used = leaf_space_used(leaf, 0, nritems); 4733 if (slot == 0) { 4734 struct btrfs_disk_key disk_key; 4735 4736 btrfs_item_key(leaf, &disk_key, 0); 4737 fixup_low_keys(trans, root, path, &disk_key, 1); 4738 } 4739 4740 /* delete the leaf if it is mostly empty */ 4741 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) { 4742 /* push_leaf_left fixes the path. 4743 * make sure the path still points to our leaf 4744 * for possible call to del_ptr below 4745 */ 4746 slot = path->slots[1]; 4747 extent_buffer_get(leaf); 4748 4749 btrfs_set_path_blocking(path); 4750 wret = push_leaf_left(trans, root, path, 1, 1, 4751 1, (u32)-1); 4752 if (wret < 0 && wret != -ENOSPC) 4753 ret = wret; 4754 4755 if (path->nodes[0] == leaf && 4756 btrfs_header_nritems(leaf)) { 4757 wret = push_leaf_right(trans, root, path, 1, 4758 1, 1, 0); 4759 if (wret < 0 && wret != -ENOSPC) 4760 ret = wret; 4761 } 4762 4763 if (btrfs_header_nritems(leaf) == 0) { 4764 path->slots[1] = slot; 4765 btrfs_del_leaf(trans, root, path, leaf); 4766 free_extent_buffer(leaf); 4767 ret = 0; 4768 } else { 4769 /* if we're still in the path, make sure 4770 * we're dirty. Otherwise, one of the 4771 * push_leaf functions must have already 4772 * dirtied this buffer 4773 */ 4774 if (path->nodes[0] == leaf) 4775 btrfs_mark_buffer_dirty(leaf); 4776 free_extent_buffer(leaf); 4777 } 4778 } else { 4779 btrfs_mark_buffer_dirty(leaf); 4780 } 4781 } 4782 return ret; 4783 } 4784 4785 /* 4786 * search the tree again to find a leaf with lesser keys 4787 * returns 0 if it found something or 1 if there are no lesser leaves. 4788 * returns < 0 on io errors. 4789 * 4790 * This may release the path, and so you may lose any locks held at the 4791 * time you call it. 
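 *
 * A typical (hypothetical) caller loop looks like
 *
 *	ret = btrfs_prev_leaf(root, path);
 *	if (ret < 0)
 *		goto out;	(io error)
 *	if (ret > 0)
 *		break;		(no leaf with lesser keys exists)
 *	leaf = path->nodes[0];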
4792 */ 4793 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path) 4794 { 4795 struct btrfs_key key; 4796 struct btrfs_disk_key found_key; 4797 int ret; 4798 4799 btrfs_item_key_to_cpu(path->nodes[0], &key, 0); 4800 4801 if (key.offset > 0) 4802 key.offset--; 4803 else if (key.type > 0) 4804 key.type--; 4805 else if (key.objectid > 0) 4806 key.objectid--; 4807 else 4808 return 1; 4809 4810 btrfs_release_path(path); 4811 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4812 if (ret < 0) 4813 return ret; 4814 btrfs_item_key(path->nodes[0], &found_key, 0); 4815 ret = comp_keys(&found_key, &key); 4816 if (ret < 0) 4817 return 0; 4818 return 1; 4819 } 4820 4821 /* 4822 * A helper function to walk down the tree starting at min_key, and looking 4823 * for nodes or leaves that are have a minimum transaction id. 4824 * This is used by the btree defrag code, and tree logging 4825 * 4826 * This does not cow, but it does stuff the starting key it finds back 4827 * into min_key, so you can call btrfs_search_slot with cow=1 on the 4828 * key and get a writable path. 4829 * 4830 * This does lock as it descends, and path->keep_locks should be set 4831 * to 1 by the caller. 4832 * 4833 * This honors path->lowest_level to prevent descent past a given level 4834 * of the tree. 4835 * 4836 * min_trans indicates the oldest transaction that you are interested 4837 * in walking through. Any nodes or leaves older than min_trans are 4838 * skipped over (without reading them). 4839 * 4840 * returns zero if something useful was found, < 0 on error and 1 if there 4841 * was nothing in the tree that matched the search criteria. 4842 */ 4843 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key, 4844 struct btrfs_key *max_key, 4845 struct btrfs_path *path, 4846 u64 min_trans) 4847 { 4848 struct extent_buffer *cur; 4849 struct btrfs_key found_key; 4850 int slot; 4851 int sret; 4852 u32 nritems; 4853 int level; 4854 int ret = 1; 4855 4856 WARN_ON(!path->keep_locks); 4857 again: 4858 cur = btrfs_read_lock_root_node(root); 4859 level = btrfs_header_level(cur); 4860 WARN_ON(path->nodes[level]); 4861 path->nodes[level] = cur; 4862 path->locks[level] = BTRFS_READ_LOCK; 4863 4864 if (btrfs_header_generation(cur) < min_trans) { 4865 ret = 1; 4866 goto out; 4867 } 4868 while (1) { 4869 nritems = btrfs_header_nritems(cur); 4870 level = btrfs_header_level(cur); 4871 sret = bin_search(cur, min_key, level, &slot); 4872 4873 /* at the lowest level, we're done, setup the path and exit */ 4874 if (level == path->lowest_level) { 4875 if (slot >= nritems) 4876 goto find_next_key; 4877 ret = 0; 4878 path->slots[level] = slot; 4879 btrfs_item_key_to_cpu(cur, &found_key, slot); 4880 goto out; 4881 } 4882 if (sret && slot > 0) 4883 slot--; 4884 /* 4885 * check this node pointer against the min_trans parameters. 4886 * If it is too old, old, skip to the next one. 
4887 */ 4888 while (slot < nritems) { 4889 u64 blockptr; 4890 u64 gen; 4891 4892 blockptr = btrfs_node_blockptr(cur, slot); 4893 gen = btrfs_node_ptr_generation(cur, slot); 4894 if (gen < min_trans) { 4895 slot++; 4896 continue; 4897 } 4898 break; 4899 } 4900 find_next_key: 4901 /* 4902 * we didn't find a candidate key in this node, walk forward 4903 * and find another one 4904 */ 4905 if (slot >= nritems) { 4906 path->slots[level] = slot; 4907 btrfs_set_path_blocking(path); 4908 sret = btrfs_find_next_key(root, path, min_key, level, 4909 min_trans); 4910 if (sret == 0) { 4911 btrfs_release_path(path); 4912 goto again; 4913 } else { 4914 goto out; 4915 } 4916 } 4917 /* save our key for returning back */ 4918 btrfs_node_key_to_cpu(cur, &found_key, slot); 4919 path->slots[level] = slot; 4920 if (level == path->lowest_level) { 4921 ret = 0; 4922 unlock_up(path, level, 1, 0, NULL); 4923 goto out; 4924 } 4925 btrfs_set_path_blocking(path); 4926 cur = read_node_slot(root, cur, slot); 4927 BUG_ON(!cur); /* -ENOMEM */ 4928 4929 btrfs_tree_read_lock(cur); 4930 4931 path->locks[level - 1] = BTRFS_READ_LOCK; 4932 path->nodes[level - 1] = cur; 4933 unlock_up(path, level, 1, 0, NULL); 4934 btrfs_clear_path_blocking(path, NULL, 0); 4935 } 4936 out: 4937 if (ret == 0) 4938 memcpy(min_key, &found_key, sizeof(found_key)); 4939 btrfs_set_path_blocking(path); 4940 return ret; 4941 } 4942 4943 static void tree_move_down(struct btrfs_root *root, 4944 struct btrfs_path *path, 4945 int *level, int root_level) 4946 { 4947 BUG_ON(*level == 0); 4948 path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level], 4949 path->slots[*level]); 4950 path->slots[*level - 1] = 0; 4951 (*level)--; 4952 } 4953 4954 static int tree_move_next_or_upnext(struct btrfs_root *root, 4955 struct btrfs_path *path, 4956 int *level, int root_level) 4957 { 4958 int ret = 0; 4959 int nritems; 4960 nritems = btrfs_header_nritems(path->nodes[*level]); 4961 4962 path->slots[*level]++; 4963 4964 while (path->slots[*level] >= nritems) { 4965 if (*level == root_level) 4966 return -1; 4967 4968 /* move upnext */ 4969 path->slots[*level] = 0; 4970 free_extent_buffer(path->nodes[*level]); 4971 path->nodes[*level] = NULL; 4972 (*level)++; 4973 path->slots[*level]++; 4974 4975 nritems = btrfs_header_nritems(path->nodes[*level]); 4976 ret = 1; 4977 } 4978 return ret; 4979 } 4980 4981 /* 4982 * Returns 1 if it had to move up and next. 0 is returned if it moved only next 4983 * or down. 
4984 */ 4985 static int tree_advance(struct btrfs_root *root, 4986 struct btrfs_path *path, 4987 int *level, int root_level, 4988 int allow_down, 4989 struct btrfs_key *key) 4990 { 4991 int ret; 4992 4993 if (*level == 0 || !allow_down) { 4994 ret = tree_move_next_or_upnext(root, path, level, root_level); 4995 } else { 4996 tree_move_down(root, path, level, root_level); 4997 ret = 0; 4998 } 4999 if (ret >= 0) { 5000 if (*level == 0) 5001 btrfs_item_key_to_cpu(path->nodes[*level], key, 5002 path->slots[*level]); 5003 else 5004 btrfs_node_key_to_cpu(path->nodes[*level], key, 5005 path->slots[*level]); 5006 } 5007 return ret; 5008 } 5009 5010 static int tree_compare_item(struct btrfs_root *left_root, 5011 struct btrfs_path *left_path, 5012 struct btrfs_path *right_path, 5013 char *tmp_buf) 5014 { 5015 int cmp; 5016 int len1, len2; 5017 unsigned long off1, off2; 5018 5019 len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]); 5020 len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]); 5021 if (len1 != len2) 5022 return 1; 5023 5024 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]); 5025 off2 = btrfs_item_ptr_offset(right_path->nodes[0], 5026 right_path->slots[0]); 5027 5028 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1); 5029 5030 cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1); 5031 if (cmp) 5032 return 1; 5033 return 0; 5034 } 5035 5036 #define ADVANCE 1 5037 #define ADVANCE_ONLY_NEXT -1 5038 5039 /* 5040 * This function compares two trees and calls the provided callback for 5041 * every changed/new/deleted item it finds. 5042 * If shared tree blocks are encountered, whole subtrees are skipped, making 5043 * the compare pretty fast on snapshotted subvolumes. 5044 * 5045 * This currently works on commit roots only. As commit roots are read only, 5046 * we don't do any locking. The commit roots are protected with transactions. 5047 * Transactions are ended and rejoined when a commit is tried in between. 5048 * 5049 * This function checks for modifications done to the trees while comparing. 5050 * If it detects a change, it aborts immediately. 
5051 */ 5052 int btrfs_compare_trees(struct btrfs_root *left_root, 5053 struct btrfs_root *right_root, 5054 btrfs_changed_cb_t changed_cb, void *ctx) 5055 { 5056 int ret; 5057 int cmp; 5058 struct btrfs_trans_handle *trans = NULL; 5059 struct btrfs_path *left_path = NULL; 5060 struct btrfs_path *right_path = NULL; 5061 struct btrfs_key left_key; 5062 struct btrfs_key right_key; 5063 char *tmp_buf = NULL; 5064 int left_root_level; 5065 int right_root_level; 5066 int left_level; 5067 int right_level; 5068 int left_end_reached; 5069 int right_end_reached; 5070 int advance_left; 5071 int advance_right; 5072 u64 left_blockptr; 5073 u64 right_blockptr; 5074 u64 left_start_ctransid; 5075 u64 right_start_ctransid; 5076 u64 ctransid; 5077 5078 left_path = btrfs_alloc_path(); 5079 if (!left_path) { 5080 ret = -ENOMEM; 5081 goto out; 5082 } 5083 right_path = btrfs_alloc_path(); 5084 if (!right_path) { 5085 ret = -ENOMEM; 5086 goto out; 5087 } 5088 5089 tmp_buf = kmalloc(left_root->leafsize, GFP_NOFS); 5090 if (!tmp_buf) { 5091 ret = -ENOMEM; 5092 goto out; 5093 } 5094 5095 left_path->search_commit_root = 1; 5096 left_path->skip_locking = 1; 5097 right_path->search_commit_root = 1; 5098 right_path->skip_locking = 1; 5099 5100 spin_lock(&left_root->root_item_lock); 5101 left_start_ctransid = btrfs_root_ctransid(&left_root->root_item); 5102 spin_unlock(&left_root->root_item_lock); 5103 5104 spin_lock(&right_root->root_item_lock); 5105 right_start_ctransid = btrfs_root_ctransid(&right_root->root_item); 5106 spin_unlock(&right_root->root_item_lock); 5107 5108 trans = btrfs_join_transaction(left_root); 5109 if (IS_ERR(trans)) { 5110 ret = PTR_ERR(trans); 5111 trans = NULL; 5112 goto out; 5113 } 5114 5115 /* 5116 * Strategy: Go to the first items of both trees. Then do 5117 * 5118 * If both trees are at level 0 5119 * Compare keys of current items 5120 * If left < right treat left item as new, advance left tree 5121 * and repeat 5122 * If left > right treat right item as deleted, advance right tree 5123 * and repeat 5124 * If left == right do deep compare of items, treat as changed if 5125 * needed, advance both trees and repeat 5126 * If both trees are at the same level but not at level 0 5127 * Compare keys of current nodes/leafs 5128 * If left < right advance left tree and repeat 5129 * If left > right advance right tree and repeat 5130 * If left == right compare blockptrs of the next nodes/leafs 5131 * If they match advance both trees but stay at the same level 5132 * and repeat 5133 * If they don't match advance both trees while allowing to go 5134 * deeper and repeat 5135 * If tree levels are different 5136 * Advance the tree that needs it and repeat 5137 * 5138 * Advancing a tree means: 5139 * If we are at level 0, try to go to the next slot. If that's not 5140 * possible, go one level up and repeat. Stop when we found a level 5141 * where we could go to the next slot. We may at this point be on a 5142 * node or a leaf. 5143 * 5144 * If we are not at level 0 and not on shared tree blocks, go one 5145 * level deeper. 5146 * 5147 * If we are not at level 0 and on shared tree blocks, go one slot to 5148 * the right if possible or go up and right. 
5149 */ 5150 5151 left_level = btrfs_header_level(left_root->commit_root); 5152 left_root_level = left_level; 5153 left_path->nodes[left_level] = left_root->commit_root; 5154 extent_buffer_get(left_path->nodes[left_level]); 5155 5156 right_level = btrfs_header_level(right_root->commit_root); 5157 right_root_level = right_level; 5158 right_path->nodes[right_level] = right_root->commit_root; 5159 extent_buffer_get(right_path->nodes[right_level]); 5160 5161 if (left_level == 0) 5162 btrfs_item_key_to_cpu(left_path->nodes[left_level], 5163 &left_key, left_path->slots[left_level]); 5164 else 5165 btrfs_node_key_to_cpu(left_path->nodes[left_level], 5166 &left_key, left_path->slots[left_level]); 5167 if (right_level == 0) 5168 btrfs_item_key_to_cpu(right_path->nodes[right_level], 5169 &right_key, right_path->slots[right_level]); 5170 else 5171 btrfs_node_key_to_cpu(right_path->nodes[right_level], 5172 &right_key, right_path->slots[right_level]); 5173 5174 left_end_reached = right_end_reached = 0; 5175 advance_left = advance_right = 0; 5176 5177 while (1) { 5178 /* 5179 * We need to make sure the transaction does not get committed 5180 * while we do anything on commit roots. This means, we need to 5181 * join and leave transactions for every item that we process. 5182 */ 5183 if (trans && btrfs_should_end_transaction(trans, left_root)) { 5184 btrfs_release_path(left_path); 5185 btrfs_release_path(right_path); 5186 5187 ret = btrfs_end_transaction(trans, left_root); 5188 trans = NULL; 5189 if (ret < 0) 5190 goto out; 5191 } 5192 /* now rejoin the transaction */ 5193 if (!trans) { 5194 trans = btrfs_join_transaction(left_root); 5195 if (IS_ERR(trans)) { 5196 ret = PTR_ERR(trans); 5197 trans = NULL; 5198 goto out; 5199 } 5200 5201 spin_lock(&left_root->root_item_lock); 5202 ctransid = btrfs_root_ctransid(&left_root->root_item); 5203 spin_unlock(&left_root->root_item_lock); 5204 if (ctransid != left_start_ctransid) 5205 left_start_ctransid = 0; 5206 5207 spin_lock(&right_root->root_item_lock); 5208 ctransid = btrfs_root_ctransid(&right_root->root_item); 5209 spin_unlock(&right_root->root_item_lock); 5210 if (ctransid != right_start_ctransid) 5211 right_start_ctransid = 0; 5212 5213 if (!left_start_ctransid || !right_start_ctransid) { 5214 WARN(1, KERN_WARNING 5215 "btrfs: btrfs_compare_tree detected " 5216 "a change in one of the trees while " 5217 "iterating. 
This is probably a " 5218 "bug.\n"); 5219 ret = -EIO; 5220 goto out; 5221 } 5222 5223 /* 5224 * the commit root may have changed, so start again 5225 * where we stopped 5226 */ 5227 left_path->lowest_level = left_level; 5228 right_path->lowest_level = right_level; 5229 ret = btrfs_search_slot(NULL, left_root, 5230 &left_key, left_path, 0, 0); 5231 if (ret < 0) 5232 goto out; 5233 ret = btrfs_search_slot(NULL, right_root, 5234 &right_key, right_path, 0, 0); 5235 if (ret < 0) 5236 goto out; 5237 } 5238 5239 if (advance_left && !left_end_reached) { 5240 ret = tree_advance(left_root, left_path, &left_level, 5241 left_root_level, 5242 advance_left != ADVANCE_ONLY_NEXT, 5243 &left_key); 5244 if (ret < 0) 5245 left_end_reached = ADVANCE; 5246 advance_left = 0; 5247 } 5248 if (advance_right && !right_end_reached) { 5249 ret = tree_advance(right_root, right_path, &right_level, 5250 right_root_level, 5251 advance_right != ADVANCE_ONLY_NEXT, 5252 &right_key); 5253 if (ret < 0) 5254 right_end_reached = ADVANCE; 5255 advance_right = 0; 5256 } 5257 5258 if (left_end_reached && right_end_reached) { 5259 ret = 0; 5260 goto out; 5261 } else if (left_end_reached) { 5262 if (right_level == 0) { 5263 ret = changed_cb(left_root, right_root, 5264 left_path, right_path, 5265 &right_key, 5266 BTRFS_COMPARE_TREE_DELETED, 5267 ctx); 5268 if (ret < 0) 5269 goto out; 5270 } 5271 advance_right = ADVANCE; 5272 continue; 5273 } else if (right_end_reached) { 5274 if (left_level == 0) { 5275 ret = changed_cb(left_root, right_root, 5276 left_path, right_path, 5277 &left_key, 5278 BTRFS_COMPARE_TREE_NEW, 5279 ctx); 5280 if (ret < 0) 5281 goto out; 5282 } 5283 advance_left = ADVANCE; 5284 continue; 5285 } 5286 5287 if (left_level == 0 && right_level == 0) { 5288 cmp = btrfs_comp_cpu_keys(&left_key, &right_key); 5289 if (cmp < 0) { 5290 ret = changed_cb(left_root, right_root, 5291 left_path, right_path, 5292 &left_key, 5293 BTRFS_COMPARE_TREE_NEW, 5294 ctx); 5295 if (ret < 0) 5296 goto out; 5297 advance_left = ADVANCE; 5298 } else if (cmp > 0) { 5299 ret = changed_cb(left_root, right_root, 5300 left_path, right_path, 5301 &right_key, 5302 BTRFS_COMPARE_TREE_DELETED, 5303 ctx); 5304 if (ret < 0) 5305 goto out; 5306 advance_right = ADVANCE; 5307 } else { 5308 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0])); 5309 ret = tree_compare_item(left_root, left_path, 5310 right_path, tmp_buf); 5311 if (ret) { 5312 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0])); 5313 ret = changed_cb(left_root, right_root, 5314 left_path, right_path, 5315 &left_key, 5316 BTRFS_COMPARE_TREE_CHANGED, 5317 ctx); 5318 if (ret < 0) 5319 goto out; 5320 } 5321 advance_left = ADVANCE; 5322 advance_right = ADVANCE; 5323 } 5324 } else if (left_level == right_level) { 5325 cmp = btrfs_comp_cpu_keys(&left_key, &right_key); 5326 if (cmp < 0) { 5327 advance_left = ADVANCE; 5328 } else if (cmp > 0) { 5329 advance_right = ADVANCE; 5330 } else { 5331 left_blockptr = btrfs_node_blockptr( 5332 left_path->nodes[left_level], 5333 left_path->slots[left_level]); 5334 right_blockptr = btrfs_node_blockptr( 5335 right_path->nodes[right_level], 5336 right_path->slots[right_level]); 5337 if (left_blockptr == right_blockptr) { 5338 /* 5339 * As we're on a shared block, don't 5340 * allow to go deeper. 
5341 */ 5342 advance_left = ADVANCE_ONLY_NEXT; 5343 advance_right = ADVANCE_ONLY_NEXT; 5344 } else { 5345 advance_left = ADVANCE; 5346 advance_right = ADVANCE; 5347 } 5348 } 5349 } else if (left_level < right_level) { 5350 advance_right = ADVANCE; 5351 } else { 5352 advance_left = ADVANCE; 5353 } 5354 } 5355 5356 out: 5357 btrfs_free_path(left_path); 5358 btrfs_free_path(right_path); 5359 kfree(tmp_buf); 5360 5361 if (trans) { 5362 if (!ret) 5363 ret = btrfs_end_transaction(trans, left_root); 5364 else 5365 btrfs_end_transaction(trans, left_root); 5366 } 5367 5368 return ret; 5369 } 5370 5371 /* 5372 * this is similar to btrfs_next_leaf, but does not try to preserve 5373 * and fixup the path. It looks for and returns the next key in the 5374 * tree based on the current path and the min_trans parameters. 5375 * 5376 * 0 is returned if another key is found, < 0 if there are any errors 5377 * and 1 is returned if there are no higher keys in the tree 5378 * 5379 * path->keep_locks should be set to 1 on the search made before 5380 * calling this function. 5381 */ 5382 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, 5383 struct btrfs_key *key, int level, u64 min_trans) 5384 { 5385 int slot; 5386 struct extent_buffer *c; 5387 5388 WARN_ON(!path->keep_locks); 5389 while (level < BTRFS_MAX_LEVEL) { 5390 if (!path->nodes[level]) 5391 return 1; 5392 5393 slot = path->slots[level] + 1; 5394 c = path->nodes[level]; 5395 next: 5396 if (slot >= btrfs_header_nritems(c)) { 5397 int ret; 5398 int orig_lowest; 5399 struct btrfs_key cur_key; 5400 if (level + 1 >= BTRFS_MAX_LEVEL || 5401 !path->nodes[level + 1]) 5402 return 1; 5403 5404 if (path->locks[level + 1]) { 5405 level++; 5406 continue; 5407 } 5408 5409 slot = btrfs_header_nritems(c) - 1; 5410 if (level == 0) 5411 btrfs_item_key_to_cpu(c, &cur_key, slot); 5412 else 5413 btrfs_node_key_to_cpu(c, &cur_key, slot); 5414 5415 orig_lowest = path->lowest_level; 5416 btrfs_release_path(path); 5417 path->lowest_level = level; 5418 ret = btrfs_search_slot(NULL, root, &cur_key, path, 5419 0, 0); 5420 path->lowest_level = orig_lowest; 5421 if (ret < 0) 5422 return ret; 5423 5424 c = path->nodes[level]; 5425 slot = path->slots[level]; 5426 if (ret == 0) 5427 slot++; 5428 goto next; 5429 } 5430 5431 if (level == 0) 5432 btrfs_item_key_to_cpu(c, key, slot); 5433 else { 5434 u64 gen = btrfs_node_ptr_generation(c, slot); 5435 5436 if (gen < min_trans) { 5437 slot++; 5438 goto next; 5439 } 5440 btrfs_node_key_to_cpu(c, key, slot); 5441 } 5442 return 0; 5443 } 5444 return 1; 5445 } 5446 5447 /* 5448 * search the tree again to find a leaf with greater keys 5449 * returns 0 if it found something or 1 if there are no greater leaves. 5450 * returns < 0 on io errors. 5451 */ 5452 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) 5453 { 5454 return btrfs_next_old_leaf(root, path, 0); 5455 } 5456 5457 /* Release the path up to but not including the given level */ 5458 static void btrfs_release_level(struct btrfs_path *path, int level) 5459 { 5460 int i; 5461 5462 for (i = 0; i < level; i++) { 5463 path->slots[i] = 0; 5464 if (!path->nodes[i]) 5465 continue; 5466 if (path->locks[i]) { 5467 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]); 5468 path->locks[i] = 0; 5469 } 5470 free_extent_buffer(path->nodes[i]); 5471 path->nodes[i] = NULL; 5472 } 5473 } 5474 5475 /* 5476 * This function assumes 2 things 5477 * 5478 * 1) You are using path->keep_locks 5479 * 2) You are not inserting items. 
5480 * 5481 * If either of these are not true do not use this function. If you need a next 5482 * leaf with either of these not being true then this function can be easily 5483 * adapted to do that, but at the moment these are the limitations. 5484 */ 5485 int btrfs_next_leaf_write(struct btrfs_trans_handle *trans, 5486 struct btrfs_root *root, struct btrfs_path *path, 5487 int del) 5488 { 5489 struct extent_buffer *b; 5490 struct btrfs_key key; 5491 u32 nritems; 5492 int level = 1; 5493 int slot; 5494 int ret = 1; 5495 int write_lock_level = BTRFS_MAX_LEVEL; 5496 int ins_len = del ? -1 : 0; 5497 5498 WARN_ON(!(path->keep_locks || path->really_keep_locks)); 5499 5500 nritems = btrfs_header_nritems(path->nodes[0]); 5501 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1); 5502 5503 while (path->nodes[level]) { 5504 nritems = btrfs_header_nritems(path->nodes[level]); 5505 if (!(path->locks[level] & BTRFS_WRITE_LOCK)) { 5506 search: 5507 btrfs_release_path(path); 5508 ret = btrfs_search_slot(trans, root, &key, path, 5509 ins_len, 1); 5510 if (ret < 0) 5511 goto out; 5512 level = 1; 5513 continue; 5514 } 5515 5516 if (path->slots[level] >= nritems - 1) { 5517 level++; 5518 continue; 5519 } 5520 5521 btrfs_release_level(path, level); 5522 break; 5523 } 5524 5525 if (!path->nodes[level]) { 5526 ret = 1; 5527 goto out; 5528 } 5529 5530 path->slots[level]++; 5531 b = path->nodes[level]; 5532 5533 while (b) { 5534 level = btrfs_header_level(b); 5535 5536 if (!should_cow_block(trans, root, b)) 5537 goto cow_done; 5538 5539 btrfs_set_path_blocking(path); 5540 ret = btrfs_cow_block(trans, root, b, 5541 path->nodes[level + 1], 5542 path->slots[level + 1], &b); 5543 if (ret) 5544 goto out; 5545 cow_done: 5546 path->nodes[level] = b; 5547 btrfs_clear_path_blocking(path, NULL, 0); 5548 if (level != 0) { 5549 ret = setup_nodes_for_search(trans, root, path, b, 5550 level, ins_len, 5551 &write_lock_level); 5552 if (ret == -EAGAIN) 5553 goto search; 5554 if (ret) 5555 goto out; 5556 5557 b = path->nodes[level]; 5558 slot = path->slots[level]; 5559 5560 ret = read_block_for_search(trans, root, path, 5561 &b, level, slot, &key, 0); 5562 if (ret == -EAGAIN) 5563 goto search; 5564 if (ret) 5565 goto out; 5566 level = btrfs_header_level(b); 5567 if (!btrfs_try_tree_write_lock(b)) { 5568 btrfs_set_path_blocking(path); 5569 btrfs_tree_lock(b); 5570 btrfs_clear_path_blocking(path, b, 5571 BTRFS_WRITE_LOCK); 5572 } 5573 path->locks[level] = BTRFS_WRITE_LOCK; 5574 path->nodes[level] = b; 5575 path->slots[level] = 0; 5576 } else { 5577 path->slots[level] = 0; 5578 ret = 0; 5579 break; 5580 } 5581 } 5582 5583 out: 5584 if (ret) 5585 btrfs_release_path(path); 5586 5587 return ret; 5588 } 5589 5590 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, 5591 u64 time_seq) 5592 { 5593 int slot; 5594 int level; 5595 struct extent_buffer *c; 5596 struct extent_buffer *next; 5597 struct btrfs_key key; 5598 u32 nritems; 5599 int ret; 5600 int old_spinning = path->leave_spinning; 5601 int next_rw_lock = 0; 5602 5603 nritems = btrfs_header_nritems(path->nodes[0]); 5604 if (nritems == 0) 5605 return 1; 5606 5607 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1); 5608 again: 5609 level = 1; 5610 next = NULL; 5611 next_rw_lock = 0; 5612 btrfs_release_path(path); 5613 5614 path->keep_locks = 1; 5615 path->leave_spinning = 1; 5616 5617 if (time_seq) 5618 ret = btrfs_search_old_slot(root, &key, path, time_seq); 5619 else 5620 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5621 path->keep_locks = 0; 
5622 5623 if (ret < 0) 5624 return ret; 5625 5626 nritems = btrfs_header_nritems(path->nodes[0]); 5627 /* 5628 * by releasing the path above we dropped all our locks. A balance 5629 * could have added more items next to the key that used to be 5630 * at the very end of the block. So, check again here and 5631 * advance the path if there are now more items available. 5632 */ 5633 if (nritems > 0 && path->slots[0] < nritems - 1) { 5634 if (ret == 0) 5635 path->slots[0]++; 5636 ret = 0; 5637 goto done; 5638 } 5639 5640 while (level < BTRFS_MAX_LEVEL) { 5641 if (!path->nodes[level]) { 5642 ret = 1; 5643 goto done; 5644 } 5645 5646 slot = path->slots[level] + 1; 5647 c = path->nodes[level]; 5648 if (slot >= btrfs_header_nritems(c)) { 5649 level++; 5650 if (level == BTRFS_MAX_LEVEL) { 5651 ret = 1; 5652 goto done; 5653 } 5654 continue; 5655 } 5656 5657 if (next) { 5658 btrfs_tree_unlock_rw(next, next_rw_lock); 5659 free_extent_buffer(next); 5660 } 5661 5662 next = c; 5663 next_rw_lock = path->locks[level]; 5664 ret = read_block_for_search(NULL, root, path, &next, level, 5665 slot, &key, 0); 5666 if (ret == -EAGAIN) 5667 goto again; 5668 5669 if (ret < 0) { 5670 btrfs_release_path(path); 5671 goto done; 5672 } 5673 5674 if (!path->skip_locking) { 5675 ret = btrfs_try_tree_read_lock(next); 5676 if (!ret && time_seq) { 5677 /* 5678 * If we don't get the lock, we may be racing 5679 * with push_leaf_left, holding that lock while 5680 * itself waiting for the leaf we've currently 5681 * locked. To solve this situation, we give up 5682 * on our lock and cycle. 5683 */ 5684 free_extent_buffer(next); 5685 btrfs_release_path(path); 5686 cond_resched(); 5687 goto again; 5688 } 5689 if (!ret) { 5690 btrfs_set_path_blocking(path); 5691 btrfs_tree_read_lock(next); 5692 btrfs_clear_path_blocking(path, next, 5693 BTRFS_READ_LOCK); 5694 } 5695 next_rw_lock = BTRFS_READ_LOCK; 5696 } 5697 break; 5698 } 5699 path->slots[level] = slot; 5700 while (1) { 5701 level--; 5702 c = path->nodes[level]; 5703 if (path->locks[level]) 5704 btrfs_tree_unlock_rw(c, path->locks[level]); 5705 5706 free_extent_buffer(c); 5707 path->nodes[level] = next; 5708 path->slots[level] = 0; 5709 if (!path->skip_locking) 5710 path->locks[level] = next_rw_lock; 5711 if (!level) 5712 break; 5713 5714 ret = read_block_for_search(NULL, root, path, &next, level, 5715 0, &key, 0); 5716 if (ret == -EAGAIN) 5717 goto again; 5718 5719 if (ret < 0) { 5720 btrfs_release_path(path); 5721 goto done; 5722 } 5723 5724 if (!path->skip_locking) { 5725 ret = btrfs_try_tree_read_lock(next); 5726 if (!ret) { 5727 btrfs_set_path_blocking(path); 5728 btrfs_tree_read_lock(next); 5729 btrfs_clear_path_blocking(path, next, 5730 BTRFS_READ_LOCK); 5731 } 5732 next_rw_lock = BTRFS_READ_LOCK; 5733 } 5734 } 5735 ret = 0; 5736 done: 5737 unlock_up(path, 0, 1, 0, NULL); 5738 path->leave_spinning = old_spinning; 5739 if (!old_spinning) 5740 btrfs_set_path_blocking(path); 5741 5742 return ret; 5743 } 5744 5745 /* 5746 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps 5747 * searching until it gets past min_objectid or finds an item of 'type' 5748 * 5749 * returns 0 if something is found, 1 if nothing was found and < 0 on error 5750 */ 5751 int btrfs_previous_item(struct btrfs_root *root, 5752 struct btrfs_path *path, u64 min_objectid, 5753 int type) 5754 { 5755 struct btrfs_key found_key; 5756 struct extent_buffer *leaf; 5757 u32 nritems; 5758 int ret; 5759 5760 while (1) { 5761 if (path->slots[0] == 0) { 5762 btrfs_set_path_blocking(path); 5763 ret = 
btrfs_prev_leaf(root, path); 5764 if (ret != 0) 5765 return ret; 5766 } else { 5767 path->slots[0]--; 5768 } 5769 leaf = path->nodes[0]; 5770 nritems = btrfs_header_nritems(leaf); 5771 if (nritems == 0) 5772 return 1; 5773 if (path->slots[0] == nritems) 5774 path->slots[0]--; 5775 5776 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 5777 if (found_key.objectid < min_objectid) 5778 break; 5779 if (found_key.type == type) 5780 return 0; 5781 if (found_key.objectid == min_objectid && 5782 found_key.type < type) 5783 break; 5784 } 5785 return 1; 5786 } 5787
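
/*
 * Purely illustrative: a minimal sketch of how callers elsewhere in btrfs
 * typically drive the search helpers above (btrfs_search_slot,
 * btrfs_next_leaf and the path alloc/free routines). The function name,
 * the objectid argument and the "process the item" step are hypothetical;
 * nothing in ctree.c itself calls this.
 */
static int __maybe_unused example_walk_items(struct btrfs_root *root,
					     u64 objectid)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	int slot;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* start at the first possible item of this objectid */
	key.objectid = objectid;
	key.type = 0;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;	/* no more leaves */
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid != objectid)
			break;

		/* ... look at the item via btrfs_item_ptr() here ... */

		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}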