/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *ins_key,
		      struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int level, int slot,
		    int tree_mod_log);
static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb);
struct extent_buffer *read_old_tree_block(struct btrfs_root *root, u64 bytenr,
					  u32 blocksize, u64 parent_transid,
					  u64 time_seq);
struct extent_buffer *btrfs_find_old_tree_block(struct btrfs_root *root,
						u64 bytenr, u32 blocksize,
						u64 time_seq);

struct btrfs_path *btrfs_alloc_path(void)
{
	struct btrfs_path *path;
	path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
	return path;
}

/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i] || !p->locks[i])
			continue;
		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
		if (p->locks[i] == BTRFS_READ_LOCK)
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		else if (p->locks[i] == BTRFS_WRITE_LOCK)
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
	}
}
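/*
 * Example (an illustrative sketch, not part of the original file): the
 * usual allocate/search/release pattern for a path.  The key values are
 * made up, and btrfs_search_slot()/btrfs_free_path() are defined further
 * down in ctree.c:
 *
 *	struct btrfs_path *path;
 *	struct btrfs_key key;
 *	int ret;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	key.objectid = 256;
 *	key.type = BTRFS_INODE_ITEM_KEY;
 *	key.offset = 0;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	...
 *	btrfs_free_path(path);
 */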
/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held, int held_rw)
{
	int i;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* lockdep really cares that we take all of these spinlocks
	 * in the right order.  If any of the locks in the path are not
	 * currently blocking, it is going to complain.  So, make really
	 * really sure by forcing the path to blocking before we clear
	 * the path blocking.
	 */
	if (held) {
		btrfs_set_lock_blocking_rw(held, held_rw);
		if (held_rw == BTRFS_WRITE_LOCK)
			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
		else if (held_rw == BTRFS_READ_LOCK)
			held_rw = BTRFS_READ_LOCK_BLOCKING;
	}
	btrfs_set_path_blocking(p);
#endif

	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i]) {
			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
				p->locks[i] = BTRFS_WRITE_LOCK;
			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
				p->locks[i] = BTRFS_READ_LOCK;
		}
	}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (held)
		btrfs_clear_lock_blocking_rw(held, held_rw);
#endif
}

/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}

/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths for which no locks or extent buffers
 * are held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}

/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node
		 * because it was cow'ed but we may not get the new root
		 * node yet, so do the inc_not_zero dance and if it doesn't
		 * work then synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}

/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
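/*
 * Example (illustrative sketch): a writer typically takes the root lock
 * with the loop above before COWing the root, and drops its extra
 * reference when done; this mirrors what btrfs_search_slot() does:
 *
 *	struct extent_buffer *eb;
 *
 *	eb = btrfs_lock_root_node(root);
 *	ret = btrfs_cow_block(trans, root, eb, NULL, 0, &eb);
 *	...
 *	btrfs_tree_unlock(eb);
 *	free_extent_buffer(eb);
 */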
/* loop around taking references on and read locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/* cowonly roots (everything not a reference counted cow subvolume) just
 * get put onto a simple dirty list.  transaction.c walks this to make
 * sure they get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	if (root->track_dirty && list_empty(&root->dirty_list)) {
		list_add(&root->dirty_list,
			 &root->fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&root->fs_info->trans_lock);
}

/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this function returns zero on success or a negative
 * error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root,
		    struct extent_buffer *buf,
		    struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
				     new_root_objectid, &disk_key, level,
				     buf->start, 0);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0, 1);

	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

enum mod_log_op {
	MOD_LOG_KEY_REPLACE,
	MOD_LOG_KEY_ADD,
	MOD_LOG_KEY_REMOVE,
	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
	MOD_LOG_MOVE_KEYS,
	MOD_LOG_ROOT_REPLACE,
};

struct tree_mod_move {
	int dst_slot;
	int nr_items;
};

struct tree_mod_root {
	u64 logical;
	u8 level;
};
struct tree_mod_elem {
	struct rb_node node;
	u64 index;		/* shifted logical */
	u64 seq;
	enum mod_log_op op;

	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
	int slot;

	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
	u64 generation;

	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* this is used for op == MOD_LOG_MOVE_KEYS */
	struct tree_mod_move move;

	/* this is used for op == MOD_LOG_ROOT_REPLACE */
	struct tree_mod_root old_root;
};

static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
{
	read_lock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
{
	read_unlock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
{
	write_lock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
{
	write_unlock(&fs_info->tree_mod_log_lock);
}

/*
 * This adds a new blocker to the tree mod log's blocker list if the @elem
 * passed does not already have a sequence number set.  So when a caller
 * expects to record tree modifications, it should ensure elem->seq is zero
 * before calling btrfs_get_tree_mod_seq.
 * Returns a fresh, unused tree log modification sequence number, even if
 * no new blocker was added.
 */
u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			   struct seq_list *elem)
{
	u64 seq;

	tree_mod_log_write_lock(fs_info);
	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!elem->seq) {
		elem->seq = btrfs_inc_tree_mod_seq(fs_info);
		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
	}
	seq = btrfs_inc_tree_mod_seq(fs_info);
	spin_unlock(&fs_info->tree_mod_seq_lock);
	tree_mod_log_write_unlock(fs_info);

	return seq;
}

void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct rb_node *next;
	struct seq_list *cur_elem;
	struct tree_mod_elem *tm;
	u64 min_seq = (u64)-1;
	u64 seq_putting = elem->seq;

	if (!seq_putting)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	list_del(&elem->list);
	elem->seq = 0;

	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
		if (cur_elem->seq < min_seq) {
			if (seq_putting > cur_elem->seq) {
				/*
				 * blocker with lower sequence number exists,
				 * we cannot remove anything from the log
				 */
				spin_unlock(&fs_info->tree_mod_seq_lock);
				return;
			}
			min_seq = cur_elem->seq;
		}
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	/*
	 * we removed the lowest blocker from the blocker list, so there may
	 * be more processable delayed refs.
	 */
	wake_up(&fs_info->tree_mod_seq_wait);

	/*
	 * anything that's lower than the lowest existing (read: blocked)
	 * sequence number can be removed from the tree.
	 */
	tree_mod_log_write_lock(fs_info);
	tm_root = &fs_info->tree_mod_log;
	for (node = rb_first(tm_root); node; node = next) {
		next = rb_next(node);
		tm = container_of(node, struct tree_mod_elem, node);
		if (tm->seq > min_seq)
			continue;
		rb_erase(node, tm_root);
		kfree(tm);
	}
	tree_mod_log_write_unlock(fs_info);
}
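/*
 * Example (illustrative sketch): a reader that wants a stable view of the
 * tree registers itself as a blocker around its work.  elem.seq must start
 * out zero so btrfs_get_tree_mod_seq() adds it to the blocker list:
 *
 *	struct seq_list elem = {};
 *	u64 time_seq;
 *
 *	time_seq = btrfs_get_tree_mod_seq(fs_info, &elem);
 *	... look at old tree state, e.g. via btrfs_search_old_slot() ...
 *	btrfs_put_tree_mod_seq(fs_info, &elem);
 */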
/*
 * key order of the log:
 *       index -> sequence
 *
 * the index is the shifted logical of the *new* root node for root replace
 * operations, or the shifted logical of the affected block for all other
 * operations.
 */
static noinline int
__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
{
	struct rb_root *tm_root;
	struct rb_node **new;
	struct rb_node *parent = NULL;
	struct tree_mod_elem *cur;

	BUG_ON(!tm || !tm->seq);

	tm_root = &fs_info->tree_mod_log;
	new = &tm_root->rb_node;
	while (*new) {
		cur = container_of(*new, struct tree_mod_elem, node);
		parent = *new;
		if (cur->index < tm->index)
			new = &((*new)->rb_left);
		else if (cur->index > tm->index)
			new = &((*new)->rb_right);
		else if (cur->seq < tm->seq)
			new = &((*new)->rb_left);
		else if (cur->seq > tm->seq)
			new = &((*new)->rb_right);
		else {
			kfree(tm);
			return -EEXIST;
		}
	}

	rb_link_node(&tm->node, parent, new);
	rb_insert_color(&tm->node, tm_root);
	return 0;
}

/*
 * Determines if logging can be omitted.  Returns 1 if it can.  Otherwise,
 * it returns zero with the tree_mod_log_lock acquired.  The caller must
 * hold the lock until all tree mod log insertions are recorded in the rb
 * tree and then call tree_mod_log_write_unlock() to release.
 */
static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb)
{
	smp_mb();
	if (list_empty(&fs_info->tree_mod_seq_list))
		return 1;
	if (eb && btrfs_header_level(eb) == 0)
		return 1;

	tree_mod_log_write_lock(fs_info);
	if (list_empty(&fs_info->tree_mod_seq_list)) {
		/*
		 * someone emptied the list while we were waiting for the lock.
		 * we must not add to the list when no blocker exists.
		 */
		tree_mod_log_write_unlock(fs_info);
		return 1;
	}

	return 0;
}
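/*
 * Example (illustrative sketch) of the tree_mod_dont_log() contract:
 * when it returns 0, tree_mod_log_lock is write-held and the caller must
 * release it after recording its operations:
 *
 *	if (tree_mod_dont_log(fs_info, eb))
 *		return 0;	(nothing needs to be recorded)
 *	... insert one or more tree_mod_elem entries ...
 *	tree_mod_log_write_unlock(fs_info);
 */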
/*
 * This allocates memory and gets a tree modification sequence number.
 *
 * Returns <0 on error.
 * Returns >0 (the added sequence number) on success.
 */
static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags,
				 struct tree_mod_elem **tm_ret)
{
	struct tree_mod_elem *tm;

	/*
	 * once we switch from spin locks to something different, we should
	 * honor the flags parameter here.
	 */
	tm = *tm_ret = kzalloc(sizeof(*tm), GFP_ATOMIC);
	if (!tm)
		return -ENOMEM;

	tm->seq = btrfs_inc_tree_mod_seq(fs_info);
	return tm->seq;
}

static inline int
__tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
			  struct extent_buffer *eb, int slot,
			  enum mod_log_op op, gfp_t flags)
{
	int ret;
	struct tree_mod_elem *tm;

	ret = tree_mod_alloc(fs_info, flags, &tm);
	if (ret < 0)
		return ret;

	tm->index = eb->start >> PAGE_CACHE_SHIFT;
	if (op != MOD_LOG_KEY_ADD) {
		btrfs_node_key(eb, &tm->key, slot);
		tm->blockptr = btrfs_node_blockptr(eb, slot);
	}
	tm->op = op;
	tm->slot = slot;
	tm->generation = btrfs_node_ptr_generation(eb, slot);

	return __tree_mod_log_insert(fs_info, tm);
}

static noinline int
tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info,
			     struct extent_buffer *eb, int slot,
			     enum mod_log_op op, gfp_t flags)
{
	int ret;

	if (tree_mod_dont_log(fs_info, eb))
		return 0;

	ret = __tree_mod_log_insert_key(fs_info, eb, slot, op, flags);

	tree_mod_log_write_unlock(fs_info);
	return ret;
}

static noinline int
tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
			struct extent_buffer *eb, int slot,
			enum mod_log_op op)
{
	return tree_mod_log_insert_key_mask(fs_info, eb, slot, op, GFP_NOFS);
}

static noinline int
tree_mod_log_insert_key_locked(struct btrfs_fs_info *fs_info,
			       struct extent_buffer *eb, int slot,
			       enum mod_log_op op)
{
	return __tree_mod_log_insert_key(fs_info, eb, slot, op, GFP_NOFS);
}

static noinline int
tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *eb, int dst_slot, int src_slot,
			 int nr_items, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;
	int i;

	if (tree_mod_dont_log(fs_info, eb))
		return 0;

	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		ret = tree_mod_log_insert_key_locked(fs_info, eb, i + dst_slot,
					MOD_LOG_KEY_REMOVE_WHILE_MOVING);
		BUG_ON(ret < 0);
	}

	ret = tree_mod_alloc(fs_info, flags, &tm);
	if (ret < 0)
		goto out;

	tm->index = eb->start >> PAGE_CACHE_SHIFT;
	tm->slot = src_slot;
	tm->move.dst_slot = dst_slot;
	tm->move.nr_items = nr_items;
	tm->op = MOD_LOG_MOVE_KEYS;

	ret = __tree_mod_log_insert(fs_info, tm);
out:
	tree_mod_log_write_unlock(fs_info);
	return ret;
}

static inline void
__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
{
	int i;
	u32 nritems;
	int ret;

	nritems = btrfs_header_nritems(eb);
	for (i = nritems - 1; i >= 0; i--) {
		ret = tree_mod_log_insert_key_locked(fs_info, eb, i,
					MOD_LOG_KEY_REMOVE_WHILE_FREEING);
		BUG_ON(ret < 0);
	}
}

static noinline int
tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *old_root,
			 struct extent_buffer *new_root, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;

	if (tree_mod_dont_log(fs_info, NULL))
		return 0;

	__tree_mod_log_free_eb(fs_info, old_root);

	ret = tree_mod_alloc(fs_info, flags, &tm);
	if (ret < 0)
		goto out;

	tm->index = new_root->start >> PAGE_CACHE_SHIFT;
	tm->old_root.logical = old_root->start;
	tm->old_root.level = btrfs_header_level(old_root);
	tm->generation = btrfs_header_generation(old_root);
	tm->op = MOD_LOG_ROOT_REPLACE;

	ret = __tree_mod_log_insert(fs_info, tm);
out:
	tree_mod_log_write_unlock(fs_info);
	return ret;
}

static struct tree_mod_elem *
__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
		      int smallest)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct tree_mod_elem *cur = NULL;
	struct tree_mod_elem *found = NULL;
	u64 index = start >> PAGE_CACHE_SHIFT;

	tree_mod_log_read_lock(fs_info);
	tm_root = &fs_info->tree_mod_log;
	node = tm_root->rb_node;
	while (node) {
		cur = container_of(node, struct tree_mod_elem, node);
		if (cur->index < index) {
			node = node->rb_left;
		} else if (cur->index > index) {
			node = node->rb_right;
		} else if (cur->seq < min_seq) {
			node = node->rb_left;
		} else if (!smallest) {
			/* we want the node with the highest seq */
			if (found)
				BUG_ON(found->seq > cur->seq);
			found = cur;
			node = node->rb_left;
		} else if (cur->seq > min_seq) {
			/* we want the node with the smallest seq */
			if (found)
				BUG_ON(found->seq < cur->seq);
			found = cur;
			node = node->rb_right;
		} else {
			found = cur;
			break;
		}
	}
	tree_mod_log_read_unlock(fs_info);

	return found;
}

/*
 * this returns the element from the log with the smallest time sequence
 * value that's in the log (the oldest log item).  any element with a time
 * sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
			   u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 1);
}

/*
 * this returns the element from the log with the largest time sequence
 * value that's in the log (the most recent log item).  any element with
 * a time sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 0);
}

static noinline void
tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     struct extent_buffer *src, unsigned long dst_offset,
		     unsigned long src_offset, int nr_items)
{
	int ret;
	int i;

	if (tree_mod_dont_log(fs_info, NULL))
		return;

	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0) {
		tree_mod_log_write_unlock(fs_info);
		return;
	}

	for (i = 0; i < nr_items; i++) {
		ret = tree_mod_log_insert_key_locked(fs_info, src,
						     i + src_offset,
						     MOD_LOG_KEY_REMOVE);
		BUG_ON(ret < 0);
		ret = tree_mod_log_insert_key_locked(fs_info, dst,
						     i + dst_offset,
						     MOD_LOG_KEY_ADD);
		BUG_ON(ret < 0);
	}

	tree_mod_log_write_unlock(fs_info);
}

static inline void
tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     int dst_offset, int src_offset, int nr_items)
{
	int ret;
	ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
				       nr_items, GFP_NOFS);
	BUG_ON(ret < 0);
}
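/*
 * Example (illustrative sketch): callers that shift key pointers inside a
 * node record the move right before doing the memmove, so that
 * __tree_mod_log_rewind() below can later reverse it:
 *
 *	tree_mod_log_eb_move(fs_info, node, dst_slot, src_slot, nr_items);
 *	memmove_extent_buffer(node,
 *			      btrfs_node_key_ptr_offset(dst_slot),
 *			      btrfs_node_key_ptr_offset(src_slot),
 *			      nr_items * sizeof(struct btrfs_key_ptr));
 */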
static noinline void
tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
			  struct extent_buffer *eb,
			  struct btrfs_disk_key *disk_key, int slot,
			  int atomic)
{
	int ret;

	ret = tree_mod_log_insert_key_mask(fs_info, eb, slot,
					   MOD_LOG_KEY_REPLACE,
					   atomic ? GFP_ATOMIC : GFP_NOFS);
	BUG_ON(ret < 0);
}

static noinline void
tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
{
	if (tree_mod_dont_log(fs_info, eb))
		return;

	__tree_mod_log_free_eb(fs_info, eb);

	tree_mod_log_write_unlock(fs_info);
}

static noinline void
tree_mod_log_set_root_pointer(struct btrfs_root *root,
			      struct extent_buffer *new_root_node)
{
	int ret;
	ret = tree_mod_log_insert_root(root->fs_info, root->node,
				       new_root_node, GFP_NOFS);
	BUG_ON(ret < 0);
}

/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared.  If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (root->ref_cows &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (root->ref_cows &&
	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		return 1;
#endif
	return 0;
}

static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is that some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */
	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, root, buf->start,
					       buf->len, &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1, 1);
			BUG_ON(ret); /* -ENOMEM */

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0, 1);
				BUG_ON(ret); /* -ENOMEM */
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
				BUG_ON(ret); /* -ENOMEM */
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		if (new_flags != 0) {
			ret = btrfs_set_disk_extent_flags(trans, root,
							  buf->start,
							  buf->len,
							  new_flags, 0);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
			BUG_ON(ret); /* -ENOMEM */
			ret = btrfs_dec_ref(trans, root, buf, 1, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		/*
		 * don't log freeing in case we're freeing the root node, this
		 * is done by tree_mod_log_set_root_pointer later
		 */
		if (buf != root->node && btrfs_header_level(buf) != 0)
			tree_mod_log_free_eb(root->fs_info, buf);
		clean_tree_block(trans, root, buf);
		*last_ref = 1;
	}
	return 0;
}
/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is
 * marked dirty and returned locked.  If you modify the block it needs to
 * be marked dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size
 * in bytes the allocator should try to find free next to the block it
 * returns.  This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent)
			parent_start = parent->start;
		else
			parent_start = 0;
	} else
		parent_start = 0;

	cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
				     root->root_key.objectid, &disk_key,
				     level, search_start, empty_size);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}

	if (root->ref_cows)
		btrfs_reloc_cow_block(trans, root, buf, cow);

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;
		else
			parent_start = 0;

		extent_buffer_get(cow);
		tree_mod_log_set_root_pointer(root, cow);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
			parent_start = parent->start;
		else
			parent_start = 0;

		WARN_ON(trans->transid != btrfs_header_generation(parent));
		tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
					MOD_LOG_KEY_REPLACE);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
/*
 * returns the logical address of the oldest predecessor of the given root.
 * entries older than time_seq are ignored.
 */
static struct tree_mod_elem *
__tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
			   struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct tree_mod_elem *found = NULL;
	u64 root_logical = root->node->start;
	int looped = 0;

	if (!time_seq)
		return 0;

	/*
	 * the very last operation that's logged for a root is the
	 * replacement operation (if it is replaced at all).  this has the
	 * index of the *new* root, making it the very first operation
	 * that's logged for this root.
	 */
	while (1) {
		tm = tree_mod_log_search_oldest(fs_info, root_logical,
						time_seq);
		if (!looped && !tm)
			return 0;
		/*
		 * if there are no tree operations for the oldest root, we
		 * simply return it.  this should only happen if that (old)
		 * root is at level 0.
		 */
		if (!tm)
			break;

		/*
		 * if there's an operation that's not a root replacement, we
		 * found the oldest version of our root.  normally, we'll
		 * find a MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
		 */
		if (tm->op != MOD_LOG_ROOT_REPLACE)
			break;

		found = tm;
		root_logical = tm->old_root.logical;
		BUG_ON(root_logical == root->node->start);
		looped = 1;
	}

	/* if there's no old root to return, return what we found instead */
	if (!found)
		found = tm;

	return found;
}

/*
 * tm is a pointer to the first operation to rewind within eb.  then, all
 * previous operations will be rewound (until we reach something older
 * than time_seq).
 */
static void
__tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
		      struct tree_mod_elem *first_tm)
{
	u32 n;
	struct rb_node *next;
	struct tree_mod_elem *tm = first_tm;
	unsigned long o_dst;
	unsigned long o_src;
	unsigned long p_size = sizeof(struct btrfs_key_ptr);

	n = btrfs_header_nritems(eb);
	while (tm && tm->seq >= time_seq) {
		/*
		 * all the operations are recorded with the operator used for
		 * the modification.  as we're going backwards, we do the
		 * opposite of each operation here.
		 */
		switch (tm->op) {
		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
			BUG_ON(tm->slot < n);
		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
		case MOD_LOG_KEY_REMOVE:
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			n++;
			break;
		case MOD_LOG_KEY_REPLACE:
			BUG_ON(tm->slot >= n);
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			break;
		case MOD_LOG_KEY_ADD:
			/* if a move operation is needed it's in the log */
			n--;
			break;
		case MOD_LOG_MOVE_KEYS:
			o_dst = btrfs_node_key_ptr_offset(tm->slot);
			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
			memmove_extent_buffer(eb, o_dst, o_src,
					      tm->move.nr_items * p_size);
			break;
		case MOD_LOG_ROOT_REPLACE:
			/*
			 * this operation is special.  for roots, this must
			 * be handled explicitly before rewinding.
			 * for non-roots, this operation may exist if the
			 * node was a root: root A -> child B; then A gets
			 * empty and B is promoted to the new root.  in the
			 * mod log, we'll have a root-replace operation for
			 * B, a tree block that is not a root.  we simply
			 * ignore that operation.
			 */
			break;
		}
		next = rb_next(&tm->node);
		if (!next)
			break;
		tm = container_of(next, struct tree_mod_elem, node);
		if (tm->index != first_tm->index)
			break;
	}
	btrfs_set_header_nritems(eb, n);
}

static struct extent_buffer *
tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
		    u64 time_seq)
{
	struct extent_buffer *eb_rewin;
	struct tree_mod_elem *tm;

	if (!time_seq)
		return eb;

	if (btrfs_header_level(eb) == 0)
		return eb;

	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
	if (!tm)
		return eb;

	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		BUG_ON(tm->slot != 0);
		eb_rewin = alloc_dummy_extent_buffer(eb->start,
						fs_info->tree_root->nodesize);
		BUG_ON(!eb_rewin);
		btrfs_set_header_bytenr(eb_rewin, eb->start);
		btrfs_set_header_backref_rev(eb_rewin,
					     btrfs_header_backref_rev(eb));
		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
	} else {
		eb_rewin = btrfs_clone_extent_buffer(eb);
		BUG_ON(!eb_rewin);
	}

	extent_buffer_get(eb_rewin);
	free_extent_buffer(eb);

	__tree_mod_log_rewind(eb_rewin, time_seq, tm);

	return eb_rewin;
}
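/*
 * Example (illustrative sketch): a time_seq obtained from
 * btrfs_get_tree_mod_seq() is used to look at an old version of a node.
 * tree_mod_log_rewind() consumes the reference passed in, and the caller
 * releases the (possibly rewound) buffer it gets back:
 *
 *	eb = tree_mod_log_rewind(fs_info, eb, time_seq);
 *	... read keys and pointers as they were at time_seq ...
 *	free_extent_buffer(eb);
 */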
/*
 * get_old_root() rewinds the state of @root's root node to the given
 * @time_seq value.  If there are no changes, the current root->root_node
 * is returned.  If anything changed in between, there's a fresh buffer
 * allocated on which the rewind operations are done.  In any case, the
 * returned buffer is read locked.  Returns NULL on error (with no locks
 * held).
 */
static inline struct extent_buffer *
get_old_root(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct extent_buffer *eb;
	struct tree_mod_root *old_root = NULL;
	u64 old_generation = 0;
	u64 logical;

	eb = btrfs_read_lock_root_node(root);
	tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq);
	if (!tm)
		return root->node;

	if (tm->op == MOD_LOG_ROOT_REPLACE) {
		old_root = &tm->old_root;
		old_generation = tm->generation;
		logical = old_root->logical;
	} else {
		logical = root->node->start;
	}

	tm = tree_mod_log_search(root->fs_info, logical, time_seq);
	if (old_root)
		eb = alloc_dummy_extent_buffer(logical, root->nodesize);
	else
		eb = btrfs_clone_extent_buffer(root->node);
	btrfs_tree_read_unlock(root->node);
	free_extent_buffer(root->node);
	if (!eb)
		return NULL;
	btrfs_tree_read_lock(eb);
	if (old_root) {
		btrfs_set_header_bytenr(eb, eb->start);
		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
		btrfs_set_header_owner(eb, root->root_key.objectid);
		btrfs_set_header_level(eb, old_root->level);
		btrfs_set_header_generation(eb, old_generation);
	}
	if (tm)
		__tree_mod_log_rewind(eb, time_seq, tm);
	else
		WARN_ON(btrfs_header_level(eb) != 0);
	extent_buffer_get(eb);

	return eb;
}

static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	/* ensure we can see the force_cow */
	smp_rmb();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create snapshot during committing the transaction,
	 *    after we've finished copying the src root, we must COW the
	 *    shared block to ensure the metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !root->force_cow)
		return 0;
	return 1;
}
/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't cow'd more
 * than once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	u64 search_start;
	int ret;

	if (trans->transaction != root->fs_info->running_transaction) {
		printk(KERN_CRIT "trans %llu running %llu\n",
		       (unsigned long long)trans->transid,
		       (unsigned long long)
		       root->fs_info->running_transaction->transid);
		WARN_ON(1);
	}
	if (trans->transid != root->fs_info->generation) {
		printk(KERN_CRIT "trans %llu running %llu\n",
		       (unsigned long long)trans->transid,
		       (unsigned long long)root->fs_info->generation);
		WARN_ON(1);
	}

	if (!should_cow_block(trans, root, buf)) {
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);

	if (parent)
		btrfs_set_lock_blocking(parent);
	btrfs_set_lock_blocking(buf);

	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}

/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}

/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}

/*
 * same as comp_keys only with two btrfs_key's
 */
int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
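/*
 * Example: keys compare first by objectid, then type, then offset, so with
 *
 *	struct btrfs_key a = { .objectid = 256, .type = 1, .offset = 0 };
 *	struct btrfs_key b = { .objectid = 256, .type = 2, .offset = 0 };
 *
 * btrfs_comp_cpu_keys(&a, &b) returns -1 (a sorts first), the swapped
 * call returns 1, and equal keys return 0.
 */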
/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, int cache_only, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct extent_buffer *cur;
	u64 blocknr;
	u64 gen;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	parent_level = btrfs_header_level(parent);
	if (cache_only && parent_level != 1)
		return 0;

	if (trans->transaction != root->fs_info->running_transaction)
		WARN_ON(1);
	if (trans->transid != root->fs_info->generation)
		WARN_ON(1);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = btrfs_level_size(root, parent_level - 1);
	end_slot = parent_nritems;

	if (parent_nritems == 1)
		return 0;

	btrfs_set_lock_blocking(parent);

	for (i = start_slot; i < end_slot; i++) {
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot - 2) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = btrfs_find_tree_block(root, blocknr, blocksize);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (cache_only) {
				free_extent_buffer(cur);
				continue;
			}
			if (!cur) {
				cur = read_tree_block(root, blocknr,
						      blocksize, gen);
				if (!cur)
					return -EIO;
			} else if (!uptodate) {
				err = btrfs_read_buffer(cur, gen);
				if (err) {
					free_extent_buffer(cur);
					return err;
				}
			}
		}
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}

/*
 * The leaf data grows from end-to-front in the node.
 * this returns the address of the start of the last item,
 * which is the stop of the leaf data stack
 */
static inline unsigned int leaf_data_end(struct btrfs_root *root,
					 struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);
	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(root);
	return btrfs_item_offset_nr(leaf, nr - 1);
}
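/*
 * Leaf layout in sketch form: item headers grow from the front of the
 * block while item data grows from the back, so leaf_data_end() marks
 * where the data "stack" currently stops:
 *
 *	[header][item 0][item 1]...   free space   ...[data 1][data 0]
 *	                                           ^
 *	                                    leaf_data_end()
 */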
/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p,
				       int item_size, struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
	struct btrfs_disk_key *tmp = NULL;
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *kaddr = NULL;
	unsigned long map_start = 0;
	unsigned long map_len = 0;
	int err;

	while (low < high) {
		mid = (low + high) / 2;
		offset = p + mid * item_size;

		if (!kaddr || offset < map_start ||
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {

			err = map_private_extent_buffer(eb, offset,
						sizeof(struct btrfs_disk_key),
						&kaddr, &map_start, &map_len);

			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
			} else {
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			}

		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}

/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		      int level, int *slot)
{
	if (level == 0)
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	else
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
}

int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		     int level, int *slot)
{
	return bin_search(eb, key, level, slot);
}

static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}

/* given a node and slot number, this reads the block it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 * NULL is returned on error.
 */
static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
				   struct extent_buffer *parent, int slot)
{
	int level = btrfs_header_level(parent);
	if (slot < 0)
		return NULL;
	if (slot >= btrfs_header_nritems(parent))
		return NULL;

	BUG_ON(level == 0);

	return read_tree_block(root, btrfs_node_blockptr(parent, slot),
			       btrfs_level_size(root, level - 1),
			       btrfs_node_ptr_generation(parent, slot));
}
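/*
 * Example (illustrative sketch): walking one level down from a node in a
 * path.  The child buffer comes back with a reference but no lock:
 *
 *	struct extent_buffer *child;
 *
 *	child = read_node_slot(root, path->nodes[1], path->slots[1]);
 *	if (child) {
 *		...
 *		free_extent_buffer(child);
 *	}
 */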
/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	if (level == 0)
		return 0;

	mid = path->nodes[level];

	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
		path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = read_node_slot(root, mid, 0);
		if (!child) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			goto enospc;
		}

		btrfs_tree_lock(child);
		btrfs_set_lock_blocking(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
		if (ret) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			goto enospc;
		}

		tree_mod_log_set_root_pointer(root, child);
		rcu_assign_pointer(root->node, child);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);

		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		/* once for the root ptr */
		free_extent_buffer_stale(mid);
		return 0;
	}
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
		return 0;

	left = read_node_slot(root, parent, pslot - 1);
	if (left) {
		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}
	right = read_node_slot(root, parent, pslot + 1);
	if (right) {
		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, root, left, mid, 1);
		if (wret < 0)
			ret = wret;
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, root, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			clean_tree_block(trans, root, right);
			btrfs_tree_unlock(right);
			del_ptr(trans, root, path, level + 1, pslot + 1, 1);
			root_sub_used(root, right->len);
			btrfs_free_tree_block(trans, root, right, 0, 1);
			free_extent_buffer_stale(right);
			right = NULL;
		} else {
			struct btrfs_disk_key right_key;
			btrfs_node_key(right, &right_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  &right_key, pslot + 1, 0);
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete.  A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right
		 */
		if (!left) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			goto enospc;
		}
		wret = balance_node_right(trans, root, mid, left);
		if (wret < 0) {
			ret = wret;
			goto enospc;
		}
		if (wret == 1) {
			wret = push_node_left(trans, root, left, mid, 1);
			if (wret < 0)
				ret = wret;
		}
		BUG_ON(wret == 1);
	}
	if (btrfs_header_nritems(mid) == 0) {
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		del_ptr(trans, root, path, level + 1, pslot, 1);
		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		free_extent_buffer_stale(mid);
		mid = NULL;
	} else {
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;
		btrfs_node_key(mid, &mid_key, 0);
		tree_mod_log_set_node_key(root->fs_info, parent, &mid_key,
					  pslot, 0);
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(parent);
	}

	/* update the path */
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			extent_buffer_get(left);
			/* left was locked after cow */
			path->nodes[level] = left;
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
			if (mid) {
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			}
		} else {
			orig_slot -= btrfs_header_nritems(left);
			path->slots[level] = orig_slot;
		}
	}
	/* double check we haven't messed things up */
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
enospc:
	if (right) {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	if (left) {
		if (path->nodes[level] != left)
			btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	return ret;
}
/* Node balancing for insertion.  Here we only split or push nodes around
 * when they are completely full.  This is also done top down, so we
 * have to be pessimistic.
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];

	if (level == 0)
		return 1;

	mid = path->nodes[level];
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	if (!parent)
		return 1;

	left = read_node_slot(root, parent, pslot - 1);

	/* first, try to make some room in the middle buffer */
	if (left) {
		u32 left_nr;

		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);

		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, left, parent,
					      pslot - 1, &left);
			if (ret)
				wret = 1;
			else {
				wret = push_node_left(trans, root,
						      left, mid, 0);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;
			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  &disk_key, pslot, 0);
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(parent);
			if (btrfs_header_nritems(left) > orig_slot) {
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				orig_slot -=
					btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
			}
			return 0;
		}
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	right = read_node_slot(root, parent, pslot + 1);

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		u32 right_nr;

		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);

		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, right,
					      parent, pslot + 1,
					      &right);
			if (ret)
				wret = 1;
			else {
				wret = balance_node_right(trans, root,
							  right, mid);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_node_key(right, &disk_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  &disk_key, pslot + 1, 0);
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);

			if (btrfs_header_nritems(mid) <= orig_slot) {
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
			}
			return 0;
		}
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 1;
}
triggering readahead on them. 2027 */ 2028 static void reada_for_search(struct btrfs_root *root, 2029 struct btrfs_path *path, 2030 int level, int slot, u64 objectid) 2031 { 2032 struct extent_buffer *node; 2033 struct btrfs_disk_key disk_key; 2034 u32 nritems; 2035 u64 search; 2036 u64 target; 2037 u64 nread = 0; 2038 u64 gen; 2039 int direction = path->reada; 2040 struct extent_buffer *eb; 2041 u32 nr; 2042 u32 blocksize; 2043 u32 nscan = 0; 2044 2045 if (level != 1) 2046 return; 2047 2048 if (!path->nodes[level]) 2049 return; 2050 2051 node = path->nodes[level]; 2052 2053 search = btrfs_node_blockptr(node, slot); 2054 blocksize = btrfs_level_size(root, level - 1); 2055 eb = btrfs_find_tree_block(root, search, blocksize); 2056 if (eb) { 2057 free_extent_buffer(eb); 2058 return; 2059 } 2060 2061 target = search; 2062 2063 nritems = btrfs_header_nritems(node); 2064 nr = slot; 2065 2066 while (1) { 2067 if (direction < 0) { 2068 if (nr == 0) 2069 break; 2070 nr--; 2071 } else if (direction > 0) { 2072 nr++; 2073 if (nr >= nritems) 2074 break; 2075 } 2076 if (path->reada < 0 && objectid) { 2077 btrfs_node_key(node, &disk_key, nr); 2078 if (btrfs_disk_key_objectid(&disk_key) != objectid) 2079 break; 2080 } 2081 search = btrfs_node_blockptr(node, nr); 2082 if ((search <= target && target - search <= 65536) || 2083 (search > target && search - target <= 65536)) { 2084 gen = btrfs_node_ptr_generation(node, nr); 2085 readahead_tree_block(root, search, blocksize, gen); 2086 nread += blocksize; 2087 } 2088 nscan++; 2089 if ((nread > 65536 || nscan > 32)) 2090 break; 2091 } 2092 } 2093 2094 /* 2095 * returns -EAGAIN if it had to drop the path, or zero if everything was in 2096 * cache 2097 */ 2098 static noinline int reada_for_balance(struct btrfs_root *root, 2099 struct btrfs_path *path, int level) 2100 { 2101 int slot; 2102 int nritems; 2103 struct extent_buffer *parent; 2104 struct extent_buffer *eb; 2105 u64 gen; 2106 u64 block1 = 0; 2107 u64 block2 = 0; 2108 int ret = 0; 2109 int blocksize; 2110 2111 parent = path->nodes[level + 1]; 2112 if (!parent) 2113 return 0; 2114 2115 nritems = btrfs_header_nritems(parent); 2116 slot = path->slots[level + 1]; 2117 blocksize = btrfs_level_size(root, level); 2118 2119 if (slot > 0) { 2120 block1 = btrfs_node_blockptr(parent, slot - 1); 2121 gen = btrfs_node_ptr_generation(parent, slot - 1); 2122 eb = btrfs_find_tree_block(root, block1, blocksize); 2123 /* 2124 * if we get -EAGAIN from btrfs_buffer_uptodate, we 2125 * don't want to return -EAGAIN here.
That will loop 2126 * forever 2127 */ 2128 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0) 2129 block1 = 0; 2130 free_extent_buffer(eb); 2131 } 2132 if (slot + 1 < nritems) { 2133 block2 = btrfs_node_blockptr(parent, slot + 1); 2134 gen = btrfs_node_ptr_generation(parent, slot + 1); 2135 eb = btrfs_find_tree_block(root, block2, blocksize); 2136 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0) 2137 block2 = 0; 2138 free_extent_buffer(eb); 2139 } 2140 if (block1 || block2) { 2141 ret = -EAGAIN; 2142 2143 /* release the whole path */ 2144 btrfs_release_path(path); 2145 2146 /* read the blocks */ 2147 if (block1) 2148 readahead_tree_block(root, block1, blocksize, 0); 2149 if (block2) 2150 readahead_tree_block(root, block2, blocksize, 0); 2151 2152 if (block1) { 2153 eb = read_tree_block(root, block1, blocksize, 0); 2154 free_extent_buffer(eb); 2155 } 2156 if (block2) { 2157 eb = read_tree_block(root, block2, blocksize, 0); 2158 free_extent_buffer(eb); 2159 } 2160 } 2161 return ret; 2162 } 2163 2164 2165 /* 2166 * when we walk down the tree, it is usually safe to unlock the higher layers 2167 * in the tree. The exceptions are when our path goes through slot 0, because 2168 * operations on the tree might require changing key pointers higher up in the 2169 * tree. 2170 * 2171 * callers might also have set path->keep_locks, which tells this code to keep 2172 * the lock if the path points to the last slot in the block. This is part of 2173 * walking through the tree, and selecting the next slot in the higher block. 2174 * 2175 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so 2176 * if lowest_unlock is 1, level 0 won't be unlocked 2177 */ 2178 static noinline void unlock_up(struct btrfs_path *path, int level, 2179 int lowest_unlock, int min_write_lock_level, 2180 int *write_lock_level) 2181 { 2182 int i; 2183 int skip_level = level; 2184 int no_skips = 0; 2185 struct extent_buffer *t; 2186 2187 for (i = level; i < BTRFS_MAX_LEVEL; i++) { 2188 if (!path->nodes[i]) 2189 break; 2190 if (!path->locks[i]) 2191 break; 2192 if (!no_skips && path->slots[i] == 0) { 2193 skip_level = i + 1; 2194 continue; 2195 } 2196 if (!no_skips && path->keep_locks) { 2197 u32 nritems; 2198 t = path->nodes[i]; 2199 nritems = btrfs_header_nritems(t); 2200 if (nritems < 1 || path->slots[i] >= nritems - 1) { 2201 skip_level = i + 1; 2202 continue; 2203 } 2204 } 2205 if (skip_level < i && i >= lowest_unlock) 2206 no_skips = 1; 2207 2208 t = path->nodes[i]; 2209 if (i >= lowest_unlock && i > skip_level && path->locks[i]) { 2210 btrfs_tree_unlock_rw(t, path->locks[i]); 2211 path->locks[i] = 0; 2212 if (write_lock_level && 2213 i > min_write_lock_level && 2214 i <= *write_lock_level) { 2215 *write_lock_level = i - 1; 2216 } 2217 } 2218 } 2219 } 2220 2221 /* 2222 * This releases any locks held in the path starting at level and 2223 * going all the way up to the root. 2224 * 2225 * btrfs_search_slot will keep the lock held on higher nodes in a few 2226 * corner cases, such as COW of the block at slot zero in the node. This 2227 * ignores those rules, and it should only be called when there are no 2228 * more updates to be done higher up in the tree. 
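 *
 * A minimal usage sketch (illustrative, mirroring the call made in
 * read_block_for_search below): once the parents are stable, keep the
 * lock at 'level' but drop everything above it,
 *
 *	btrfs_unlock_up_safe(p, level + 1);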
2229 */ 2230 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level) 2231 { 2232 int i; 2233 2234 if (path->keep_locks) 2235 return; 2236 2237 for (i = level; i < BTRFS_MAX_LEVEL; i++) { 2238 if (!path->nodes[i]) 2239 continue; 2240 if (!path->locks[i]) 2241 continue; 2242 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]); 2243 path->locks[i] = 0; 2244 } 2245 } 2246 2247 /* 2248 * helper function for btrfs_search_slot. The goal is to find a block 2249 * in cache without setting the path to blocking. If we find the block 2250 * we return zero and the path is unchanged. 2251 * 2252 * If we can't find the block, we set the path blocking and do some 2253 * reada. -EAGAIN is returned and the search must be repeated. 2254 */ 2255 static int 2256 read_block_for_search(struct btrfs_trans_handle *trans, 2257 struct btrfs_root *root, struct btrfs_path *p, 2258 struct extent_buffer **eb_ret, int level, int slot, 2259 struct btrfs_key *key, u64 time_seq) 2260 { 2261 u64 blocknr; 2262 u64 gen; 2263 u32 blocksize; 2264 struct extent_buffer *b = *eb_ret; 2265 struct extent_buffer *tmp; 2266 int ret; 2267 2268 blocknr = btrfs_node_blockptr(b, slot); 2269 gen = btrfs_node_ptr_generation(b, slot); 2270 blocksize = btrfs_level_size(root, level - 1); 2271 2272 tmp = btrfs_find_tree_block(root, blocknr, blocksize); 2273 if (tmp) { 2274 /* first we do an atomic uptodate check */ 2275 if (btrfs_buffer_uptodate(tmp, 0, 1) > 0) { 2276 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) { 2277 /* 2278 * we found an up to date block without 2279 * sleeping, return 2280 * right away 2281 */ 2282 *eb_ret = tmp; 2283 return 0; 2284 } 2285 /* the pages were up to date, but we failed 2286 * the generation number check. Do a full 2287 * read for the generation number that is correct. 2288 * We must do this without dropping locks so 2289 * we can trust our generation number 2290 */ 2291 free_extent_buffer(tmp); 2292 btrfs_set_path_blocking(p); 2293 2294 /* now we're allowed to do a blocking uptodate check */ 2295 tmp = read_tree_block(root, blocknr, blocksize, gen); 2296 if (tmp && btrfs_buffer_uptodate(tmp, gen, 0) > 0) { 2297 *eb_ret = tmp; 2298 return 0; 2299 } 2300 free_extent_buffer(tmp); 2301 btrfs_release_path(p); 2302 return -EIO; 2303 } 2304 } 2305 2306 /* 2307 * reduce lock contention at high levels 2308 * of the btree by dropping locks before 2309 * we read. Don't release the lock on the current 2310 * level because we need to walk this node to figure 2311 * out which blocks to read. 2312 */ 2313 btrfs_unlock_up_safe(p, level + 1); 2314 btrfs_set_path_blocking(p); 2315 2316 free_extent_buffer(tmp); 2317 if (p->reada) 2318 reada_for_search(root, p, level, slot, key->objectid); 2319 2320 btrfs_release_path(p); 2321 2322 ret = -EAGAIN; 2323 tmp = read_tree_block(root, blocknr, blocksize, 0); 2324 if (tmp) { 2325 /* 2326 * If the read above didn't mark this buffer up to date, 2327 * it will never end up being up to date. Set ret to EIO now 2328 * and give up so that our caller doesn't loop forever 2329 * on our EAGAINs. 2330 */ 2331 if (!btrfs_buffer_uptodate(tmp, 0, 0)) 2332 ret = -EIO; 2333 free_extent_buffer(tmp); 2334 } 2335 return ret; 2336 } 2337 2338 /* 2339 * helper function for btrfs_search_slot. This does all of the checks 2340 * for node-level blocks and does any balancing required based on 2341 * the ins_len. 2342 * 2343 * If no extra work was required, zero is returned. 
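 *
 * A sketch of the caller's side (this is how btrfs_search_slot below
 * consumes the return value; 'again' is its restart label):
 *
 *	err = setup_nodes_for_search(trans, root, p, b, level,
 *				     ins_len, &write_lock_level);
 *	if (err == -EAGAIN)
 *		goto again;
 *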
If we had to 2344 * drop the path, -EAGAIN is returned and btrfs_search_slot must 2345 * start over 2346 */ 2347 static int 2348 setup_nodes_for_search(struct btrfs_trans_handle *trans, 2349 struct btrfs_root *root, struct btrfs_path *p, 2350 struct extent_buffer *b, int level, int ins_len, 2351 int *write_lock_level) 2352 { 2353 int ret; 2354 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >= 2355 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) { 2356 int sret; 2357 2358 if (*write_lock_level < level + 1) { 2359 *write_lock_level = level + 1; 2360 btrfs_release_path(p); 2361 goto again; 2362 } 2363 2364 sret = reada_for_balance(root, p, level); 2365 if (sret) 2366 goto again; 2367 2368 btrfs_set_path_blocking(p); 2369 sret = split_node(trans, root, p, level); 2370 btrfs_clear_path_blocking(p, NULL, 0); 2371 2372 BUG_ON(sret > 0); 2373 if (sret) { 2374 ret = sret; 2375 goto done; 2376 } 2377 b = p->nodes[level]; 2378 } else if (ins_len < 0 && btrfs_header_nritems(b) < 2379 BTRFS_NODEPTRS_PER_BLOCK(root) / 2) { 2380 int sret; 2381 2382 if (*write_lock_level < level + 1) { 2383 *write_lock_level = level + 1; 2384 btrfs_release_path(p); 2385 goto again; 2386 } 2387 2388 sret = reada_for_balance(root, p, level); 2389 if (sret) 2390 goto again; 2391 2392 btrfs_set_path_blocking(p); 2393 sret = balance_level(trans, root, p, level); 2394 btrfs_clear_path_blocking(p, NULL, 0); 2395 2396 if (sret) { 2397 ret = sret; 2398 goto done; 2399 } 2400 b = p->nodes[level]; 2401 if (!b) { 2402 btrfs_release_path(p); 2403 goto again; 2404 } 2405 BUG_ON(btrfs_header_nritems(b) == 1); 2406 } 2407 return 0; 2408 2409 again: 2410 ret = -EAGAIN; 2411 done: 2412 return ret; 2413 } 2414 2415 /* 2416 * look for key in the tree. path is filled in with nodes along the way 2417 * if key is found, we return zero and you can find the item in the leaf 2418 * level of the path (level 0) 2419 * 2420 * If the key isn't found, the path points to the slot where it should 2421 * be inserted, and 1 is returned. If there are other errors during the 2422 * search a negative error number is returned. 2423 * 2424 * if ins_len > 0, nodes and leaves will be split as we walk down the 2425 * tree. 
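 * (as an aside, callers typically size ins_len as
 * sizeof(struct btrfs_item) plus the size of the item data for a
 * single insertion, the way btrfs_insert_empty_items does later in
 * this file.)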
if ins_len < 0, nodes will be merged as we walk down the tree (if 2426 * possible) 2427 */ 2428 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root 2429 *root, struct btrfs_key *key, struct btrfs_path *p, int 2430 ins_len, int cow) 2431 { 2432 struct extent_buffer *b; 2433 int slot; 2434 int ret; 2435 int err; 2436 int level; 2437 int lowest_unlock = 1; 2438 int root_lock; 2439 /* everything at write_lock_level or lower must be write locked */ 2440 int write_lock_level = 0; 2441 u8 lowest_level = 0; 2442 int min_write_lock_level; 2443 2444 lowest_level = p->lowest_level; 2445 WARN_ON(lowest_level && ins_len > 0); 2446 WARN_ON(p->nodes[0] != NULL); 2447 2448 if (ins_len < 0) { 2449 lowest_unlock = 2; 2450 2451 /* when we are removing items, we might have to go up to level 2452 * two as we update tree pointers. Make sure we keep write 2453 * locks for those levels as well 2454 */ 2455 write_lock_level = 2; 2456 } else if (ins_len > 0) { 2457 /* 2458 * for inserting items, make sure we have a write lock on 2459 * level 1 so we can update keys 2460 */ 2461 write_lock_level = 1; 2462 } 2463 2464 if (!cow) 2465 write_lock_level = -1; 2466 2467 if (cow && (p->keep_locks || p->lowest_level)) 2468 write_lock_level = BTRFS_MAX_LEVEL; 2469 2470 min_write_lock_level = write_lock_level; 2471 2472 again: 2473 /* 2474 * we try very hard to do read locks on the root 2475 */ 2476 root_lock = BTRFS_READ_LOCK; 2477 level = 0; 2478 if (p->search_commit_root) { 2479 /* 2480 * the commit roots are read only 2481 * so we always do read locks 2482 */ 2483 b = root->commit_root; 2484 extent_buffer_get(b); 2485 level = btrfs_header_level(b); 2486 if (!p->skip_locking) 2487 btrfs_tree_read_lock(b); 2488 } else { 2489 if (p->skip_locking) { 2490 b = btrfs_root_node(root); 2491 level = btrfs_header_level(b); 2492 } else { 2493 /* we don't know the level of the root node 2494 * until we actually have it read locked 2495 */ 2496 b = btrfs_read_lock_root_node(root); 2497 level = btrfs_header_level(b); 2498 if (level <= write_lock_level) { 2499 /* whoops, must trade for write lock */ 2500 btrfs_tree_read_unlock(b); 2501 free_extent_buffer(b); 2502 b = btrfs_lock_root_node(root); 2503 root_lock = BTRFS_WRITE_LOCK; 2504 2505 /* the level might have changed, check again */ 2506 level = btrfs_header_level(b); 2507 } 2508 } 2509 } 2510 p->nodes[level] = b; 2511 if (!p->skip_locking) 2512 p->locks[level] = root_lock; 2513 2514 while (b) { 2515 level = btrfs_header_level(b); 2516 2517 /* 2518 * setup the path here so we can release it under lock 2519 * contention with the cow code 2520 */ 2521 if (cow) { 2522 /* 2523 * if we don't really need to cow this block 2524 * then we don't want to set the path blocking, 2525 * so we test it here 2526 */ 2527 if (!should_cow_block(trans, root, b)) 2528 goto cow_done; 2529 2530 btrfs_set_path_blocking(p); 2531 2532 /* 2533 * must have write locks on this node and the 2534 * parent 2535 */ 2536 if (level + 1 > write_lock_level) { 2537 write_lock_level = level + 1; 2538 btrfs_release_path(p); 2539 goto again; 2540 } 2541 2542 err = btrfs_cow_block(trans, root, b, 2543 p->nodes[level + 1], 2544 p->slots[level + 1], &b); 2545 if (err) { 2546 ret = err; 2547 goto done; 2548 } 2549 } 2550 cow_done: 2551 BUG_ON(!cow && ins_len); 2552 2553 p->nodes[level] = b; 2554 btrfs_clear_path_blocking(p, NULL, 0); 2555 2556 /* 2557 * we have a lock on b and as long as we aren't changing 2558 * the tree, there is no way for the items in b to change.
2559 * It is safe to drop the lock on our parent before we 2560 * go through the expensive btree search on b. 2561 * 2562 * If cow is true, then we might be changing slot zero, 2563 * which may require changing the parent. So, we can't 2564 * drop the lock until after we know which slot we're 2565 * operating on. 2566 */ 2567 if (!cow) 2568 btrfs_unlock_up_safe(p, level + 1); 2569 2570 ret = bin_search(b, key, level, &slot); 2571 2572 if (level != 0) { 2573 int dec = 0; 2574 if (ret && slot > 0) { 2575 dec = 1; 2576 slot -= 1; 2577 } 2578 p->slots[level] = slot; 2579 err = setup_nodes_for_search(trans, root, p, b, level, 2580 ins_len, &write_lock_level); 2581 if (err == -EAGAIN) 2582 goto again; 2583 if (err) { 2584 ret = err; 2585 goto done; 2586 } 2587 b = p->nodes[level]; 2588 slot = p->slots[level]; 2589 2590 /* 2591 * slot 0 is special, if we change the key 2592 * we have to update the parent pointer 2593 * which means we must have a write lock 2594 * on the parent 2595 */ 2596 if (slot == 0 && cow && 2597 write_lock_level < level + 1) { 2598 write_lock_level = level + 1; 2599 btrfs_release_path(p); 2600 goto again; 2601 } 2602 2603 unlock_up(p, level, lowest_unlock, 2604 min_write_lock_level, &write_lock_level); 2605 2606 if (level == lowest_level) { 2607 if (dec) 2608 p->slots[level]++; 2609 goto done; 2610 } 2611 2612 err = read_block_for_search(trans, root, p, 2613 &b, level, slot, key, 0); 2614 if (err == -EAGAIN) 2615 goto again; 2616 if (err) { 2617 ret = err; 2618 goto done; 2619 } 2620 2621 if (!p->skip_locking) { 2622 level = btrfs_header_level(b); 2623 if (level <= write_lock_level) { 2624 err = btrfs_try_tree_write_lock(b); 2625 if (!err) { 2626 btrfs_set_path_blocking(p); 2627 btrfs_tree_lock(b); 2628 btrfs_clear_path_blocking(p, b, 2629 BTRFS_WRITE_LOCK); 2630 } 2631 p->locks[level] = BTRFS_WRITE_LOCK; 2632 } else { 2633 err = btrfs_try_tree_read_lock(b); 2634 if (!err) { 2635 btrfs_set_path_blocking(p); 2636 btrfs_tree_read_lock(b); 2637 btrfs_clear_path_blocking(p, b, 2638 BTRFS_READ_LOCK); 2639 } 2640 p->locks[level] = BTRFS_READ_LOCK; 2641 } 2642 p->nodes[level] = b; 2643 } 2644 } else { 2645 p->slots[level] = slot; 2646 if (ins_len > 0 && 2647 btrfs_leaf_free_space(root, b) < ins_len) { 2648 if (write_lock_level < 1) { 2649 write_lock_level = 1; 2650 btrfs_release_path(p); 2651 goto again; 2652 } 2653 2654 btrfs_set_path_blocking(p); 2655 err = split_leaf(trans, root, key, 2656 p, ins_len, ret == 0); 2657 btrfs_clear_path_blocking(p, NULL, 0); 2658 2659 BUG_ON(err > 0); 2660 if (err) { 2661 ret = err; 2662 goto done; 2663 } 2664 } 2665 if (!p->search_for_split) 2666 unlock_up(p, level, lowest_unlock, 2667 min_write_lock_level, &write_lock_level); 2668 goto done; 2669 } 2670 } 2671 ret = 1; 2672 done: 2673 /* 2674 * we don't really know what they plan on doing with the path 2675 * from here on, so for now just mark it as blocking 2676 */ 2677 if (!p->leave_spinning) 2678 btrfs_set_path_blocking(p); 2679 if (ret < 0) 2680 btrfs_release_path(p); 2681 return ret; 2682 } 2683 2684 /* 2685 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the 2686 * current state of the tree together with the operations recorded in the tree 2687 * modification log to search for the key in a previous version of this tree, as 2688 * denoted by the time_seq parameter. 2689 * 2690 * Naturally, there is no support for insert, delete or cow operations. 
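 *
 * A minimal calling sketch (illustrative; obtaining time_seq from the
 * tree mod log sequence machinery is not shown):
 *
 *	ret = btrfs_search_old_slot(root, &key, path, time_seq);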
2691 * 2692 * The resulting path and return value will be set up as if we called 2693 * btrfs_search_slot at that point in time with ins_len and cow both set to 0. 2694 */ 2695 int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key, 2696 struct btrfs_path *p, u64 time_seq) 2697 { 2698 struct extent_buffer *b; 2699 int slot; 2700 int ret; 2701 int err; 2702 int level; 2703 int lowest_unlock = 1; 2704 u8 lowest_level = 0; 2705 2706 lowest_level = p->lowest_level; 2707 WARN_ON(p->nodes[0] != NULL); 2708 2709 if (p->search_commit_root) { 2710 BUG_ON(time_seq); 2711 return btrfs_search_slot(NULL, root, key, p, 0, 0); 2712 } 2713 2714 again: 2715 b = get_old_root(root, time_seq); 2716 level = btrfs_header_level(b); 2717 p->locks[level] = BTRFS_READ_LOCK; 2718 2719 while (b) { 2720 level = btrfs_header_level(b); 2721 p->nodes[level] = b; 2722 btrfs_clear_path_blocking(p, NULL, 0); 2723 2724 /* 2725 * we have a lock on b and as long as we aren't changing 2726 * the tree, there is no way for the items in b to change. 2727 * It is safe to drop the lock on our parent before we 2728 * go through the expensive btree search on b. 2729 */ 2730 btrfs_unlock_up_safe(p, level + 1); 2731 2732 ret = bin_search(b, key, level, &slot); 2733 2734 if (level != 0) { 2735 int dec = 0; 2736 if (ret && slot > 0) { 2737 dec = 1; 2738 slot -= 1; 2739 } 2740 p->slots[level] = slot; 2741 unlock_up(p, level, lowest_unlock, 0, NULL); 2742 2743 if (level == lowest_level) { 2744 if (dec) 2745 p->slots[level]++; 2746 goto done; 2747 } 2748 2749 err = read_block_for_search(NULL, root, p, &b, level, 2750 slot, key, time_seq); 2751 if (err == -EAGAIN) 2752 goto again; 2753 if (err) { 2754 ret = err; 2755 goto done; 2756 } 2757 2758 level = btrfs_header_level(b); 2759 err = btrfs_try_tree_read_lock(b); 2760 if (!err) { 2761 btrfs_set_path_blocking(p); 2762 btrfs_tree_read_lock(b); 2763 btrfs_clear_path_blocking(p, b, 2764 BTRFS_READ_LOCK); 2765 } 2766 p->locks[level] = BTRFS_READ_LOCK; 2767 p->nodes[level] = b; 2768 b = tree_mod_log_rewind(root->fs_info, b, time_seq); 2769 if (b != p->nodes[level]) { 2770 btrfs_tree_unlock_rw(p->nodes[level], 2771 p->locks[level]); 2772 p->locks[level] = 0; 2773 p->nodes[level] = b; 2774 } 2775 } else { 2776 p->slots[level] = slot; 2777 unlock_up(p, level, lowest_unlock, 0, NULL); 2778 goto done; 2779 } 2780 } 2781 ret = 1; 2782 done: 2783 if (!p->leave_spinning) 2784 btrfs_set_path_blocking(p); 2785 if (ret < 0) 2786 btrfs_release_path(p); 2787 2788 return ret; 2789 } 2790 2791 /* 2792 * adjust the pointers going up the tree, starting at level 2793 * making sure the right key of each node points to 'key'. 2794 * This is used after shifting pointers to the left, so it stops 2795 * fixing up pointers when a given leaf/node is not in slot 0 of the 2796 * higher levels 2797 * 2798 */ 2799 static void fixup_low_keys(struct btrfs_trans_handle *trans, 2800 struct btrfs_root *root, struct btrfs_path *path, 2801 struct btrfs_disk_key *key, int level) 2802 { 2803 int i; 2804 struct extent_buffer *t; 2805 2806 for (i = level; i < BTRFS_MAX_LEVEL; i++) { 2807 int tslot = path->slots[i]; 2808 if (!path->nodes[i]) 2809 break; 2810 t = path->nodes[i]; 2811 tree_mod_log_set_node_key(root->fs_info, t, key, tslot, 1); 2812 btrfs_set_node_key(t, key, tslot); 2813 btrfs_mark_buffer_dirty(path->nodes[i]); 2814 if (tslot != 0) 2815 break; 2816 } 2817 } 2818 2819 /* 2820 * update item key. 2821 * 2822 * This function isn't completely safe.
It's the caller's responsibility 2823 * that the new key won't break the order 2824 */ 2825 void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans, 2826 struct btrfs_root *root, struct btrfs_path *path, 2827 struct btrfs_key *new_key) 2828 { 2829 struct btrfs_disk_key disk_key; 2830 struct extent_buffer *eb; 2831 int slot; 2832 2833 eb = path->nodes[0]; 2834 slot = path->slots[0]; 2835 if (slot > 0) { 2836 btrfs_item_key(eb, &disk_key, slot - 1); 2837 BUG_ON(comp_keys(&disk_key, new_key) >= 0); 2838 } 2839 if (slot < btrfs_header_nritems(eb) - 1) { 2840 btrfs_item_key(eb, &disk_key, slot + 1); 2841 BUG_ON(comp_keys(&disk_key, new_key) <= 0); 2842 } 2843 2844 btrfs_cpu_key_to_disk(&disk_key, new_key); 2845 btrfs_set_item_key(eb, &disk_key, slot); 2846 btrfs_mark_buffer_dirty(eb); 2847 if (slot == 0) 2848 fixup_low_keys(trans, root, path, &disk_key, 1); 2849 } 2850 2851 /* 2852 * try to push data from one node into the next node left in the 2853 * tree. 2854 * 2855 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible 2856 * error, and > 0 if there was no room in the left hand block. 2857 */ 2858 static int push_node_left(struct btrfs_trans_handle *trans, 2859 struct btrfs_root *root, struct extent_buffer *dst, 2860 struct extent_buffer *src, int empty) 2861 { 2862 int push_items = 0; 2863 int src_nritems; 2864 int dst_nritems; 2865 int ret = 0; 2866 2867 src_nritems = btrfs_header_nritems(src); 2868 dst_nritems = btrfs_header_nritems(dst); 2869 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems; 2870 WARN_ON(btrfs_header_generation(src) != trans->transid); 2871 WARN_ON(btrfs_header_generation(dst) != trans->transid); 2872 2873 if (!empty && src_nritems <= 8) 2874 return 1; 2875 2876 if (push_items <= 0) 2877 return 1; 2878 2879 if (empty) { 2880 push_items = min(src_nritems, push_items); 2881 if (push_items < src_nritems) { 2882 /* leave at least 8 pointers in the node if 2883 * we aren't going to empty it 2884 */ 2885 if (src_nritems - push_items < 8) { 2886 if (push_items <= 8) 2887 return 1; 2888 push_items -= 8; 2889 } 2890 } 2891 } else 2892 push_items = min(src_nritems - 8, push_items); 2893 2894 tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0, 2895 push_items); 2896 copy_extent_buffer(dst, src, 2897 btrfs_node_key_ptr_offset(dst_nritems), 2898 btrfs_node_key_ptr_offset(0), 2899 push_items * sizeof(struct btrfs_key_ptr)); 2900 2901 if (push_items < src_nritems) { 2902 tree_mod_log_eb_move(root->fs_info, src, 0, push_items, 2903 src_nritems - push_items); 2904 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0), 2905 btrfs_node_key_ptr_offset(push_items), 2906 (src_nritems - push_items) * 2907 sizeof(struct btrfs_key_ptr)); 2908 } 2909 btrfs_set_header_nritems(src, src_nritems - push_items); 2910 btrfs_set_header_nritems(dst, dst_nritems + push_items); 2911 btrfs_mark_buffer_dirty(src); 2912 btrfs_mark_buffer_dirty(dst); 2913 2914 return ret; 2915 } 2916 2917 /* 2918 * try to push data from one node into the next node right in the 2919 * tree. 2920 * 2921 * returns 0 if some ptrs were pushed, < 0 if there was some horrible 2922 * error, and > 0 if there was no room in the right hand block. 
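 *
 * worked instance of the rule stated next (illustrative numbers): with
 * src_nritems == 10, max_push = 10 / 2 + 1 = 6, so at most six of the
 * ten pointers move right and the source node keeps at least four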
2923 * 2924 * this will only push up to 1/2 the contents of the left node over 2925 */ 2926 static int balance_node_right(struct btrfs_trans_handle *trans, 2927 struct btrfs_root *root, 2928 struct extent_buffer *dst, 2929 struct extent_buffer *src) 2930 { 2931 int push_items = 0; 2932 int max_push; 2933 int src_nritems; 2934 int dst_nritems; 2935 int ret = 0; 2936 2937 WARN_ON(btrfs_header_generation(src) != trans->transid); 2938 WARN_ON(btrfs_header_generation(dst) != trans->transid); 2939 2940 src_nritems = btrfs_header_nritems(src); 2941 dst_nritems = btrfs_header_nritems(dst); 2942 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems; 2943 if (push_items <= 0) 2944 return 1; 2945 2946 if (src_nritems < 4) 2947 return 1; 2948 2949 max_push = src_nritems / 2 + 1; 2950 /* don't try to empty the node */ 2951 if (max_push >= src_nritems) 2952 return 1; 2953 2954 if (max_push < push_items) 2955 push_items = max_push; 2956 2957 tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems); 2958 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items), 2959 btrfs_node_key_ptr_offset(0), 2960 (dst_nritems) * 2961 sizeof(struct btrfs_key_ptr)); 2962 2963 tree_mod_log_eb_copy(root->fs_info, dst, src, 0, 2964 src_nritems - push_items, push_items); 2965 copy_extent_buffer(dst, src, 2966 btrfs_node_key_ptr_offset(0), 2967 btrfs_node_key_ptr_offset(src_nritems - push_items), 2968 push_items * sizeof(struct btrfs_key_ptr)); 2969 2970 btrfs_set_header_nritems(src, src_nritems - push_items); 2971 btrfs_set_header_nritems(dst, dst_nritems + push_items); 2972 2973 btrfs_mark_buffer_dirty(src); 2974 btrfs_mark_buffer_dirty(dst); 2975 2976 return ret; 2977 } 2978 2979 /* 2980 * helper function to insert a new root level in the tree. 2981 * A new node is allocated, and a single item is inserted to 2982 * point to the existing root 2983 * 2984 * returns zero on success or < 0 on failure. 
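 *
 * The trigger is visible in split_node below: when the block being
 * split is the tree root, a new root is inserted first so both halves
 * of the split have a parent to hang from,
 *
 *	if (c == root->node)
 *		ret = insert_new_root(trans, root, path, level + 1);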
2985 */ 2986 static noinline int insert_new_root(struct btrfs_trans_handle *trans, 2987 struct btrfs_root *root, 2988 struct btrfs_path *path, int level) 2989 { 2990 u64 lower_gen; 2991 struct extent_buffer *lower; 2992 struct extent_buffer *c; 2993 struct extent_buffer *old; 2994 struct btrfs_disk_key lower_key; 2995 2996 BUG_ON(path->nodes[level]); 2997 BUG_ON(path->nodes[level-1] != root->node); 2998 2999 lower = path->nodes[level-1]; 3000 if (level == 1) 3001 btrfs_item_key(lower, &lower_key, 0); 3002 else 3003 btrfs_node_key(lower, &lower_key, 0); 3004 3005 c = btrfs_alloc_free_block(trans, root, root->nodesize, 0, 3006 root->root_key.objectid, &lower_key, 3007 level, root->node->start, 0); 3008 if (IS_ERR(c)) 3009 return PTR_ERR(c); 3010 3011 root_add_used(root, root->nodesize); 3012 3013 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header)); 3014 btrfs_set_header_nritems(c, 1); 3015 btrfs_set_header_level(c, level); 3016 btrfs_set_header_bytenr(c, c->start); 3017 btrfs_set_header_generation(c, trans->transid); 3018 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV); 3019 btrfs_set_header_owner(c, root->root_key.objectid); 3020 3021 write_extent_buffer(c, root->fs_info->fsid, 3022 (unsigned long)btrfs_header_fsid(c), 3023 BTRFS_FSID_SIZE); 3024 3025 write_extent_buffer(c, root->fs_info->chunk_tree_uuid, 3026 (unsigned long)btrfs_header_chunk_tree_uuid(c), 3027 BTRFS_UUID_SIZE); 3028 3029 btrfs_set_node_key(c, &lower_key, 0); 3030 btrfs_set_node_blockptr(c, 0, lower->start); 3031 lower_gen = btrfs_header_generation(lower); 3032 WARN_ON(lower_gen != trans->transid); 3033 3034 btrfs_set_node_ptr_generation(c, 0, lower_gen); 3035 3036 btrfs_mark_buffer_dirty(c); 3037 3038 old = root->node; 3039 tree_mod_log_set_root_pointer(root, c); 3040 rcu_assign_pointer(root->node, c); 3041 3042 /* the super has an extra ref to root->node */ 3043 free_extent_buffer(old); 3044 3045 add_root_to_dirty_list(root); 3046 extent_buffer_get(c); 3047 path->nodes[level] = c; 3048 path->locks[level] = BTRFS_WRITE_LOCK; 3049 path->slots[level] = 0; 3050 return 0; 3051 } 3052 3053 /* 3054 * worker function to insert a single pointer in a node. 3055 * the node should have enough room for the pointer already 3056 * 3057 * slot and level indicate where you want the key to go, and 3058 * blocknr is the block the key points to. 
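 *
 * Example call (from split_node below): link the new 'split' block
 * into the parent, one slot to the right of the node it came from,
 *
 *	insert_ptr(trans, root, path, &disk_key, split->start,
 *		   path->slots[level + 1] + 1, level + 1);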
3059 */ 3060 static void insert_ptr(struct btrfs_trans_handle *trans, 3061 struct btrfs_root *root, struct btrfs_path *path, 3062 struct btrfs_disk_key *key, u64 bytenr, 3063 int slot, int level) 3064 { 3065 struct extent_buffer *lower; 3066 int nritems; 3067 int ret; 3068 3069 BUG_ON(!path->nodes[level]); 3070 btrfs_assert_tree_locked(path->nodes[level]); 3071 lower = path->nodes[level]; 3072 nritems = btrfs_header_nritems(lower); 3073 BUG_ON(slot > nritems); 3074 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root)); 3075 if (slot != nritems) { 3076 if (level) 3077 tree_mod_log_eb_move(root->fs_info, lower, slot + 1, 3078 slot, nritems - slot); 3079 memmove_extent_buffer(lower, 3080 btrfs_node_key_ptr_offset(slot + 1), 3081 btrfs_node_key_ptr_offset(slot), 3082 (nritems - slot) * sizeof(struct btrfs_key_ptr)); 3083 } 3084 if (level) { 3085 ret = tree_mod_log_insert_key(root->fs_info, lower, slot, 3086 MOD_LOG_KEY_ADD); 3087 BUG_ON(ret < 0); 3088 } 3089 btrfs_set_node_key(lower, key, slot); 3090 btrfs_set_node_blockptr(lower, slot, bytenr); 3091 WARN_ON(trans->transid == 0); 3092 btrfs_set_node_ptr_generation(lower, slot, trans->transid); 3093 btrfs_set_header_nritems(lower, nritems + 1); 3094 btrfs_mark_buffer_dirty(lower); 3095 } 3096 3097 /* 3098 * split the node at the specified level in path in two. 3099 * The path is corrected to point to the appropriate node after the split 3100 * 3101 * Before splitting this tries to make some room in the node by pushing 3102 * left and right, if either one works, it returns right away. 3103 * 3104 * returns 0 on success and < 0 on failure 3105 */ 3106 static noinline int split_node(struct btrfs_trans_handle *trans, 3107 struct btrfs_root *root, 3108 struct btrfs_path *path, int level) 3109 { 3110 struct extent_buffer *c; 3111 struct extent_buffer *split; 3112 struct btrfs_disk_key disk_key; 3113 int mid; 3114 int ret; 3115 u32 c_nritems; 3116 3117 c = path->nodes[level]; 3118 WARN_ON(btrfs_header_generation(c) != trans->transid); 3119 if (c == root->node) { 3120 /* trying to split the root, lets make a new one */ 3121 ret = insert_new_root(trans, root, path, level + 1); 3122 if (ret) 3123 return ret; 3124 } else { 3125 ret = push_nodes_for_insert(trans, root, path, level); 3126 c = path->nodes[level]; 3127 if (!ret && btrfs_header_nritems(c) < 3128 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) 3129 return 0; 3130 if (ret < 0) 3131 return ret; 3132 } 3133 3134 c_nritems = btrfs_header_nritems(c); 3135 mid = (c_nritems + 1) / 2; 3136 btrfs_node_key(c, &disk_key, mid); 3137 3138 split = btrfs_alloc_free_block(trans, root, root->nodesize, 0, 3139 root->root_key.objectid, 3140 &disk_key, level, c->start, 0); 3141 if (IS_ERR(split)) 3142 return PTR_ERR(split); 3143 3144 root_add_used(root, root->nodesize); 3145 3146 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header)); 3147 btrfs_set_header_level(split, btrfs_header_level(c)); 3148 btrfs_set_header_bytenr(split, split->start); 3149 btrfs_set_header_generation(split, trans->transid); 3150 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV); 3151 btrfs_set_header_owner(split, root->root_key.objectid); 3152 write_extent_buffer(split, root->fs_info->fsid, 3153 (unsigned long)btrfs_header_fsid(split), 3154 BTRFS_FSID_SIZE); 3155 write_extent_buffer(split, root->fs_info->chunk_tree_uuid, 3156 (unsigned long)btrfs_header_chunk_tree_uuid(split), 3157 BTRFS_UUID_SIZE); 3158 3159 tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid); 3160 copy_extent_buffer(split, c, 3161 
btrfs_node_key_ptr_offset(0), 3162 btrfs_node_key_ptr_offset(mid), 3163 (c_nritems - mid) * sizeof(struct btrfs_key_ptr)); 3164 btrfs_set_header_nritems(split, c_nritems - mid); 3165 btrfs_set_header_nritems(c, mid); 3166 ret = 0; 3167 3168 btrfs_mark_buffer_dirty(c); 3169 btrfs_mark_buffer_dirty(split); 3170 3171 insert_ptr(trans, root, path, &disk_key, split->start, 3172 path->slots[level + 1] + 1, level + 1); 3173 3174 if (path->slots[level] >= mid) { 3175 path->slots[level] -= mid; 3176 btrfs_tree_unlock(c); 3177 free_extent_buffer(c); 3178 path->nodes[level] = split; 3179 path->slots[level + 1] += 1; 3180 } else { 3181 btrfs_tree_unlock(split); 3182 free_extent_buffer(split); 3183 } 3184 return ret; 3185 } 3186 3187 /* 3188 * how many bytes are required to store the items in a leaf. start 3189 * and nr indicate which items in the leaf to check. This totals up the 3190 * space used both by the item structs and the item data 3191 */ 3192 static int leaf_space_used(struct extent_buffer *l, int start, int nr) 3193 { 3194 int data_len; 3195 int nritems = btrfs_header_nritems(l); 3196 int end = min(nritems, start + nr) - 1; 3197 3198 if (!nr) 3199 return 0; 3200 data_len = btrfs_item_end_nr(l, start); 3201 data_len = data_len - btrfs_item_offset_nr(l, end); 3202 data_len += sizeof(struct btrfs_item) * nr; 3203 WARN_ON(data_len < 0); 3204 return data_len; 3205 } 3206 3207 /* 3208 * The space between the end of the leaf items and 3209 * the start of the leaf data. IOW, how much room 3210 * the leaf has left for both items and data 3211 */ 3212 noinline int btrfs_leaf_free_space(struct btrfs_root *root, 3213 struct extent_buffer *leaf) 3214 { 3215 int nritems = btrfs_header_nritems(leaf); 3216 int ret; 3217 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems); 3218 if (ret < 0) { 3219 printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, " 3220 "used %d nritems %d\n", 3221 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root), 3222 leaf_space_used(leaf, 0, nritems), nritems); 3223 } 3224 return ret; 3225 } 3226 3227 /* 3228 * min slot controls the lowest index we're willing to push to the 3229 * right. 
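 * (For instance, push_for_double_split below reaches this code via
 * push_leaf_right with min_slot == path->slots[0], so only the target
 * slot and the items after it are candidates to move.)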
We'll push up to and including min_slot, but no lower 3230 */ 3231 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans, 3232 struct btrfs_root *root, 3233 struct btrfs_path *path, 3234 int data_size, int empty, 3235 struct extent_buffer *right, 3236 int free_space, u32 left_nritems, 3237 u32 min_slot) 3238 { 3239 struct extent_buffer *left = path->nodes[0]; 3240 struct extent_buffer *upper = path->nodes[1]; 3241 struct btrfs_map_token token; 3242 struct btrfs_disk_key disk_key; 3243 int slot; 3244 u32 i; 3245 int push_space = 0; 3246 int push_items = 0; 3247 struct btrfs_item *item; 3248 u32 nr; 3249 u32 right_nritems; 3250 u32 data_end; 3251 u32 this_item_size; 3252 3253 btrfs_init_map_token(&token); 3254 3255 if (empty) 3256 nr = 0; 3257 else 3258 nr = max_t(u32, 1, min_slot); 3259 3260 if (path->slots[0] >= left_nritems) 3261 push_space += data_size; 3262 3263 slot = path->slots[1]; 3264 i = left_nritems - 1; 3265 while (i >= nr) { 3266 item = btrfs_item_nr(left, i); 3267 3268 if (!empty && push_items > 0) { 3269 if (path->slots[0] > i) 3270 break; 3271 if (path->slots[0] == i) { 3272 int space = btrfs_leaf_free_space(root, left); 3273 if (space + push_space * 2 > free_space) 3274 break; 3275 } 3276 } 3277 3278 if (path->slots[0] == i) 3279 push_space += data_size; 3280 3281 this_item_size = btrfs_item_size(left, item); 3282 if (this_item_size + sizeof(*item) + push_space > free_space) 3283 break; 3284 3285 push_items++; 3286 push_space += this_item_size + sizeof(*item); 3287 if (i == 0) 3288 break; 3289 i--; 3290 } 3291 3292 if (push_items == 0) 3293 goto out_unlock; 3294 3295 if (!empty && push_items == left_nritems) 3296 WARN_ON(1); 3297 3298 /* push left to right */ 3299 right_nritems = btrfs_header_nritems(right); 3300 3301 push_space = btrfs_item_end_nr(left, left_nritems - push_items); 3302 push_space -= leaf_data_end(root, left); 3303 3304 /* make room in the right data area */ 3305 data_end = leaf_data_end(root, right); 3306 memmove_extent_buffer(right, 3307 btrfs_leaf_data(right) + data_end - push_space, 3308 btrfs_leaf_data(right) + data_end, 3309 BTRFS_LEAF_DATA_SIZE(root) - data_end); 3310 3311 /* copy from the left data area */ 3312 copy_extent_buffer(right, left, btrfs_leaf_data(right) + 3313 BTRFS_LEAF_DATA_SIZE(root) - push_space, 3314 btrfs_leaf_data(left) + leaf_data_end(root, left), 3315 push_space); 3316 3317 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items), 3318 btrfs_item_nr_offset(0), 3319 right_nritems * sizeof(struct btrfs_item)); 3320 3321 /* copy the items from left to right */ 3322 copy_extent_buffer(right, left, btrfs_item_nr_offset(0), 3323 btrfs_item_nr_offset(left_nritems - push_items), 3324 push_items * sizeof(struct btrfs_item)); 3325 3326 /* update the item pointers */ 3327 right_nritems += push_items; 3328 btrfs_set_header_nritems(right, right_nritems); 3329 push_space = BTRFS_LEAF_DATA_SIZE(root); 3330 for (i = 0; i < right_nritems; i++) { 3331 item = btrfs_item_nr(right, i); 3332 push_space -= btrfs_token_item_size(right, item, &token); 3333 btrfs_set_token_item_offset(right, item, push_space, &token); 3334 } 3335 3336 left_nritems -= push_items; 3337 btrfs_set_header_nritems(left, left_nritems); 3338 3339 if (left_nritems) 3340 btrfs_mark_buffer_dirty(left); 3341 else 3342 clean_tree_block(trans, root, left); 3343 3344 btrfs_mark_buffer_dirty(right); 3345 3346 btrfs_item_key(right, &disk_key, 0); 3347 btrfs_set_node_key(upper, &disk_key, slot + 1); 3348 btrfs_mark_buffer_dirty(upper); 3349 3350 /* then fixup the 
leaf pointer in the path */ 3351 if (path->slots[0] >= left_nritems) { 3352 path->slots[0] -= left_nritems; 3353 if (btrfs_header_nritems(path->nodes[0]) == 0) 3354 clean_tree_block(trans, root, path->nodes[0]); 3355 btrfs_tree_unlock(path->nodes[0]); 3356 free_extent_buffer(path->nodes[0]); 3357 path->nodes[0] = right; 3358 path->slots[1] += 1; 3359 } else { 3360 btrfs_tree_unlock(right); 3361 free_extent_buffer(right); 3362 } 3363 return 0; 3364 3365 out_unlock: 3366 btrfs_tree_unlock(right); 3367 free_extent_buffer(right); 3368 return 1; 3369 } 3370 3371 /* 3372 * push some data in the path leaf to the right, trying to free up at 3373 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3374 * 3375 * returns 1 if the push failed because the other node didn't have enough 3376 * room, 0 if everything worked out and < 0 if there were major errors. 3377 * 3378 * this will push starting from min_slot to the end of the leaf. It won't 3379 * push any slot lower than min_slot 3380 */ 3381 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root 3382 *root, struct btrfs_path *path, 3383 int min_data_size, int data_size, 3384 int empty, u32 min_slot) 3385 { 3386 struct extent_buffer *left = path->nodes[0]; 3387 struct extent_buffer *right; 3388 struct extent_buffer *upper; 3389 int slot; 3390 int free_space; 3391 u32 left_nritems; 3392 int ret; 3393 3394 if (!path->nodes[1]) 3395 return 1; 3396 3397 slot = path->slots[1]; 3398 upper = path->nodes[1]; 3399 if (slot >= btrfs_header_nritems(upper) - 1) 3400 return 1; 3401 3402 btrfs_assert_tree_locked(path->nodes[1]); 3403 3404 right = read_node_slot(root, upper, slot + 1); 3405 if (right == NULL) 3406 return 1; 3407 3408 btrfs_tree_lock(right); 3409 btrfs_set_lock_blocking(right); 3410 3411 free_space = btrfs_leaf_free_space(root, right); 3412 if (free_space < data_size) 3413 goto out_unlock; 3414 3415 /* cow and double check */ 3416 ret = btrfs_cow_block(trans, root, right, upper, 3417 slot + 1, &right); 3418 if (ret) 3419 goto out_unlock; 3420 3421 free_space = btrfs_leaf_free_space(root, right); 3422 if (free_space < data_size) 3423 goto out_unlock; 3424 3425 left_nritems = btrfs_header_nritems(left); 3426 if (left_nritems == 0) 3427 goto out_unlock; 3428 3429 return __push_leaf_right(trans, root, path, min_data_size, empty, 3430 right, free_space, left_nritems, min_slot); 3431 out_unlock: 3432 btrfs_tree_unlock(right); 3433 free_extent_buffer(right); 3434 return 1; 3435 } 3436 3437 /* 3438 * push some data in the path leaf to the left, trying to free up at 3439 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3440 * 3441 * max_slot can put a limit on how far into the leaf we'll push items. The 3442 * item at 'max_slot' won't be touched. 
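 *
 * For example, push_for_double_split below reaches this code via
 * push_leaf_left with max_slot set to the current slot, so the
 * insertion target itself stays put:
 *
 *	ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
 *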
Use (u32)-1 to make us do all the 3443 * items 3444 */ 3445 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans, 3446 struct btrfs_root *root, 3447 struct btrfs_path *path, int data_size, 3448 int empty, struct extent_buffer *left, 3449 int free_space, u32 right_nritems, 3450 u32 max_slot) 3451 { 3452 struct btrfs_disk_key disk_key; 3453 struct extent_buffer *right = path->nodes[0]; 3454 int i; 3455 int push_space = 0; 3456 int push_items = 0; 3457 struct btrfs_item *item; 3458 u32 old_left_nritems; 3459 u32 nr; 3460 int ret = 0; 3461 u32 this_item_size; 3462 u32 old_left_item_size; 3463 struct btrfs_map_token token; 3464 3465 btrfs_init_map_token(&token); 3466 3467 if (empty) 3468 nr = min(right_nritems, max_slot); 3469 else 3470 nr = min(right_nritems - 1, max_slot); 3471 3472 for (i = 0; i < nr; i++) { 3473 item = btrfs_item_nr(right, i); 3474 3475 if (!empty && push_items > 0) { 3476 if (path->slots[0] < i) 3477 break; 3478 if (path->slots[0] == i) { 3479 int space = btrfs_leaf_free_space(root, right); 3480 if (space + push_space * 2 > free_space) 3481 break; 3482 } 3483 } 3484 3485 if (path->slots[0] == i) 3486 push_space += data_size; 3487 3488 this_item_size = btrfs_item_size(right, item); 3489 if (this_item_size + sizeof(*item) + push_space > free_space) 3490 break; 3491 3492 push_items++; 3493 push_space += this_item_size + sizeof(*item); 3494 } 3495 3496 if (push_items == 0) { 3497 ret = 1; 3498 goto out; 3499 } 3500 if (!empty && push_items == btrfs_header_nritems(right)) 3501 WARN_ON(1); 3502 3503 /* push data from right to left */ 3504 copy_extent_buffer(left, right, 3505 btrfs_item_nr_offset(btrfs_header_nritems(left)), 3506 btrfs_item_nr_offset(0), 3507 push_items * sizeof(struct btrfs_item)); 3508 3509 push_space = BTRFS_LEAF_DATA_SIZE(root) - 3510 btrfs_item_offset_nr(right, push_items - 1); 3511 3512 copy_extent_buffer(left, right, btrfs_leaf_data(left) + 3513 leaf_data_end(root, left) - push_space, 3514 btrfs_leaf_data(right) + 3515 btrfs_item_offset_nr(right, push_items - 1), 3516 push_space); 3517 old_left_nritems = btrfs_header_nritems(left); 3518 BUG_ON(old_left_nritems <= 0); 3519 3520 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1); 3521 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) { 3522 u32 ioff; 3523 3524 item = btrfs_item_nr(left, i); 3525 3526 ioff = btrfs_token_item_offset(left, item, &token); 3527 btrfs_set_token_item_offset(left, item, 3528 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size), 3529 &token); 3530 } 3531 btrfs_set_header_nritems(left, old_left_nritems + push_items); 3532 3533 /* fixup right node */ 3534 if (push_items > right_nritems) { 3535 printk(KERN_CRIT "push items %d nr %u\n", push_items, 3536 right_nritems); 3537 WARN_ON(1); 3538 } 3539 3540 if (push_items < right_nritems) { 3541 push_space = btrfs_item_offset_nr(right, push_items - 1) - 3542 leaf_data_end(root, right); 3543 memmove_extent_buffer(right, btrfs_leaf_data(right) + 3544 BTRFS_LEAF_DATA_SIZE(root) - push_space, 3545 btrfs_leaf_data(right) + 3546 leaf_data_end(root, right), push_space); 3547 3548 memmove_extent_buffer(right, btrfs_item_nr_offset(0), 3549 btrfs_item_nr_offset(push_items), 3550 (btrfs_header_nritems(right) - push_items) * 3551 sizeof(struct btrfs_item)); 3552 } 3553 right_nritems -= push_items; 3554 btrfs_set_header_nritems(right, right_nritems); 3555 push_space = BTRFS_LEAF_DATA_SIZE(root); 3556 for (i = 0; i < right_nritems; i++) { 3557 item = btrfs_item_nr(right, i); 3558 3559 push_space = 
push_space - btrfs_token_item_size(right, 3560 item, &token); 3561 btrfs_set_token_item_offset(right, item, push_space, &token); 3562 } 3563 3564 btrfs_mark_buffer_dirty(left); 3565 if (right_nritems) 3566 btrfs_mark_buffer_dirty(right); 3567 else 3568 clean_tree_block(trans, root, right); 3569 3570 btrfs_item_key(right, &disk_key, 0); 3571 fixup_low_keys(trans, root, path, &disk_key, 1); 3572 3573 /* then fixup the leaf pointer in the path */ 3574 if (path->slots[0] < push_items) { 3575 path->slots[0] += old_left_nritems; 3576 btrfs_tree_unlock(path->nodes[0]); 3577 free_extent_buffer(path->nodes[0]); 3578 path->nodes[0] = left; 3579 path->slots[1] -= 1; 3580 } else { 3581 btrfs_tree_unlock(left); 3582 free_extent_buffer(left); 3583 path->slots[0] -= push_items; 3584 } 3585 BUG_ON(path->slots[0] < 0); 3586 return ret; 3587 out: 3588 btrfs_tree_unlock(left); 3589 free_extent_buffer(left); 3590 return ret; 3591 } 3592 3593 /* 3594 * push some data in the path leaf to the left, trying to free up at 3595 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3596 * 3597 * max_slot can put a limit on how far into the leaf we'll push items. The 3598 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the 3599 * items 3600 */ 3601 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root 3602 *root, struct btrfs_path *path, int min_data_size, 3603 int data_size, int empty, u32 max_slot) 3604 { 3605 struct extent_buffer *right = path->nodes[0]; 3606 struct extent_buffer *left; 3607 int slot; 3608 int free_space; 3609 u32 right_nritems; 3610 int ret = 0; 3611 3612 slot = path->slots[1]; 3613 if (slot == 0) 3614 return 1; 3615 if (!path->nodes[1]) 3616 return 1; 3617 3618 right_nritems = btrfs_header_nritems(right); 3619 if (right_nritems == 0) 3620 return 1; 3621 3622 btrfs_assert_tree_locked(path->nodes[1]); 3623 3624 left = read_node_slot(root, path->nodes[1], slot - 1); 3625 if (left == NULL) 3626 return 1; 3627 3628 btrfs_tree_lock(left); 3629 btrfs_set_lock_blocking(left); 3630 3631 free_space = btrfs_leaf_free_space(root, left); 3632 if (free_space < data_size) { 3633 ret = 1; 3634 goto out; 3635 } 3636 3637 /* cow and double check */ 3638 ret = btrfs_cow_block(trans, root, left, 3639 path->nodes[1], slot - 1, &left); 3640 if (ret) { 3641 /* we hit -ENOSPC, but it isn't fatal here */ 3642 if (ret == -ENOSPC) 3643 ret = 1; 3644 goto out; 3645 } 3646 3647 free_space = btrfs_leaf_free_space(root, left); 3648 if (free_space < data_size) { 3649 ret = 1; 3650 goto out; 3651 } 3652 3653 return __push_leaf_left(trans, root, path, min_data_size, 3654 empty, left, free_space, right_nritems, 3655 max_slot); 3656 out: 3657 btrfs_tree_unlock(left); 3658 free_extent_buffer(left); 3659 return ret; 3660 } 3661 3662 /* 3663 * split the path's leaf in two, making sure there is at least data_size 3664 * available for the resulting leaf level of the path. 
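 *
 * A sketch of the arithmetic below: items [mid, nritems) and their
 * data move to 'right', the original leaf keeps items [0, mid), and
 * each moved item's data offset is rebased by rt_data_off, i.e. by
 * BTRFS_LEAF_DATA_SIZE(root) - btrfs_item_end_nr(l, mid).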
3665 */ 3666 static noinline void copy_for_split(struct btrfs_trans_handle *trans, 3667 struct btrfs_root *root, 3668 struct btrfs_path *path, 3669 struct extent_buffer *l, 3670 struct extent_buffer *right, 3671 int slot, int mid, int nritems) 3672 { 3673 int data_copy_size; 3674 int rt_data_off; 3675 int i; 3676 struct btrfs_disk_key disk_key; 3677 struct btrfs_map_token token; 3678 3679 btrfs_init_map_token(&token); 3680 3681 nritems = nritems - mid; 3682 btrfs_set_header_nritems(right, nritems); 3683 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l); 3684 3685 copy_extent_buffer(right, l, btrfs_item_nr_offset(0), 3686 btrfs_item_nr_offset(mid), 3687 nritems * sizeof(struct btrfs_item)); 3688 3689 copy_extent_buffer(right, l, 3690 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) - 3691 data_copy_size, btrfs_leaf_data(l) + 3692 leaf_data_end(root, l), data_copy_size); 3693 3694 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) - 3695 btrfs_item_end_nr(l, mid); 3696 3697 for (i = 0; i < nritems; i++) { 3698 struct btrfs_item *item = btrfs_item_nr(right, i); 3699 u32 ioff; 3700 3701 ioff = btrfs_token_item_offset(right, item, &token); 3702 btrfs_set_token_item_offset(right, item, 3703 ioff + rt_data_off, &token); 3704 } 3705 3706 btrfs_set_header_nritems(l, mid); 3707 btrfs_item_key(right, &disk_key, 0); 3708 insert_ptr(trans, root, path, &disk_key, right->start, 3709 path->slots[1] + 1, 1); 3710 3711 btrfs_mark_buffer_dirty(right); 3712 btrfs_mark_buffer_dirty(l); 3713 BUG_ON(path->slots[0] != slot); 3714 3715 if (mid <= slot) { 3716 btrfs_tree_unlock(path->nodes[0]); 3717 free_extent_buffer(path->nodes[0]); 3718 path->nodes[0] = right; 3719 path->slots[0] -= mid; 3720 path->slots[1] += 1; 3721 } else { 3722 btrfs_tree_unlock(right); 3723 free_extent_buffer(right); 3724 } 3725 3726 BUG_ON(path->slots[0] < 0); 3727 } 3728 3729 /* 3730 * double splits happen when we need to insert a big item in the middle 3731 * of a leaf. A double split can leave us with 3 mostly empty leaves: 3732 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ] 3733 * A B C 3734 * 3735 * We avoid this by trying to push the items on either side of our target 3736 * into the adjacent leaves. If all goes well we can avoid the double split 3737 * completely. 3738 */ 3739 static noinline int push_for_double_split(struct btrfs_trans_handle *trans, 3740 struct btrfs_root *root, 3741 struct btrfs_path *path, 3742 int data_size) 3743 { 3744 int ret; 3745 int progress = 0; 3746 int slot; 3747 u32 nritems; 3748 3749 slot = path->slots[0]; 3750 3751 /* 3752 * try to push all the items after our slot into the 3753 * right leaf 3754 */ 3755 ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot); 3756 if (ret < 0) 3757 return ret; 3758 3759 if (ret == 0) 3760 progress++; 3761 3762 nritems = btrfs_header_nritems(path->nodes[0]); 3763 /* 3764 * our goal is to get our slot at the start or end of a leaf. 
If 3765 * we've done so we're done 3766 */ 3767 if (path->slots[0] == 0 || path->slots[0] == nritems) 3768 return 0; 3769 3770 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size) 3771 return 0; 3772 3773 /* try to push all the items before our slot into the next leaf */ 3774 slot = path->slots[0]; 3775 ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot); 3776 if (ret < 0) 3777 return ret; 3778 3779 if (ret == 0) 3780 progress++; 3781 3782 if (progress) 3783 return 0; 3784 return 1; 3785 } 3786 3787 /* 3788 * split the path's leaf in two, making sure there is at least data_size 3789 * available for the resulting leaf level of the path. 3790 * 3791 * returns 0 if all went well and < 0 on failure. 3792 */ 3793 static noinline int split_leaf(struct btrfs_trans_handle *trans, 3794 struct btrfs_root *root, 3795 struct btrfs_key *ins_key, 3796 struct btrfs_path *path, int data_size, 3797 int extend) 3798 { 3799 struct btrfs_disk_key disk_key; 3800 struct extent_buffer *l; 3801 u32 nritems; 3802 int mid; 3803 int slot; 3804 struct extent_buffer *right; 3805 int ret = 0; 3806 int wret; 3807 int split; 3808 int num_doubles = 0; 3809 int tried_avoid_double = 0; 3810 3811 l = path->nodes[0]; 3812 slot = path->slots[0]; 3813 if (extend && data_size + btrfs_item_size_nr(l, slot) + 3814 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root)) 3815 return -EOVERFLOW; 3816 3817 /* first try to make some room by pushing left and right */ 3818 if (data_size) { 3819 wret = push_leaf_right(trans, root, path, data_size, 3820 data_size, 0, 0); 3821 if (wret < 0) 3822 return wret; 3823 if (wret) { 3824 wret = push_leaf_left(trans, root, path, data_size, 3825 data_size, 0, (u32)-1); 3826 if (wret < 0) 3827 return wret; 3828 } 3829 l = path->nodes[0]; 3830 3831 /* did the pushes work? 
*/ 3832 if (btrfs_leaf_free_space(root, l) >= data_size) 3833 return 0; 3834 } 3835 3836 if (!path->nodes[1]) { 3837 ret = insert_new_root(trans, root, path, 1); 3838 if (ret) 3839 return ret; 3840 } 3841 again: 3842 split = 1; 3843 l = path->nodes[0]; 3844 slot = path->slots[0]; 3845 nritems = btrfs_header_nritems(l); 3846 mid = (nritems + 1) / 2; 3847 3848 if (mid <= slot) { 3849 if (nritems == 1 || 3850 leaf_space_used(l, mid, nritems - mid) + data_size > 3851 BTRFS_LEAF_DATA_SIZE(root)) { 3852 if (slot >= nritems) { 3853 split = 0; 3854 } else { 3855 mid = slot; 3856 if (mid != nritems && 3857 leaf_space_used(l, mid, nritems - mid) + 3858 data_size > BTRFS_LEAF_DATA_SIZE(root)) { 3859 if (data_size && !tried_avoid_double) 3860 goto push_for_double; 3861 split = 2; 3862 } 3863 } 3864 } 3865 } else { 3866 if (leaf_space_used(l, 0, mid) + data_size > 3867 BTRFS_LEAF_DATA_SIZE(root)) { 3868 if (!extend && data_size && slot == 0) { 3869 split = 0; 3870 } else if ((extend || !data_size) && slot == 0) { 3871 mid = 1; 3872 } else { 3873 mid = slot; 3874 if (mid != nritems && 3875 leaf_space_used(l, mid, nritems - mid) + 3876 data_size > BTRFS_LEAF_DATA_SIZE(root)) { 3877 if (data_size && !tried_avoid_double) 3878 goto push_for_double; 3879 split = 2 ; 3880 } 3881 } 3882 } 3883 } 3884 3885 if (split == 0) 3886 btrfs_cpu_key_to_disk(&disk_key, ins_key); 3887 else 3888 btrfs_item_key(l, &disk_key, mid); 3889 3890 right = btrfs_alloc_free_block(trans, root, root->leafsize, 0, 3891 root->root_key.objectid, 3892 &disk_key, 0, l->start, 0); 3893 if (IS_ERR(right)) 3894 return PTR_ERR(right); 3895 3896 root_add_used(root, root->leafsize); 3897 3898 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header)); 3899 btrfs_set_header_bytenr(right, right->start); 3900 btrfs_set_header_generation(right, trans->transid); 3901 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV); 3902 btrfs_set_header_owner(right, root->root_key.objectid); 3903 btrfs_set_header_level(right, 0); 3904 write_extent_buffer(right, root->fs_info->fsid, 3905 (unsigned long)btrfs_header_fsid(right), 3906 BTRFS_FSID_SIZE); 3907 3908 write_extent_buffer(right, root->fs_info->chunk_tree_uuid, 3909 (unsigned long)btrfs_header_chunk_tree_uuid(right), 3910 BTRFS_UUID_SIZE); 3911 3912 if (split == 0) { 3913 if (mid <= slot) { 3914 btrfs_set_header_nritems(right, 0); 3915 insert_ptr(trans, root, path, &disk_key, right->start, 3916 path->slots[1] + 1, 1); 3917 btrfs_tree_unlock(path->nodes[0]); 3918 free_extent_buffer(path->nodes[0]); 3919 path->nodes[0] = right; 3920 path->slots[0] = 0; 3921 path->slots[1] += 1; 3922 } else { 3923 btrfs_set_header_nritems(right, 0); 3924 insert_ptr(trans, root, path, &disk_key, right->start, 3925 path->slots[1], 1); 3926 btrfs_tree_unlock(path->nodes[0]); 3927 free_extent_buffer(path->nodes[0]); 3928 path->nodes[0] = right; 3929 path->slots[0] = 0; 3930 if (path->slots[1] == 0) 3931 fixup_low_keys(trans, root, path, 3932 &disk_key, 1); 3933 } 3934 btrfs_mark_buffer_dirty(right); 3935 return ret; 3936 } 3937 3938 copy_for_split(trans, root, path, l, right, slot, mid, nritems); 3939 3940 if (split == 2) { 3941 BUG_ON(num_doubles != 0); 3942 num_doubles++; 3943 goto again; 3944 } 3945 3946 return 0; 3947 3948 push_for_double: 3949 push_for_double_split(trans, root, path, data_size); 3950 tried_avoid_double = 1; 3951 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size) 3952 return 0; 3953 goto again; 3954 } 3955 3956 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans, 
3957 struct btrfs_root *root, 3958 struct btrfs_path *path, int ins_len) 3959 { 3960 struct btrfs_key key; 3961 struct extent_buffer *leaf; 3962 struct btrfs_file_extent_item *fi; 3963 u64 extent_len = 0; 3964 u32 item_size; 3965 int ret; 3966 3967 leaf = path->nodes[0]; 3968 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3969 3970 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY && 3971 key.type != BTRFS_EXTENT_CSUM_KEY); 3972 3973 if (btrfs_leaf_free_space(root, leaf) >= ins_len) 3974 return 0; 3975 3976 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 3977 if (key.type == BTRFS_EXTENT_DATA_KEY) { 3978 fi = btrfs_item_ptr(leaf, path->slots[0], 3979 struct btrfs_file_extent_item); 3980 extent_len = btrfs_file_extent_num_bytes(leaf, fi); 3981 } 3982 btrfs_release_path(path); 3983 3984 path->keep_locks = 1; 3985 path->search_for_split = 1; 3986 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 3987 path->search_for_split = 0; 3988 if (ret < 0) 3989 goto err; 3990 3991 ret = -EAGAIN; 3992 leaf = path->nodes[0]; 3993 /* if our item isn't there or got smaller, return now */ 3994 if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0])) 3995 goto err; 3996 3997 /* the leaf has changed, it now has room. return now */ 3998 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len) 3999 goto err; 4000 4001 if (key.type == BTRFS_EXTENT_DATA_KEY) { 4002 fi = btrfs_item_ptr(leaf, path->slots[0], 4003 struct btrfs_file_extent_item); 4004 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi)) 4005 goto err; 4006 } 4007 4008 btrfs_set_path_blocking(path); 4009 ret = split_leaf(trans, root, &key, path, ins_len, 1); 4010 if (ret) 4011 goto err; 4012 4013 path->keep_locks = 0; 4014 btrfs_unlock_up_safe(path, 1); 4015 return 0; 4016 err: 4017 path->keep_locks = 0; 4018 return ret; 4019 } 4020 4021 static noinline int split_item(struct btrfs_trans_handle *trans, 4022 struct btrfs_root *root, 4023 struct btrfs_path *path, 4024 struct btrfs_key *new_key, 4025 unsigned long split_offset) 4026 { 4027 struct extent_buffer *leaf; 4028 struct btrfs_item *item; 4029 struct btrfs_item *new_item; 4030 int slot; 4031 char *buf; 4032 u32 nritems; 4033 u32 item_size; 4034 u32 orig_offset; 4035 struct btrfs_disk_key disk_key; 4036 4037 leaf = path->nodes[0]; 4038 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item)); 4039 4040 btrfs_set_path_blocking(path); 4041 4042 item = btrfs_item_nr(leaf, path->slots[0]); 4043 orig_offset = btrfs_item_offset(leaf, item); 4044 item_size = btrfs_item_size(leaf, item); 4045 4046 buf = kmalloc(item_size, GFP_NOFS); 4047 if (!buf) 4048 return -ENOMEM; 4049 4050 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, 4051 path->slots[0]), item_size); 4052 4053 slot = path->slots[0] + 1; 4054 nritems = btrfs_header_nritems(leaf); 4055 if (slot != nritems) { 4056 /* shift the items */ 4057 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1), 4058 btrfs_item_nr_offset(slot), 4059 (nritems - slot) * sizeof(struct btrfs_item)); 4060 } 4061 4062 btrfs_cpu_key_to_disk(&disk_key, new_key); 4063 btrfs_set_item_key(leaf, &disk_key, slot); 4064 4065 new_item = btrfs_item_nr(leaf, slot); 4066 4067 btrfs_set_item_offset(leaf, new_item, orig_offset); 4068 btrfs_set_item_size(leaf, new_item, item_size - split_offset); 4069 4070 btrfs_set_item_offset(leaf, item, 4071 orig_offset + item_size - split_offset); 4072 btrfs_set_item_size(leaf, item, split_offset); 4073 4074 btrfs_set_header_nritems(leaf, nritems + 1); 4075 4076 /* write the data for the start of 
the original item */
4077 write_extent_buffer(leaf, buf,
4078 btrfs_item_ptr_offset(leaf, path->slots[0]),
4079 split_offset);
4080
4081 /* write the data for the new item */
4082 write_extent_buffer(leaf, buf + split_offset,
4083 btrfs_item_ptr_offset(leaf, slot),
4084 item_size - split_offset);
4085 btrfs_mark_buffer_dirty(leaf);
4086
4087 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
4088 kfree(buf);
4089 return 0;
4090 }
4091
4092 /*
4093 * This function splits a single item into two items,
4094 * giving 'new_key' to the new item and splitting the
4095 * old one at split_offset (from the start of the item).
4096 *
4097 * The path may be released by this operation. After
4098 * the split, the path is pointing to the old item. The
4099 * new item is going to be in the same node as the old one.
4100 *
4101 * Note, the item being split must be small enough to live alone on
4102 * a tree block with room for one extra struct btrfs_item
4103 *
4104 * This allows us to split the item in place, keeping a lock on the
4105 * leaf the entire time.
4106 */
4107 int btrfs_split_item(struct btrfs_trans_handle *trans,
4108 struct btrfs_root *root,
4109 struct btrfs_path *path,
4110 struct btrfs_key *new_key,
4111 unsigned long split_offset)
4112 {
4113 int ret;
4114 ret = setup_leaf_for_split(trans, root, path,
4115 sizeof(struct btrfs_item));
4116 if (ret)
4117 return ret;
4118
4119 ret = split_item(trans, root, path, new_key, split_offset);
4120 return ret;
4121 }
4122
4123 /*
4124 * This function duplicates an item, giving 'new_key' to the new item.
4125 * It guarantees both items live in the same tree leaf and the new item
4126 * is contiguous with the original item.
4127 *
4128 * This allows us to split a file extent in place, keeping a lock on the
4129 * leaf the entire time.
4130 */
4131 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4132 struct btrfs_root *root,
4133 struct btrfs_path *path,
4134 struct btrfs_key *new_key)
4135 {
4136 struct extent_buffer *leaf;
4137 int ret;
4138 u32 item_size;
4139
4140 leaf = path->nodes[0];
4141 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4142 ret = setup_leaf_for_split(trans, root, path,
4143 item_size + sizeof(struct btrfs_item));
4144 if (ret)
4145 return ret;
4146
4147 path->slots[0]++;
4148 setup_items_for_insert(trans, root, path, new_key, &item_size,
4149 item_size, item_size +
4150 sizeof(struct btrfs_item), 1);
4151 leaf = path->nodes[0];
4152 memcpy_extent_buffer(leaf,
4153 btrfs_item_ptr_offset(leaf, path->slots[0]),
4154 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4155 item_size);
4156 return 0;
4157 }
4158
4159 /*
4160 * make the item pointed to by the path smaller. new_size indicates
4161 * how small to make it, and from_end tells us if we just chop bytes
4162 * off the end of the item or if we shift the item to chop bytes off
4163 * the front.
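 *
 * A hypothetical caller chopping 'shrink' bytes off the end of the
 * item at path->slots[0] might look like this (an illustrative
 * sketch, not code from this file; 'shrink' is an assumed variable):
 *
 *	u32 old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
 *	btrfs_truncate_item(trans, root, path, old_size - shrink, 1);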
4164 */
4165 void btrfs_truncate_item(struct btrfs_trans_handle *trans,
4166 struct btrfs_root *root,
4167 struct btrfs_path *path,
4168 u32 new_size, int from_end)
4169 {
4170 int slot;
4171 struct extent_buffer *leaf;
4172 struct btrfs_item *item;
4173 u32 nritems;
4174 unsigned int data_end;
4175 unsigned int old_data_start;
4176 unsigned int old_size;
4177 unsigned int size_diff;
4178 int i;
4179 struct btrfs_map_token token;
4180
4181 btrfs_init_map_token(&token);
4182
4183 leaf = path->nodes[0];
4184 slot = path->slots[0];
4185
4186 old_size = btrfs_item_size_nr(leaf, slot);
4187 if (old_size == new_size)
4188 return;
4189
4190 nritems = btrfs_header_nritems(leaf);
4191 data_end = leaf_data_end(root, leaf);
4192
4193 old_data_start = btrfs_item_offset_nr(leaf, slot);
4194
4195 size_diff = old_size - new_size;
4196
4197 BUG_ON(slot < 0);
4198 BUG_ON(slot >= nritems);
4199
4200 /*
4201 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4202 */
4203 /* first correct the data pointers */
4204 for (i = slot; i < nritems; i++) {
4205 u32 ioff;
4206 item = btrfs_item_nr(leaf, i);
4207
4208 ioff = btrfs_token_item_offset(leaf, item, &token);
4209 btrfs_set_token_item_offset(leaf, item,
4210 ioff + size_diff, &token);
4211 }
4212
4213 /* shift the data */
4214 if (from_end) {
4215 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4216 data_end + size_diff, btrfs_leaf_data(leaf) +
4217 data_end, old_data_start + new_size - data_end);
4218 } else {
4219 struct btrfs_disk_key disk_key;
4220 u64 offset;
4221
4222 btrfs_item_key(leaf, &disk_key, slot);
4223
4224 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4225 unsigned long ptr;
4226 struct btrfs_file_extent_item *fi;
4227
4228 fi = btrfs_item_ptr(leaf, slot,
4229 struct btrfs_file_extent_item);
4230 fi = (struct btrfs_file_extent_item *)(
4231 (unsigned long)fi - size_diff);
4232
4233 if (btrfs_file_extent_type(leaf, fi) ==
4234 BTRFS_FILE_EXTENT_INLINE) {
4235 ptr = btrfs_item_ptr_offset(leaf, slot);
4236 memmove_extent_buffer(leaf, ptr,
4237 (unsigned long)fi,
4238 offsetof(struct btrfs_file_extent_item,
4239 disk_bytenr));
4240 }
4241 }
4242
4243 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4244 data_end + size_diff, btrfs_leaf_data(leaf) +
4245 data_end, old_data_start - data_end);
4246
4247 offset = btrfs_disk_key_offset(&disk_key);
4248 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4249 btrfs_set_item_key(leaf, &disk_key, slot);
4250 if (slot == 0)
4251 fixup_low_keys(trans, root, path, &disk_key, 1);
4252 }
4253
4254 item = btrfs_item_nr(leaf, slot);
4255 btrfs_set_item_size(leaf, item, new_size);
4256 btrfs_mark_buffer_dirty(leaf);
4257
4258 if (btrfs_leaf_free_space(root, leaf) < 0) {
4259 btrfs_print_leaf(root, leaf);
4260 BUG();
4261 }
4262 }
4263
4264 /*
4265 * make the item pointed to by the path bigger, data_size is the size to add.
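 *
 * A hypothetical caller growing the item at path->slots[0] by 'grow'
 * bytes (an illustrative sketch; 'grow' is an assumed variable, and
 * the leaf must already have that much free space or
 * btrfs_extend_item() hits its BUG()):
 *
 *	if (btrfs_leaf_free_space(root, path->nodes[0]) >= grow)
 *		btrfs_extend_item(trans, root, path, grow);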
4266 */ 4267 void btrfs_extend_item(struct btrfs_trans_handle *trans, 4268 struct btrfs_root *root, struct btrfs_path *path, 4269 u32 data_size) 4270 { 4271 int slot; 4272 struct extent_buffer *leaf; 4273 struct btrfs_item *item; 4274 u32 nritems; 4275 unsigned int data_end; 4276 unsigned int old_data; 4277 unsigned int old_size; 4278 int i; 4279 struct btrfs_map_token token; 4280 4281 btrfs_init_map_token(&token); 4282 4283 leaf = path->nodes[0]; 4284 4285 nritems = btrfs_header_nritems(leaf); 4286 data_end = leaf_data_end(root, leaf); 4287 4288 if (btrfs_leaf_free_space(root, leaf) < data_size) { 4289 btrfs_print_leaf(root, leaf); 4290 BUG(); 4291 } 4292 slot = path->slots[0]; 4293 old_data = btrfs_item_end_nr(leaf, slot); 4294 4295 BUG_ON(slot < 0); 4296 if (slot >= nritems) { 4297 btrfs_print_leaf(root, leaf); 4298 printk(KERN_CRIT "slot %d too large, nritems %d\n", 4299 slot, nritems); 4300 BUG_ON(1); 4301 } 4302 4303 /* 4304 * item0..itemN ... dataN.offset..dataN.size .. data0.size 4305 */ 4306 /* first correct the data pointers */ 4307 for (i = slot; i < nritems; i++) { 4308 u32 ioff; 4309 item = btrfs_item_nr(leaf, i); 4310 4311 ioff = btrfs_token_item_offset(leaf, item, &token); 4312 btrfs_set_token_item_offset(leaf, item, 4313 ioff - data_size, &token); 4314 } 4315 4316 /* shift the data */ 4317 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) + 4318 data_end - data_size, btrfs_leaf_data(leaf) + 4319 data_end, old_data - data_end); 4320 4321 data_end = old_data; 4322 old_size = btrfs_item_size_nr(leaf, slot); 4323 item = btrfs_item_nr(leaf, slot); 4324 btrfs_set_item_size(leaf, item, old_size + data_size); 4325 btrfs_mark_buffer_dirty(leaf); 4326 4327 if (btrfs_leaf_free_space(root, leaf) < 0) { 4328 btrfs_print_leaf(root, leaf); 4329 BUG(); 4330 } 4331 } 4332 4333 /* 4334 * Given a key and some data, insert items into the tree. 4335 * This does all the path init required, making room in the tree if needed. 4336 * Returns the number of keys that were inserted. 
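 *
 * Hypothetical usage inserting up to two items in one pass (an
 * illustrative sketch, not code from this file; keys[] must be
 * filled in ascending key order, with sizes[] to match):
 *
 *	struct btrfs_key keys[2];
 *	u32 sizes[2];
 *	int ret;
 *
 *	ret = btrfs_insert_some_items(trans, root, path, keys, sizes, 2);
 *	if (ret < 0)
 *		return ret;
 *
 * On success ret is the number of items actually inserted, which may
 * be smaller than the number asked for.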
4337 */
4338 int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
4339 struct btrfs_root *root,
4340 struct btrfs_path *path,
4341 struct btrfs_key *cpu_key, u32 *data_size,
4342 int nr)
4343 {
4344 struct extent_buffer *leaf;
4345 struct btrfs_item *item;
4346 int ret = 0;
4347 int slot;
4348 int i;
4349 u32 nritems;
4350 u32 total_data = 0;
4351 u32 total_size = 0;
4352 unsigned int data_end;
4353 struct btrfs_disk_key disk_key;
4354 struct btrfs_key found_key;
4355 struct btrfs_map_token token;
4356
4357 btrfs_init_map_token(&token);
4358
4359 for (i = 0; i < nr; i++) {
4360 if (total_size + data_size[i] + sizeof(struct btrfs_item) >
4361 BTRFS_LEAF_DATA_SIZE(root)) {
4362 nr = i;
4363 break;
4364 }
4365 total_data += data_size[i];
4366 total_size += data_size[i] + sizeof(struct btrfs_item);
4367 }
4368 BUG_ON(nr == 0);
4369
4370 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4371 if (ret == 0)
4372 return -EEXIST;
4373 if (ret < 0)
4374 goto out;
4375
4376 leaf = path->nodes[0];
4377
4378 nritems = btrfs_header_nritems(leaf);
4379 data_end = leaf_data_end(root, leaf);
4380
4381 if (btrfs_leaf_free_space(root, leaf) < total_size) {
4382 for (i = nr - 1; i >= 0; i--) {
4383 total_data -= data_size[i];
4384 total_size -= data_size[i] + sizeof(struct btrfs_item);
4385 if (total_size < btrfs_leaf_free_space(root, leaf))
4386 break;
4387 }
4388 nr = i;
4389 }
4390
4391 slot = path->slots[0];
4392 BUG_ON(slot < 0);
4393
4394 if (slot != nritems) {
4395 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4396
4397 item = btrfs_item_nr(leaf, slot);
4398 btrfs_item_key_to_cpu(leaf, &found_key, slot);
4399
4400 /* figure out how many keys we can insert in here */
4401 total_data = data_size[0];
4402 for (i = 1; i < nr; i++) {
4403 if (btrfs_comp_cpu_keys(&found_key, cpu_key + i) <= 0)
4404 break;
4405 total_data += data_size[i];
4406 }
4407 nr = i;
4408
4409 if (old_data < data_end) {
4410 btrfs_print_leaf(root, leaf);
4411 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
4412 slot, old_data, data_end);
4413 BUG_ON(1);
4414 }
4415 /*
4416 * item0..itemN ... dataN.offset..dataN.size ..
data0.size
4417 */
4418 /* first correct the data pointers */
4419 for (i = slot; i < nritems; i++) {
4420 u32 ioff;
4421
4422 item = btrfs_item_nr(leaf, i);
4423 ioff = btrfs_token_item_offset(leaf, item, &token);
4424 btrfs_set_token_item_offset(leaf, item,
4425 ioff - total_data, &token);
4426 }
4427 /* shift the items */
4428 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4429 btrfs_item_nr_offset(slot),
4430 (nritems - slot) * sizeof(struct btrfs_item));
4431
4432 /* shift the data */
4433 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4434 data_end - total_data, btrfs_leaf_data(leaf) +
4435 data_end, old_data - data_end);
4436 data_end = old_data;
4437 } else {
4438 /*
4439 * this sucks but it has to be done, if we are inserting at
4440 * the end of the leaf only insert 1 of the items, since we
4441 * have no way of knowing what's on the next leaf and we'd have
4442 * to drop our current locks to figure it out
4443 */
4444 nr = 1;
4445 }
4446
4447 /* setup the item for the new data */
4448 for (i = 0; i < nr; i++) {
4449 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4450 btrfs_set_item_key(leaf, &disk_key, slot + i);
4451 item = btrfs_item_nr(leaf, slot + i);
4452 btrfs_set_token_item_offset(leaf, item,
4453 data_end - data_size[i], &token);
4454 data_end -= data_size[i];
4455 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4456 }
4457 btrfs_set_header_nritems(leaf, nritems + nr);
4458 btrfs_mark_buffer_dirty(leaf);
4459
4460 ret = 0;
4461 if (slot == 0) {
4462 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4463 fixup_low_keys(trans, root, path, &disk_key, 1);
4464 }
4465
4466 if (btrfs_leaf_free_space(root, leaf) < 0) {
4467 btrfs_print_leaf(root, leaf);
4468 BUG();
4469 }
4470 out:
4471 if (!ret)
4472 ret = nr;
4473 return ret;
4474 }
4475
4476 /*
4477 * this is a helper for btrfs_insert_empty_items, the main goal here is
4478 * to save stack depth by doing the bulk of the work in a function
4479 * that doesn't call btrfs_search_slot
4480 */
4481 void setup_items_for_insert(struct btrfs_trans_handle *trans,
4482 struct btrfs_root *root, struct btrfs_path *path,
4483 struct btrfs_key *cpu_key, u32 *data_size,
4484 u32 total_data, u32 total_size, int nr)
4485 {
4486 struct btrfs_item *item;
4487 int i;
4488 u32 nritems;
4489 unsigned int data_end;
4490 struct btrfs_disk_key disk_key;
4491 struct extent_buffer *leaf;
4492 int slot;
4493 struct btrfs_map_token token;
4494
4495 btrfs_init_map_token(&token);
4496
4497 leaf = path->nodes[0];
4498 slot = path->slots[0];
4499
4500 nritems = btrfs_header_nritems(leaf);
4501 data_end = leaf_data_end(root, leaf);
4502
4503 if (btrfs_leaf_free_space(root, leaf) < total_size) {
4504 btrfs_print_leaf(root, leaf);
4505 printk(KERN_CRIT "not enough freespace need %u have %d\n",
4506 total_size, btrfs_leaf_free_space(root, leaf));
4507 BUG();
4508 }
4509
4510 if (slot != nritems) {
4511 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4512
4513 if (old_data < data_end) {
4514 btrfs_print_leaf(root, leaf);
4515 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
4516 slot, old_data, data_end);
4517 BUG_ON(1);
4518 }
4519 /*
4520 * item0..itemN ... dataN.offset..dataN.size ..
data0.size 4521 */ 4522 /* first correct the data pointers */ 4523 for (i = slot; i < nritems; i++) { 4524 u32 ioff; 4525 4526 item = btrfs_item_nr(leaf, i); 4527 ioff = btrfs_token_item_offset(leaf, item, &token); 4528 btrfs_set_token_item_offset(leaf, item, 4529 ioff - total_data, &token); 4530 } 4531 /* shift the items */ 4532 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr), 4533 btrfs_item_nr_offset(slot), 4534 (nritems - slot) * sizeof(struct btrfs_item)); 4535 4536 /* shift the data */ 4537 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) + 4538 data_end - total_data, btrfs_leaf_data(leaf) + 4539 data_end, old_data - data_end); 4540 data_end = old_data; 4541 } 4542 4543 /* setup the item for the new data */ 4544 for (i = 0; i < nr; i++) { 4545 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i); 4546 btrfs_set_item_key(leaf, &disk_key, slot + i); 4547 item = btrfs_item_nr(leaf, slot + i); 4548 btrfs_set_token_item_offset(leaf, item, 4549 data_end - data_size[i], &token); 4550 data_end -= data_size[i]; 4551 btrfs_set_token_item_size(leaf, item, data_size[i], &token); 4552 } 4553 4554 btrfs_set_header_nritems(leaf, nritems + nr); 4555 4556 if (slot == 0) { 4557 btrfs_cpu_key_to_disk(&disk_key, cpu_key); 4558 fixup_low_keys(trans, root, path, &disk_key, 1); 4559 } 4560 btrfs_unlock_up_safe(path, 1); 4561 btrfs_mark_buffer_dirty(leaf); 4562 4563 if (btrfs_leaf_free_space(root, leaf) < 0) { 4564 btrfs_print_leaf(root, leaf); 4565 BUG(); 4566 } 4567 } 4568 4569 /* 4570 * Given a key and some data, insert items into the tree. 4571 * This does all the path init required, making room in the tree if needed. 4572 */ 4573 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, 4574 struct btrfs_root *root, 4575 struct btrfs_path *path, 4576 struct btrfs_key *cpu_key, u32 *data_size, 4577 int nr) 4578 { 4579 int ret = 0; 4580 int slot; 4581 int i; 4582 u32 total_size = 0; 4583 u32 total_data = 0; 4584 4585 for (i = 0; i < nr; i++) 4586 total_data += data_size[i]; 4587 4588 total_size = total_data + (nr * sizeof(struct btrfs_item)); 4589 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1); 4590 if (ret == 0) 4591 return -EEXIST; 4592 if (ret < 0) 4593 return ret; 4594 4595 slot = path->slots[0]; 4596 BUG_ON(slot < 0); 4597 4598 setup_items_for_insert(trans, root, path, cpu_key, data_size, 4599 total_data, total_size, nr); 4600 return 0; 4601 } 4602 4603 /* 4604 * Given a key and some data, insert an item into the tree. 4605 * This does all the path init required, making room in the tree if needed. 4606 */ 4607 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root 4608 *root, struct btrfs_key *cpu_key, void *data, u32 4609 data_size) 4610 { 4611 int ret = 0; 4612 struct btrfs_path *path; 4613 struct extent_buffer *leaf; 4614 unsigned long ptr; 4615 4616 path = btrfs_alloc_path(); 4617 if (!path) 4618 return -ENOMEM; 4619 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size); 4620 if (!ret) { 4621 leaf = path->nodes[0]; 4622 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 4623 write_extent_buffer(leaf, data, ptr, data_size); 4624 btrfs_mark_buffer_dirty(leaf); 4625 } 4626 btrfs_free_path(path); 4627 return ret; 4628 } 4629 4630 /* 4631 * delete the pointer from a given node. 4632 * 4633 * the tree should have been previously balanced so the deletion does not 4634 * empty a node. 
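 *
 * For example, btrfs_del_leaf() below unhooks an empty leaf from its
 * parent with:
 *
 *	del_ptr(trans, root, path, 1, path->slots[1], 1);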
4635 */
4636 static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4637 struct btrfs_path *path, int level, int slot,
4638 int tree_mod_log)
4639 {
4640 struct extent_buffer *parent = path->nodes[level];
4641 u32 nritems;
4642 int ret;
4643
4644 nritems = btrfs_header_nritems(parent);
4645 if (slot != nritems - 1) {
4646 if (tree_mod_log && level)
4647 tree_mod_log_eb_move(root->fs_info, parent, slot,
4648 slot + 1, nritems - slot - 1);
4649 memmove_extent_buffer(parent,
4650 btrfs_node_key_ptr_offset(slot),
4651 btrfs_node_key_ptr_offset(slot + 1),
4652 sizeof(struct btrfs_key_ptr) *
4653 (nritems - slot - 1));
4654 } else if (tree_mod_log && level) {
4655 ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
4656 MOD_LOG_KEY_REMOVE);
4657 BUG_ON(ret < 0);
4658 }
4659
4660 nritems--;
4661 btrfs_set_header_nritems(parent, nritems);
4662 if (nritems == 0 && parent == root->node) {
4663 BUG_ON(btrfs_header_level(root->node) != 1);
4664 /* just turn the root into a leaf and break */
4665 btrfs_set_header_level(root->node, 0);
4666 } else if (slot == 0) {
4667 struct btrfs_disk_key disk_key;
4668
4669 btrfs_node_key(parent, &disk_key, 0);
4670 fixup_low_keys(trans, root, path, &disk_key, level + 1);
4671 }
4672 btrfs_mark_buffer_dirty(parent);
4673 }
4674
4675 /*
4676 * a helper function to delete the leaf pointed to by path->slots[1] and
4677 * path->nodes[1].
4678 *
4679 * This deletes the pointer in path->nodes[1] and frees the leaf
4680 * block extent.
4681 *
4682 * The path must have already been setup for deleting the leaf, including
4683 * all the proper balancing. path->nodes[1] must be locked.
4684 */
4685 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4686 struct btrfs_root *root,
4687 struct btrfs_path *path,
4688 struct extent_buffer *leaf)
4689 {
4690 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4691 del_ptr(trans, root, path, 1, path->slots[1], 1);
4692
4693 /*
4694 * btrfs_free_extent is expensive, we want to make sure we
4695 * aren't holding any locks when we call it
4696 */
4697 btrfs_unlock_up_safe(path, 0);
4698
4699 root_sub_used(root, leaf->len);
4700
4701 extent_buffer_get(leaf);
4702 btrfs_free_tree_block(trans, root, leaf, 0, 1);
4703 free_extent_buffer_stale(leaf);
4704 }
4705 /*
4706 * delete the item at the leaf level in path.
If that empties 4707 * the leaf, remove it from the tree 4708 */ 4709 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, 4710 struct btrfs_path *path, int slot, int nr) 4711 { 4712 struct extent_buffer *leaf; 4713 struct btrfs_item *item; 4714 int last_off; 4715 int dsize = 0; 4716 int ret = 0; 4717 int wret; 4718 int i; 4719 u32 nritems; 4720 struct btrfs_map_token token; 4721 4722 btrfs_init_map_token(&token); 4723 4724 leaf = path->nodes[0]; 4725 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1); 4726 4727 for (i = 0; i < nr; i++) 4728 dsize += btrfs_item_size_nr(leaf, slot + i); 4729 4730 nritems = btrfs_header_nritems(leaf); 4731 4732 if (slot + nr != nritems) { 4733 int data_end = leaf_data_end(root, leaf); 4734 4735 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) + 4736 data_end + dsize, 4737 btrfs_leaf_data(leaf) + data_end, 4738 last_off - data_end); 4739 4740 for (i = slot + nr; i < nritems; i++) { 4741 u32 ioff; 4742 4743 item = btrfs_item_nr(leaf, i); 4744 ioff = btrfs_token_item_offset(leaf, item, &token); 4745 btrfs_set_token_item_offset(leaf, item, 4746 ioff + dsize, &token); 4747 } 4748 4749 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot), 4750 btrfs_item_nr_offset(slot + nr), 4751 sizeof(struct btrfs_item) * 4752 (nritems - slot - nr)); 4753 } 4754 btrfs_set_header_nritems(leaf, nritems - nr); 4755 nritems -= nr; 4756 4757 /* delete the leaf if we've emptied it */ 4758 if (nritems == 0) { 4759 if (leaf == root->node) { 4760 btrfs_set_header_level(leaf, 0); 4761 } else { 4762 btrfs_set_path_blocking(path); 4763 clean_tree_block(trans, root, leaf); 4764 btrfs_del_leaf(trans, root, path, leaf); 4765 } 4766 } else { 4767 int used = leaf_space_used(leaf, 0, nritems); 4768 if (slot == 0) { 4769 struct btrfs_disk_key disk_key; 4770 4771 btrfs_item_key(leaf, &disk_key, 0); 4772 fixup_low_keys(trans, root, path, &disk_key, 1); 4773 } 4774 4775 /* delete the leaf if it is mostly empty */ 4776 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) { 4777 /* push_leaf_left fixes the path. 4778 * make sure the path still points to our leaf 4779 * for possible call to del_ptr below 4780 */ 4781 slot = path->slots[1]; 4782 extent_buffer_get(leaf); 4783 4784 btrfs_set_path_blocking(path); 4785 wret = push_leaf_left(trans, root, path, 1, 1, 4786 1, (u32)-1); 4787 if (wret < 0 && wret != -ENOSPC) 4788 ret = wret; 4789 4790 if (path->nodes[0] == leaf && 4791 btrfs_header_nritems(leaf)) { 4792 wret = push_leaf_right(trans, root, path, 1, 4793 1, 1, 0); 4794 if (wret < 0 && wret != -ENOSPC) 4795 ret = wret; 4796 } 4797 4798 if (btrfs_header_nritems(leaf) == 0) { 4799 path->slots[1] = slot; 4800 btrfs_del_leaf(trans, root, path, leaf); 4801 free_extent_buffer(leaf); 4802 ret = 0; 4803 } else { 4804 /* if we're still in the path, make sure 4805 * we're dirty. Otherwise, one of the 4806 * push_leaf functions must have already 4807 * dirtied this buffer 4808 */ 4809 if (path->nodes[0] == leaf) 4810 btrfs_mark_buffer_dirty(leaf); 4811 free_extent_buffer(leaf); 4812 } 4813 } else { 4814 btrfs_mark_buffer_dirty(leaf); 4815 } 4816 } 4817 return ret; 4818 } 4819 4820 /* 4821 * search the tree again to find a leaf with lesser keys 4822 * returns 0 if it found something or 1 if there are no lesser leaves. 4823 * returns < 0 on io errors. 4824 * 4825 * This may release the path, and so you may lose any locks held at the 4826 * time you call it. 
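 *
 * A hypothetical backwards scan over leaves, stopping when the return
 * value is 1 (no lesser leaf) or negative (IO error). An illustrative
 * sketch; process_leaf() is an assumed helper, not a btrfs function:
 *
 *	while (1) {
 *		ret = btrfs_prev_leaf(root, path);
 *		if (ret)
 *			break;
 *		process_leaf(path->nodes[0]);
 *	}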
4827 */ 4828 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path) 4829 { 4830 struct btrfs_key key; 4831 struct btrfs_disk_key found_key; 4832 int ret; 4833 4834 btrfs_item_key_to_cpu(path->nodes[0], &key, 0); 4835 4836 if (key.offset > 0) 4837 key.offset--; 4838 else if (key.type > 0) 4839 key.type--; 4840 else if (key.objectid > 0) 4841 key.objectid--; 4842 else 4843 return 1; 4844 4845 btrfs_release_path(path); 4846 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4847 if (ret < 0) 4848 return ret; 4849 btrfs_item_key(path->nodes[0], &found_key, 0); 4850 ret = comp_keys(&found_key, &key); 4851 if (ret < 0) 4852 return 0; 4853 return 1; 4854 } 4855 4856 /* 4857 * A helper function to walk down the tree starting at min_key, and looking 4858 * for nodes or leaves that are either in cache or have a minimum 4859 * transaction id. This is used by the btree defrag code, and tree logging 4860 * 4861 * This does not cow, but it does stuff the starting key it finds back 4862 * into min_key, so you can call btrfs_search_slot with cow=1 on the 4863 * key and get a writable path. 4864 * 4865 * This does lock as it descends, and path->keep_locks should be set 4866 * to 1 by the caller. 4867 * 4868 * This honors path->lowest_level to prevent descent past a given level 4869 * of the tree. 4870 * 4871 * min_trans indicates the oldest transaction that you are interested 4872 * in walking through. Any nodes or leaves older than min_trans are 4873 * skipped over (without reading them). 4874 * 4875 * returns zero if something useful was found, < 0 on error and 1 if there 4876 * was nothing in the tree that matched the search criteria. 4877 */ 4878 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key, 4879 struct btrfs_key *max_key, 4880 struct btrfs_path *path, int cache_only, 4881 u64 min_trans) 4882 { 4883 struct extent_buffer *cur; 4884 struct btrfs_key found_key; 4885 int slot; 4886 int sret; 4887 u32 nritems; 4888 int level; 4889 int ret = 1; 4890 4891 WARN_ON(!path->keep_locks); 4892 again: 4893 cur = btrfs_read_lock_root_node(root); 4894 level = btrfs_header_level(cur); 4895 WARN_ON(path->nodes[level]); 4896 path->nodes[level] = cur; 4897 path->locks[level] = BTRFS_READ_LOCK; 4898 4899 if (btrfs_header_generation(cur) < min_trans) { 4900 ret = 1; 4901 goto out; 4902 } 4903 while (1) { 4904 nritems = btrfs_header_nritems(cur); 4905 level = btrfs_header_level(cur); 4906 sret = bin_search(cur, min_key, level, &slot); 4907 4908 /* at the lowest level, we're done, setup the path and exit */ 4909 if (level == path->lowest_level) { 4910 if (slot >= nritems) 4911 goto find_next_key; 4912 ret = 0; 4913 path->slots[level] = slot; 4914 btrfs_item_key_to_cpu(cur, &found_key, slot); 4915 goto out; 4916 } 4917 if (sret && slot > 0) 4918 slot--; 4919 /* 4920 * check this node pointer against the cache_only and 4921 * min_trans parameters. If it isn't in cache or is too 4922 * old, skip to the next one. 
4923 */ 4924 while (slot < nritems) { 4925 u64 blockptr; 4926 u64 gen; 4927 struct extent_buffer *tmp; 4928 struct btrfs_disk_key disk_key; 4929 4930 blockptr = btrfs_node_blockptr(cur, slot); 4931 gen = btrfs_node_ptr_generation(cur, slot); 4932 if (gen < min_trans) { 4933 slot++; 4934 continue; 4935 } 4936 if (!cache_only) 4937 break; 4938 4939 if (max_key) { 4940 btrfs_node_key(cur, &disk_key, slot); 4941 if (comp_keys(&disk_key, max_key) >= 0) { 4942 ret = 1; 4943 goto out; 4944 } 4945 } 4946 4947 tmp = btrfs_find_tree_block(root, blockptr, 4948 btrfs_level_size(root, level - 1)); 4949 4950 if (tmp && btrfs_buffer_uptodate(tmp, gen, 1) > 0) { 4951 free_extent_buffer(tmp); 4952 break; 4953 } 4954 if (tmp) 4955 free_extent_buffer(tmp); 4956 slot++; 4957 } 4958 find_next_key: 4959 /* 4960 * we didn't find a candidate key in this node, walk forward 4961 * and find another one 4962 */ 4963 if (slot >= nritems) { 4964 path->slots[level] = slot; 4965 btrfs_set_path_blocking(path); 4966 sret = btrfs_find_next_key(root, path, min_key, level, 4967 cache_only, min_trans); 4968 if (sret == 0) { 4969 btrfs_release_path(path); 4970 goto again; 4971 } else { 4972 goto out; 4973 } 4974 } 4975 /* save our key for returning back */ 4976 btrfs_node_key_to_cpu(cur, &found_key, slot); 4977 path->slots[level] = slot; 4978 if (level == path->lowest_level) { 4979 ret = 0; 4980 unlock_up(path, level, 1, 0, NULL); 4981 goto out; 4982 } 4983 btrfs_set_path_blocking(path); 4984 cur = read_node_slot(root, cur, slot); 4985 BUG_ON(!cur); /* -ENOMEM */ 4986 4987 btrfs_tree_read_lock(cur); 4988 4989 path->locks[level - 1] = BTRFS_READ_LOCK; 4990 path->nodes[level - 1] = cur; 4991 unlock_up(path, level, 1, 0, NULL); 4992 btrfs_clear_path_blocking(path, NULL, 0); 4993 } 4994 out: 4995 if (ret == 0) 4996 memcpy(min_key, &found_key, sizeof(found_key)); 4997 btrfs_set_path_blocking(path); 4998 return ret; 4999 } 5000 5001 /* 5002 * this is similar to btrfs_next_leaf, but does not try to preserve 5003 * and fixup the path. It looks for and returns the next key in the 5004 * tree based on the current path and the cache_only and min_trans 5005 * parameters. 5006 * 5007 * 0 is returned if another key is found, < 0 if there are any errors 5008 * and 1 is returned if there are no higher keys in the tree 5009 * 5010 * path->keep_locks should be set to 1 on the search made before 5011 * calling this function. 
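 *
 * Hypothetical usage, peeking at the key that follows the current
 * position at 'level' (an illustrative sketch; the path comes from an
 * earlier search made with keep_locks set to 1, and a min_trans of 0
 * accepts any generation):
 *
 *	struct btrfs_key next_key;
 *	int ret;
 *
 *	ret = btrfs_find_next_key(root, path, &next_key, level, 0, 0);
 *
 * On ret == 0, next_key holds the next key; 1 means there are no
 * higher keys left in the tree.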
5012 */ 5013 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, 5014 struct btrfs_key *key, int level, 5015 int cache_only, u64 min_trans) 5016 { 5017 int slot; 5018 struct extent_buffer *c; 5019 5020 WARN_ON(!path->keep_locks); 5021 while (level < BTRFS_MAX_LEVEL) { 5022 if (!path->nodes[level]) 5023 return 1; 5024 5025 slot = path->slots[level] + 1; 5026 c = path->nodes[level]; 5027 next: 5028 if (slot >= btrfs_header_nritems(c)) { 5029 int ret; 5030 int orig_lowest; 5031 struct btrfs_key cur_key; 5032 if (level + 1 >= BTRFS_MAX_LEVEL || 5033 !path->nodes[level + 1]) 5034 return 1; 5035 5036 if (path->locks[level + 1]) { 5037 level++; 5038 continue; 5039 } 5040 5041 slot = btrfs_header_nritems(c) - 1; 5042 if (level == 0) 5043 btrfs_item_key_to_cpu(c, &cur_key, slot); 5044 else 5045 btrfs_node_key_to_cpu(c, &cur_key, slot); 5046 5047 orig_lowest = path->lowest_level; 5048 btrfs_release_path(path); 5049 path->lowest_level = level; 5050 ret = btrfs_search_slot(NULL, root, &cur_key, path, 5051 0, 0); 5052 path->lowest_level = orig_lowest; 5053 if (ret < 0) 5054 return ret; 5055 5056 c = path->nodes[level]; 5057 slot = path->slots[level]; 5058 if (ret == 0) 5059 slot++; 5060 goto next; 5061 } 5062 5063 if (level == 0) 5064 btrfs_item_key_to_cpu(c, key, slot); 5065 else { 5066 u64 blockptr = btrfs_node_blockptr(c, slot); 5067 u64 gen = btrfs_node_ptr_generation(c, slot); 5068 5069 if (cache_only) { 5070 struct extent_buffer *cur; 5071 cur = btrfs_find_tree_block(root, blockptr, 5072 btrfs_level_size(root, level - 1)); 5073 if (!cur || 5074 btrfs_buffer_uptodate(cur, gen, 1) <= 0) { 5075 slot++; 5076 if (cur) 5077 free_extent_buffer(cur); 5078 goto next; 5079 } 5080 free_extent_buffer(cur); 5081 } 5082 if (gen < min_trans) { 5083 slot++; 5084 goto next; 5085 } 5086 btrfs_node_key_to_cpu(c, key, slot); 5087 } 5088 return 0; 5089 } 5090 return 1; 5091 } 5092 5093 /* 5094 * search the tree again to find a leaf with greater keys 5095 * returns 0 if it found something or 1 if there are no greater leaves. 5096 * returns < 0 on io errors. 5097 */ 5098 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) 5099 { 5100 return btrfs_next_old_leaf(root, path, 0); 5101 } 5102 5103 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, 5104 u64 time_seq) 5105 { 5106 int slot; 5107 int level; 5108 struct extent_buffer *c; 5109 struct extent_buffer *next; 5110 struct btrfs_key key; 5111 u32 nritems; 5112 int ret; 5113 int old_spinning = path->leave_spinning; 5114 int next_rw_lock = 0; 5115 5116 nritems = btrfs_header_nritems(path->nodes[0]); 5117 if (nritems == 0) 5118 return 1; 5119 5120 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1); 5121 again: 5122 level = 1; 5123 next = NULL; 5124 next_rw_lock = 0; 5125 btrfs_release_path(path); 5126 5127 path->keep_locks = 1; 5128 path->leave_spinning = 1; 5129 5130 if (time_seq) 5131 ret = btrfs_search_old_slot(root, &key, path, time_seq); 5132 else 5133 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5134 path->keep_locks = 0; 5135 5136 if (ret < 0) 5137 return ret; 5138 5139 nritems = btrfs_header_nritems(path->nodes[0]); 5140 /* 5141 * by releasing the path above we dropped all our locks. A balance 5142 * could have added more items next to the key that used to be 5143 * at the very end of the block. So, check again here and 5144 * advance the path if there are now more items available. 
5145 */ 5146 if (nritems > 0 && path->slots[0] < nritems - 1) { 5147 if (ret == 0) 5148 path->slots[0]++; 5149 ret = 0; 5150 goto done; 5151 } 5152 5153 while (level < BTRFS_MAX_LEVEL) { 5154 if (!path->nodes[level]) { 5155 ret = 1; 5156 goto done; 5157 } 5158 5159 slot = path->slots[level] + 1; 5160 c = path->nodes[level]; 5161 if (slot >= btrfs_header_nritems(c)) { 5162 level++; 5163 if (level == BTRFS_MAX_LEVEL) { 5164 ret = 1; 5165 goto done; 5166 } 5167 continue; 5168 } 5169 5170 if (next) { 5171 btrfs_tree_unlock_rw(next, next_rw_lock); 5172 free_extent_buffer(next); 5173 } 5174 5175 next = c; 5176 next_rw_lock = path->locks[level]; 5177 ret = read_block_for_search(NULL, root, path, &next, level, 5178 slot, &key, 0); 5179 if (ret == -EAGAIN) 5180 goto again; 5181 5182 if (ret < 0) { 5183 btrfs_release_path(path); 5184 goto done; 5185 } 5186 5187 if (!path->skip_locking) { 5188 ret = btrfs_try_tree_read_lock(next); 5189 if (!ret && time_seq) { 5190 /* 5191 * If we don't get the lock, we may be racing 5192 * with push_leaf_left, holding that lock while 5193 * itself waiting for the leaf we've currently 5194 * locked. To solve this situation, we give up 5195 * on our lock and cycle. 5196 */ 5197 free_extent_buffer(next); 5198 btrfs_release_path(path); 5199 cond_resched(); 5200 goto again; 5201 } 5202 if (!ret) { 5203 btrfs_set_path_blocking(path); 5204 btrfs_tree_read_lock(next); 5205 btrfs_clear_path_blocking(path, next, 5206 BTRFS_READ_LOCK); 5207 } 5208 next_rw_lock = BTRFS_READ_LOCK; 5209 } 5210 break; 5211 } 5212 path->slots[level] = slot; 5213 while (1) { 5214 level--; 5215 c = path->nodes[level]; 5216 if (path->locks[level]) 5217 btrfs_tree_unlock_rw(c, path->locks[level]); 5218 5219 free_extent_buffer(c); 5220 path->nodes[level] = next; 5221 path->slots[level] = 0; 5222 if (!path->skip_locking) 5223 path->locks[level] = next_rw_lock; 5224 if (!level) 5225 break; 5226 5227 ret = read_block_for_search(NULL, root, path, &next, level, 5228 0, &key, 0); 5229 if (ret == -EAGAIN) 5230 goto again; 5231 5232 if (ret < 0) { 5233 btrfs_release_path(path); 5234 goto done; 5235 } 5236 5237 if (!path->skip_locking) { 5238 ret = btrfs_try_tree_read_lock(next); 5239 if (!ret) { 5240 btrfs_set_path_blocking(path); 5241 btrfs_tree_read_lock(next); 5242 btrfs_clear_path_blocking(path, next, 5243 BTRFS_READ_LOCK); 5244 } 5245 next_rw_lock = BTRFS_READ_LOCK; 5246 } 5247 } 5248 ret = 0; 5249 done: 5250 unlock_up(path, 0, 1, 0, NULL); 5251 path->leave_spinning = old_spinning; 5252 if (!old_spinning) 5253 btrfs_set_path_blocking(path); 5254 5255 return ret; 5256 } 5257 5258 /* 5259 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps 5260 * searching until it gets past min_objectid or finds an item of 'type' 5261 * 5262 * returns 0 if something is found, 1 if nothing was found and < 0 on error 5263 */ 5264 int btrfs_previous_item(struct btrfs_root *root, 5265 struct btrfs_path *path, u64 min_objectid, 5266 int type) 5267 { 5268 struct btrfs_key found_key; 5269 struct extent_buffer *leaf; 5270 u32 nritems; 5271 int ret; 5272 5273 while (1) { 5274 if (path->slots[0] == 0) { 5275 btrfs_set_path_blocking(path); 5276 ret = btrfs_prev_leaf(root, path); 5277 if (ret != 0) 5278 return ret; 5279 } else { 5280 path->slots[0]--; 5281 } 5282 leaf = path->nodes[0]; 5283 nritems = btrfs_header_nritems(leaf); 5284 if (nritems == 0) 5285 return 1; 5286 if (path->slots[0] == nritems) 5287 path->slots[0]--; 5288 5289 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 5290 if (found_key.objectid < 
min_objectid) 5291 break; 5292 if (found_key.type == type) 5293 return 0; 5294 if (found_key.objectid == min_objectid && 5295 found_key.type < type) 5296 break; 5297 } 5298 return 1; 5299 } 5300
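/*
 * Hypothetical usage of btrfs_previous_item() (an illustrative sketch,
 * not code from this file): step backwards from the current position
 * to the closest preceding BTRFS_INODE_ITEM_KEY item, giving up once
 * objectids drop below 'objectid'.
 *
 *	ret = btrfs_previous_item(root, path, objectid,
 *				  BTRFS_INODE_ITEM_KEY);
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
 *				      path->slots[0]);
 */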