xref: /openbmc/linux/fs/btrfs/ctree.c (revision 26dd3e4f)
1 /*
2  * Copyright (C) 2007,2008 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/rbtree.h>
22 #include <linux/vmalloc.h>
23 #include "ctree.h"
24 #include "disk-io.h"
25 #include "transaction.h"
26 #include "print-tree.h"
27 #include "locking.h"
28 
29 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
30 		      *root, struct btrfs_path *path, int level);
31 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
32 		      *root, struct btrfs_key *ins_key,
33 		      struct btrfs_path *path, int data_size, int extend);
34 static int push_node_left(struct btrfs_trans_handle *trans,
35 			  struct btrfs_fs_info *fs_info,
36 			  struct extent_buffer *dst,
37 			  struct extent_buffer *src, int empty);
38 static int balance_node_right(struct btrfs_trans_handle *trans,
39 			      struct btrfs_fs_info *fs_info,
40 			      struct extent_buffer *dst_buf,
41 			      struct extent_buffer *src_buf);
42 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
43 		    int level, int slot);
44 static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
45 				 struct extent_buffer *eb);
46 
47 struct btrfs_path *btrfs_alloc_path(void)
48 {
49 	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
50 }
51 
52 /*
53  * set all locked nodes in the path to blocking locks.  This should
54  * be done before scheduling
55  */
56 noinline void btrfs_set_path_blocking(struct btrfs_path *p)
57 {
58 	int i;
59 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
60 		if (!p->nodes[i] || !p->locks[i])
61 			continue;
62 		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
63 		if (p->locks[i] == BTRFS_READ_LOCK)
64 			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
65 		else if (p->locks[i] == BTRFS_WRITE_LOCK)
66 			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
67 	}
68 }
69 
70 /*
71  * reset all the locked nodes in the path to spinning locks.
72  *
73  * held is used to keep lockdep happy, when lockdep is enabled
74  * we set held to a blocking lock before we go around and
75  * retake all the spinlocks in the path.  You can safely use NULL
76  * for held
77  */
78 noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
79 					struct extent_buffer *held, int held_rw)
80 {
81 	int i;
82 
83 	if (held) {
84 		btrfs_set_lock_blocking_rw(held, held_rw);
85 		if (held_rw == BTRFS_WRITE_LOCK)
86 			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
87 		else if (held_rw == BTRFS_READ_LOCK)
88 			held_rw = BTRFS_READ_LOCK_BLOCKING;
89 	}
90 	btrfs_set_path_blocking(p);
91 
92 	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
93 		if (p->nodes[i] && p->locks[i]) {
94 			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
95 			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
96 				p->locks[i] = BTRFS_WRITE_LOCK;
97 			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
98 				p->locks[i] = BTRFS_READ_LOCK;
99 		}
100 	}
101 
102 	if (held)
103 		btrfs_clear_lock_blocking_rw(held, held_rw);
104 }
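
/*
 * A minimal sketch of the intended pattern for the two helpers above
 * (illustrative only; the sleeping call is a placeholder, not a real
 * function):
 *
 *	btrfs_set_path_blocking(path);
 *	do_something_that_might_sleep();
 *	btrfs_clear_path_blocking(path, NULL, 0);
 */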
105 
106 /* this also releases the path */
107 void btrfs_free_path(struct btrfs_path *p)
108 {
109 	if (!p)
110 		return;
111 	btrfs_release_path(p);
112 	kmem_cache_free(btrfs_path_cachep, p);
113 }
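
/*
 * A minimal usage sketch for the path helpers above, assuming the usual
 * btrfs_search_slot() signature; key setup and item parsing are elided:
 *
 *	struct btrfs_path *path;
 *	int ret;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret < 0)
 *		goto out;
 *	(access path->nodes[0] at path->slots[0] here)
 * out:
 *	btrfs_free_path(path);	(drops any held locks and references too)
 */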
114 
115 /*
116  * path release drops references on the extent buffers in the path
117  * and it drops any locks held by this path
118  *
119  * It is safe to call this on paths that have no locks or extent buffers held.
120  */
121 noinline void btrfs_release_path(struct btrfs_path *p)
122 {
123 	int i;
124 
125 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
126 		p->slots[i] = 0;
127 		if (!p->nodes[i])
128 			continue;
129 		if (p->locks[i]) {
130 			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
131 			p->locks[i] = 0;
132 		}
133 		free_extent_buffer(p->nodes[i]);
134 		p->nodes[i] = NULL;
135 	}
136 }
137 
138 /*
139  * safely gets a reference on the root node of a tree.  A lock
140  * is not taken, so a concurrent writer may put a different node
141  * at the root of the tree.  See btrfs_lock_root_node for the
142  * looping required.
143  *
144  * The extent buffer returned by this has a reference taken, so
145  * it won't disappear.  It may stop being the root of the tree
146  * at any time because there are no locks held.
147  */
148 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
149 {
150 	struct extent_buffer *eb;
151 
152 	while (1) {
153 		rcu_read_lock();
154 		eb = rcu_dereference(root->node);
155 
156 		/*
157 		 * RCU really hurts here: we could free the root node because
158 		 * it was COWed, but we may not get the new root node yet, so
159 		 * do the inc_not_zero dance; if it doesn't work, call
160 		 * synchronize_rcu and try again.
161 		 */
162 		if (atomic_inc_not_zero(&eb->refs)) {
163 			rcu_read_unlock();
164 			break;
165 		}
166 		rcu_read_unlock();
167 		synchronize_rcu();
168 	}
169 	return eb;
170 }
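
/*
 * A minimal caller sketch: the reference taken above must be dropped with
 * free_extent_buffer() when done, since no lock pins the buffer as root:
 *
 *	struct extent_buffer *eb = btrfs_root_node(root);
 *	(eb may stop being the root at any time here)
 *	free_extent_buffer(eb);
 */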
171 
172 /* loop around taking references on and locking the root node of the
173  * tree until you end up with a lock on the root.  A locked buffer
174  * is returned, with a reference held.
175  */
176 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
177 {
178 	struct extent_buffer *eb;
179 
180 	while (1) {
181 		eb = btrfs_root_node(root);
182 		btrfs_tree_lock(eb);
183 		if (eb == root->node)
184 			break;
185 		btrfs_tree_unlock(eb);
186 		free_extent_buffer(eb);
187 	}
188 	return eb;
189 }
190 
191 /* loop around taking references on and read locking the root node of the
192  * tree until you end up with a read lock on the root.  A read locked
193  * buffer is returned, with a reference held.
194  */
195 static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
196 {
197 	struct extent_buffer *eb;
198 
199 	while (1) {
200 		eb = btrfs_root_node(root);
201 		btrfs_tree_read_lock(eb);
202 		if (eb == root->node)
203 			break;
204 		btrfs_tree_read_unlock(eb);
205 		free_extent_buffer(eb);
206 	}
207 	return eb;
208 }
209 
210 /* cowonly roots (everything not a reference counted cow subvolume) just get
211  * put onto a simple dirty list.  transaction.c walks this list to make sure
212  * they get properly updated on disk.
213  */
214 static void add_root_to_dirty_list(struct btrfs_root *root)
215 {
216 	struct btrfs_fs_info *fs_info = root->fs_info;
217 
218 	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
219 	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
220 		return;
221 
222 	spin_lock(&fs_info->trans_lock);
223 	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
224 		/* Want the extent tree to be the last on the list */
225 		if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID)
226 			list_move_tail(&root->dirty_list,
227 				       &fs_info->dirty_cowonly_roots);
228 		else
229 			list_move(&root->dirty_list,
230 				  &fs_info->dirty_cowonly_roots);
231 	}
232 	spin_unlock(&fs_info->trans_lock);
233 }
234 
235 /*
236  * used by snapshot creation to make a copy of a root for a tree with
237  * a given objectid.  The buffer with the new root node is returned in
238  * cow_ret, and this func returns zero on success or a negative error code.
239  */
240 int btrfs_copy_root(struct btrfs_trans_handle *trans,
241 		      struct btrfs_root *root,
242 		      struct extent_buffer *buf,
243 		      struct extent_buffer **cow_ret, u64 new_root_objectid)
244 {
245 	struct btrfs_fs_info *fs_info = root->fs_info;
246 	struct extent_buffer *cow;
247 	int ret = 0;
248 	int level;
249 	struct btrfs_disk_key disk_key;
250 
251 	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
252 		trans->transid != fs_info->running_transaction->transid);
253 	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
254 		trans->transid != root->last_trans);
255 
256 	level = btrfs_header_level(buf);
257 	if (level == 0)
258 		btrfs_item_key(buf, &disk_key, 0);
259 	else
260 		btrfs_node_key(buf, &disk_key, 0);
261 
262 	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
263 			&disk_key, level, buf->start, 0);
264 	if (IS_ERR(cow))
265 		return PTR_ERR(cow);
266 
267 	copy_extent_buffer_full(cow, buf);
268 	btrfs_set_header_bytenr(cow, cow->start);
269 	btrfs_set_header_generation(cow, trans->transid);
270 	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
271 	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
272 				     BTRFS_HEADER_FLAG_RELOC);
273 	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
274 		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
275 	else
276 		btrfs_set_header_owner(cow, new_root_objectid);
277 
278 	write_extent_buffer_fsid(cow, fs_info->fsid);
279 
280 	WARN_ON(btrfs_header_generation(buf) > trans->transid);
281 	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
282 		ret = btrfs_inc_ref(trans, root, cow, 1);
283 	else
284 		ret = btrfs_inc_ref(trans, root, cow, 0);
285 
286 	if (ret)
287 		return ret;
288 
289 	btrfs_mark_buffer_dirty(cow);
290 	*cow_ret = cow;
291 	return 0;
292 }
293 
294 enum mod_log_op {
295 	MOD_LOG_KEY_REPLACE,
296 	MOD_LOG_KEY_ADD,
297 	MOD_LOG_KEY_REMOVE,
298 	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
299 	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
300 	MOD_LOG_MOVE_KEYS,
301 	MOD_LOG_ROOT_REPLACE,
302 };
303 
304 struct tree_mod_move {
305 	int dst_slot;
306 	int nr_items;
307 };
308 
309 struct tree_mod_root {
310 	u64 logical;
311 	u8 level;
312 };
313 
314 struct tree_mod_elem {
315 	struct rb_node node;
316 	u64 logical;
317 	u64 seq;
318 	enum mod_log_op op;
319 
320 	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
321 	int slot;
322 
323 	/* this is used for MOD_LOG_KEY_* and MOD_LOG_ROOT_REPLACE */
324 	u64 generation;
325 
326 	/* these are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
327 	struct btrfs_disk_key key;
328 	u64 blockptr;
329 
330 	/* this is used for op == MOD_LOG_MOVE_KEYS */
331 	struct tree_mod_move move;
332 
333 	/* this is used for op == MOD_LOG_ROOT_REPLACE */
334 	struct tree_mod_root old_root;
335 };
336 
337 static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
338 {
339 	read_lock(&fs_info->tree_mod_log_lock);
340 }
341 
342 static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
343 {
344 	read_unlock(&fs_info->tree_mod_log_lock);
345 }
346 
347 static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
348 {
349 	write_lock(&fs_info->tree_mod_log_lock);
350 }
351 
352 static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
353 {
354 	write_unlock(&fs_info->tree_mod_log_lock);
355 }
356 
357 /*
358  * Pull a new tree mod seq number for our operation.
359  */
360 static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
361 {
362 	return atomic64_inc_return(&fs_info->tree_mod_seq);
363 }
364 
365 /*
366  * This adds a new blocker to the tree mod log's blocker list if the @elem
367  * passed does not already have a sequence number set. So when a caller expects
368  * to record tree modifications, it should ensure elem->seq is set to zero
369  * before calling btrfs_get_tree_mod_seq.
370  * Returns a fresh, unused tree log modification sequence number, even if no new
371  * blocker was added.
372  */
373 u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
374 			   struct seq_list *elem)
375 {
376 	tree_mod_log_write_lock(fs_info);
377 	spin_lock(&fs_info->tree_mod_seq_lock);
378 	if (!elem->seq) {
379 		elem->seq = btrfs_inc_tree_mod_seq(fs_info);
380 		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
381 	}
382 	spin_unlock(&fs_info->tree_mod_seq_lock);
383 	tree_mod_log_write_unlock(fs_info);
384 
385 	return elem->seq;
386 }
387 
388 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
389 			    struct seq_list *elem)
390 {
391 	struct rb_root *tm_root;
392 	struct rb_node *node;
393 	struct rb_node *next;
394 	struct seq_list *cur_elem;
395 	struct tree_mod_elem *tm;
396 	u64 min_seq = (u64)-1;
397 	u64 seq_putting = elem->seq;
398 
399 	if (!seq_putting)
400 		return;
401 
402 	spin_lock(&fs_info->tree_mod_seq_lock);
403 	list_del(&elem->list);
404 	elem->seq = 0;
405 
406 	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
407 		if (cur_elem->seq < min_seq) {
408 			if (seq_putting > cur_elem->seq) {
409 				/*
410 				 * blocker with lower sequence number exists, we
411 				 * cannot remove anything from the log
412 				 */
413 				spin_unlock(&fs_info->tree_mod_seq_lock);
414 				return;
415 			}
416 			min_seq = cur_elem->seq;
417 		}
418 	}
419 	spin_unlock(&fs_info->tree_mod_seq_lock);
420 
421 	/*
422 	 * anything that's lower than the lowest existing (read: blocked)
423 	 * sequence number can be removed from the tree.
424 	 */
425 	tree_mod_log_write_lock(fs_info);
426 	tm_root = &fs_info->tree_mod_log;
427 	for (node = rb_first(tm_root); node; node = next) {
428 		next = rb_next(node);
429 		tm = container_of(node, struct tree_mod_elem, node);
430 		if (tm->seq > min_seq)
431 			continue;
432 		rb_erase(node, tm_root);
433 		kfree(tm);
434 	}
435 	tree_mod_log_write_unlock(fs_info);
436 }
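
/*
 * A minimal sketch of the blocker pattern around the two functions above,
 * assuming the SEQ_LIST_INIT initializer from ctree.h: hold a sequence
 * number while doing time-travel reads, then drop it so old log entries
 * can be pruned again:
 *
 *	struct seq_list elem = SEQ_LIST_INIT(elem);
 *
 *	btrfs_get_tree_mod_seq(fs_info, &elem);
 *	(rewind queries against elem.seq go here)
 *	btrfs_put_tree_mod_seq(fs_info, &elem);
 */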
437 
438 /*
439  * key order of the log:
440  *       node/leaf start address -> sequence
441  *
442  * The 'start address' is the logical address of the *new* root node
443  * for root replace operations, or the logical address of the affected
444  * block for all other operations.
445  *
446  * Note: must be called with write lock (tree_mod_log_write_lock).
447  */
448 static noinline int
449 __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
450 {
451 	struct rb_root *tm_root;
452 	struct rb_node **new;
453 	struct rb_node *parent = NULL;
454 	struct tree_mod_elem *cur;
455 
456 	BUG_ON(!tm);
457 
458 	tm->seq = btrfs_inc_tree_mod_seq(fs_info);
459 
460 	tm_root = &fs_info->tree_mod_log;
461 	new = &tm_root->rb_node;
462 	while (*new) {
463 		cur = container_of(*new, struct tree_mod_elem, node);
464 		parent = *new;
465 		if (cur->logical < tm->logical)
466 			new = &((*new)->rb_left);
467 		else if (cur->logical > tm->logical)
468 			new = &((*new)->rb_right);
469 		else if (cur->seq < tm->seq)
470 			new = &((*new)->rb_left);
471 		else if (cur->seq > tm->seq)
472 			new = &((*new)->rb_right);
473 		else
474 			return -EEXIST;
475 	}
476 
477 	rb_link_node(&tm->node, parent, new);
478 	rb_insert_color(&tm->node, tm_root);
479 	return 0;
480 }
481 
482 /*
483  * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
484  * returns zero with the tree_mod_log_lock acquired. The caller must hold
485  * the lock until all tree mod log insertions are recorded in the rb tree and
486  * then call tree_mod_log_write_unlock() to release.
487  */
488 static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
489 				    struct extent_buffer *eb) {
490 	smp_mb();
491 	if (list_empty(&fs_info->tree_mod_seq_list))
492 		return 1;
493 	if (eb && btrfs_header_level(eb) == 0)
494 		return 1;
495 
496 	tree_mod_log_write_lock(fs_info);
497 	if (list_empty(&fs_info->tree_mod_seq_list)) {
498 		tree_mod_log_write_unlock(fs_info);
499 		return 1;
500 	}
501 
502 	return 0;
503 }
504 
505 /* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
506 static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
507 				    struct extent_buffer *eb)
508 {
509 	smp_mb();
510 	if (list_empty(&fs_info->tree_mod_seq_list))
511 		return 0;
512 	if (eb && btrfs_header_level(eb) == 0)
513 		return 0;
514 
515 	return 1;
516 }
517 
518 static struct tree_mod_elem *
519 alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
520 		    enum mod_log_op op, gfp_t flags)
521 {
522 	struct tree_mod_elem *tm;
523 
524 	tm = kzalloc(sizeof(*tm), flags);
525 	if (!tm)
526 		return NULL;
527 
528 	tm->logical = eb->start;
529 	if (op != MOD_LOG_KEY_ADD) {
530 		btrfs_node_key(eb, &tm->key, slot);
531 		tm->blockptr = btrfs_node_blockptr(eb, slot);
532 	}
533 	tm->op = op;
534 	tm->slot = slot;
535 	tm->generation = btrfs_node_ptr_generation(eb, slot);
536 	RB_CLEAR_NODE(&tm->node);
537 
538 	return tm;
539 }
540 
541 static noinline int
542 tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
543 			struct extent_buffer *eb, int slot,
544 			enum mod_log_op op, gfp_t flags)
545 {
546 	struct tree_mod_elem *tm;
547 	int ret;
548 
549 	if (!tree_mod_need_log(fs_info, eb))
550 		return 0;
551 
552 	tm = alloc_tree_mod_elem(eb, slot, op, flags);
553 	if (!tm)
554 		return -ENOMEM;
555 
556 	if (tree_mod_dont_log(fs_info, eb)) {
557 		kfree(tm);
558 		return 0;
559 	}
560 
561 	ret = __tree_mod_log_insert(fs_info, tm);
562 	tree_mod_log_write_unlock(fs_info);
563 	if (ret)
564 		kfree(tm);
565 
566 	return ret;
567 }
568 
569 static noinline int
570 tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
571 			 struct extent_buffer *eb, int dst_slot, int src_slot,
572 			 int nr_items, gfp_t flags)
573 {
574 	struct tree_mod_elem *tm = NULL;
575 	struct tree_mod_elem **tm_list = NULL;
576 	int ret = 0;
577 	int i;
578 	int locked = 0;
579 
580 	if (!tree_mod_need_log(fs_info, eb))
581 		return 0;
582 
583 	tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), flags);
584 	if (!tm_list)
585 		return -ENOMEM;
586 
587 	tm = kzalloc(sizeof(*tm), flags);
588 	if (!tm) {
589 		ret = -ENOMEM;
590 		goto free_tms;
591 	}
592 
593 	tm->logical = eb->start;
594 	tm->slot = src_slot;
595 	tm->move.dst_slot = dst_slot;
596 	tm->move.nr_items = nr_items;
597 	tm->op = MOD_LOG_MOVE_KEYS;
598 
599 	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
600 		tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
601 		    MOD_LOG_KEY_REMOVE_WHILE_MOVING, flags);
602 		if (!tm_list[i]) {
603 			ret = -ENOMEM;
604 			goto free_tms;
605 		}
606 	}
607 
608 	if (tree_mod_dont_log(fs_info, eb))
609 		goto free_tms;
610 	locked = 1;
611 
612 	/*
613 	 * When we overwrite something during the move, we log these removals.
614 	 * This can only happen when we move towards the beginning of the
615 	 * buffer, i.e. dst_slot < src_slot.
616 	 */
617 	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
618 		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
619 		if (ret)
620 			goto free_tms;
621 	}
622 
623 	ret = __tree_mod_log_insert(fs_info, tm);
624 	if (ret)
625 		goto free_tms;
626 	tree_mod_log_write_unlock(fs_info);
627 	kfree(tm_list);
628 
629 	return 0;
630 free_tms:
631 	for (i = 0; i < nr_items; i++) {
632 		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
633 			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
634 		kfree(tm_list[i]);
635 	}
636 	if (locked)
637 		tree_mod_log_write_unlock(fs_info);
638 	kfree(tm_list);
639 	kfree(tm);
640 
641 	return ret;
642 }
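
/*
 * Worked example for the logging above: moving nr_items = 4 keys from
 * src_slot = 5 down to dst_slot = 2 overwrites slots 2, 3 and 4, so the
 * loop condition (i + dst_slot < src_slot && i < nr_items) logs exactly
 * three MOD_LOG_KEY_REMOVE_WHILE_MOVING entries (i = 0, 1, 2) before the
 * single MOD_LOG_MOVE_KEYS entry is inserted.
 */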
643 
644 static inline int
645 __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
646 		       struct tree_mod_elem **tm_list,
647 		       int nritems)
648 {
649 	int i, j;
650 	int ret;
651 
652 	for (i = nritems - 1; i >= 0; i--) {
653 		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
654 		if (ret) {
655 			for (j = nritems - 1; j > i; j--)
656 				rb_erase(&tm_list[j]->node,
657 					 &fs_info->tree_mod_log);
658 			return ret;
659 		}
660 	}
661 
662 	return 0;
663 }
664 
665 static noinline int
666 tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
667 			 struct extent_buffer *old_root,
668 			 struct extent_buffer *new_root, gfp_t flags,
669 			 int log_removal)
670 {
671 	struct tree_mod_elem *tm = NULL;
672 	struct tree_mod_elem **tm_list = NULL;
673 	int nritems = 0;
674 	int ret = 0;
675 	int i;
676 
677 	if (!tree_mod_need_log(fs_info, NULL))
678 		return 0;
679 
680 	if (log_removal && btrfs_header_level(old_root) > 0) {
681 		nritems = btrfs_header_nritems(old_root);
682 		tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
683 				  flags);
684 		if (!tm_list) {
685 			ret = -ENOMEM;
686 			goto free_tms;
687 		}
688 		for (i = 0; i < nritems; i++) {
689 			tm_list[i] = alloc_tree_mod_elem(old_root, i,
690 			    MOD_LOG_KEY_REMOVE_WHILE_FREEING, flags);
691 			if (!tm_list[i]) {
692 				ret = -ENOMEM;
693 				goto free_tms;
694 			}
695 		}
696 	}
697 
698 	tm = kzalloc(sizeof(*tm), flags);
699 	if (!tm) {
700 		ret = -ENOMEM;
701 		goto free_tms;
702 	}
703 
704 	tm->logical = new_root->start;
705 	tm->old_root.logical = old_root->start;
706 	tm->old_root.level = btrfs_header_level(old_root);
707 	tm->generation = btrfs_header_generation(old_root);
708 	tm->op = MOD_LOG_ROOT_REPLACE;
709 
710 	if (tree_mod_dont_log(fs_info, NULL))
711 		goto free_tms;
712 
713 	if (tm_list)
714 		ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
715 	if (!ret)
716 		ret = __tree_mod_log_insert(fs_info, tm);
717 
718 	tree_mod_log_write_unlock(fs_info);
719 	if (ret)
720 		goto free_tms;
721 	kfree(tm_list);
722 
723 	return ret;
724 
725 free_tms:
726 	if (tm_list) {
727 		for (i = 0; i < nritems; i++)
728 			kfree(tm_list[i]);
729 		kfree(tm_list);
730 	}
731 	kfree(tm);
732 
733 	return ret;
734 }
735 
736 static struct tree_mod_elem *
737 __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
738 		      int smallest)
739 {
740 	struct rb_root *tm_root;
741 	struct rb_node *node;
742 	struct tree_mod_elem *cur = NULL;
743 	struct tree_mod_elem *found = NULL;
744 
745 	tree_mod_log_read_lock(fs_info);
746 	tm_root = &fs_info->tree_mod_log;
747 	node = tm_root->rb_node;
748 	while (node) {
749 		cur = container_of(node, struct tree_mod_elem, node);
750 		if (cur->logical < start) {
751 			node = node->rb_left;
752 		} else if (cur->logical > start) {
753 			node = node->rb_right;
754 		} else if (cur->seq < min_seq) {
755 			node = node->rb_left;
756 		} else if (!smallest) {
757 			/* we want the node with the highest seq */
758 			if (found)
759 				BUG_ON(found->seq > cur->seq);
760 			found = cur;
761 			node = node->rb_left;
762 		} else if (cur->seq > min_seq) {
763 			/* we want the node with the smallest seq */
764 			if (found)
765 				BUG_ON(found->seq < cur->seq);
766 			found = cur;
767 			node = node->rb_right;
768 		} else {
769 			found = cur;
770 			break;
771 		}
772 	}
773 	tree_mod_log_read_unlock(fs_info);
774 
775 	return found;
776 }
777 
778 /*
779  * this returns the element from the log with the smallest time sequence
780  * value that's in the log (the oldest log item). any element with a time
781  * sequence lower than min_seq will be ignored.
782  */
783 static struct tree_mod_elem *
784 tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
785 			   u64 min_seq)
786 {
787 	return __tree_mod_log_search(fs_info, start, min_seq, 1);
788 }
789 
790 /*
791  * this returns the element from the log with the largest time sequence
792  * value that's in the log (the most recent log item). any element with
793  * a time sequence lower than min_seq will be ignored.
794  */
795 static struct tree_mod_elem *
796 tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
797 {
798 	return __tree_mod_log_search(fs_info, start, min_seq, 0);
799 }
800 
801 static noinline int
802 tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
803 		     struct extent_buffer *src, unsigned long dst_offset,
804 		     unsigned long src_offset, int nr_items)
805 {
806 	int ret = 0;
807 	struct tree_mod_elem **tm_list = NULL;
808 	struct tree_mod_elem **tm_list_add, **tm_list_rem;
809 	int i;
810 	int locked = 0;
811 
812 	if (!tree_mod_need_log(fs_info, NULL))
813 		return 0;
814 
815 	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
816 		return 0;
817 
818 	tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
819 			  GFP_NOFS);
820 	if (!tm_list)
821 		return -ENOMEM;
822 
823 	tm_list_add = tm_list;
824 	tm_list_rem = tm_list + nr_items;
825 	for (i = 0; i < nr_items; i++) {
826 		tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
827 		    MOD_LOG_KEY_REMOVE, GFP_NOFS);
828 		if (!tm_list_rem[i]) {
829 			ret = -ENOMEM;
830 			goto free_tms;
831 		}
832 
833 		tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
834 		    MOD_LOG_KEY_ADD, GFP_NOFS);
835 		if (!tm_list_add[i]) {
836 			ret = -ENOMEM;
837 			goto free_tms;
838 		}
839 	}
840 
841 	if (tree_mod_dont_log(fs_info, NULL))
842 		goto free_tms;
843 	locked = 1;
844 
845 	for (i = 0; i < nr_items; i++) {
846 		ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
847 		if (ret)
848 			goto free_tms;
849 		ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
850 		if (ret)
851 			goto free_tms;
852 	}
853 
854 	tree_mod_log_write_unlock(fs_info);
855 	kfree(tm_list);
856 
857 	return 0;
858 
859 free_tms:
860 	for (i = 0; i < nr_items * 2; i++) {
861 		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
862 			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
863 		kfree(tm_list[i]);
864 	}
865 	if (locked)
866 		tree_mod_log_write_unlock(fs_info);
867 	kfree(tm_list);
868 
869 	return ret;
870 }
871 
872 static inline void
873 tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
874 		     int dst_offset, int src_offset, int nr_items)
875 {
876 	int ret;
877 	ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
878 				       nr_items, GFP_NOFS);
879 	BUG_ON(ret < 0);
880 }
881 
882 static noinline void
883 tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
884 			  struct extent_buffer *eb, int slot, int atomic)
885 {
886 	int ret;
887 
888 	ret = tree_mod_log_insert_key(fs_info, eb, slot,
889 					MOD_LOG_KEY_REPLACE,
890 					atomic ? GFP_ATOMIC : GFP_NOFS);
891 	BUG_ON(ret < 0);
892 }
893 
894 static noinline int
895 tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
896 {
897 	struct tree_mod_elem **tm_list = NULL;
898 	int nritems = 0;
899 	int i;
900 	int ret = 0;
901 
902 	if (btrfs_header_level(eb) == 0)
903 		return 0;
904 
905 	if (!tree_mod_need_log(fs_info, NULL))
906 		return 0;
907 
908 	nritems = btrfs_header_nritems(eb);
909 	tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
910 	if (!tm_list)
911 		return -ENOMEM;
912 
913 	for (i = 0; i < nritems; i++) {
914 		tm_list[i] = alloc_tree_mod_elem(eb, i,
915 		    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
916 		if (!tm_list[i]) {
917 			ret = -ENOMEM;
918 			goto free_tms;
919 		}
920 	}
921 
922 	if (tree_mod_dont_log(fs_info, eb))
923 		goto free_tms;
924 
925 	ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
926 	tree_mod_log_write_unlock(fs_info);
927 	if (ret)
928 		goto free_tms;
929 	kfree(tm_list);
930 
931 	return 0;
932 
933 free_tms:
934 	for (i = 0; i < nritems; i++)
935 		kfree(tm_list[i]);
936 	kfree(tm_list);
937 
938 	return ret;
939 }
940 
941 static noinline void
942 tree_mod_log_set_root_pointer(struct btrfs_root *root,
943 			      struct extent_buffer *new_root_node,
944 			      int log_removal)
945 {
946 	int ret;
947 	ret = tree_mod_log_insert_root(root->fs_info, root->node,
948 				       new_root_node, GFP_NOFS, log_removal);
949 	BUG_ON(ret < 0);
950 }
951 
952 /*
953  * check if the tree block can be shared by multiple trees
954  */
955 int btrfs_block_can_be_shared(struct btrfs_root *root,
956 			      struct extent_buffer *buf)
957 {
958 	/*
959 	 * Tree blocks not in reference counted trees and tree roots
960 	 * are never shared. If a block was allocated after the last
961 	 * snapshot and the block was not allocated by tree relocation,
962 	 * we know the block is not shared.
963 	 */
964 	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
965 	    buf != root->node && buf != root->commit_root &&
966 	    (btrfs_header_generation(buf) <=
967 	     btrfs_root_last_snapshot(&root->root_item) ||
968 	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
969 		return 1;
970 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
971 	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
972 	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
973 		return 1;
974 #endif
975 	return 0;
976 }
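
/*
 * Worked example for the generation check above, other conditions being
 * equal: if btrfs_root_last_snapshot() returns 100, a block with header
 * generation 90 already existed when the last snapshot was taken and may
 * be shared (return 1), while a non-reloc block with generation 110 was
 * allocated after it and is known not to be shared (return 0).
 */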
977 
978 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
979 				       struct btrfs_root *root,
980 				       struct extent_buffer *buf,
981 				       struct extent_buffer *cow,
982 				       int *last_ref)
983 {
984 	struct btrfs_fs_info *fs_info = root->fs_info;
985 	u64 refs;
986 	u64 owner;
987 	u64 flags;
988 	u64 new_flags = 0;
989 	int ret;
990 
991 	/*
992 	 * Backrefs update rules:
993 	 *
994 	 * Always use full backrefs for extent pointers in tree blocks
995 	 * allocated by tree relocation.
996 	 *
997 	 * If a shared tree block is no longer referenced by its owner
998 	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
999 	 * use full backrefs for extent pointers in the tree block.
1000 	 *
1001 	 * If a tree block is being relocated
1002 	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
1003 	 * use full backrefs for extent pointers in the tree block.
1004 	 * The reason for this is that some operations (such as dropping
1005 	 * a tree) are only allowed on blocks that use full backrefs.
1006 	 */
1007 
1008 	if (btrfs_block_can_be_shared(root, buf)) {
1009 		ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
1010 					       btrfs_header_level(buf), 1,
1011 					       &refs, &flags);
1012 		if (ret)
1013 			return ret;
1014 		if (refs == 0) {
1015 			ret = -EROFS;
1016 			btrfs_handle_fs_error(fs_info, ret, NULL);
1017 			return ret;
1018 		}
1019 	} else {
1020 		refs = 1;
1021 		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1022 		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1023 			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
1024 		else
1025 			flags = 0;
1026 	}
1027 
1028 	owner = btrfs_header_owner(buf);
1029 	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
1030 	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
1031 
1032 	if (refs > 1) {
1033 		if ((owner == root->root_key.objectid ||
1034 		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
1035 		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
1036 			ret = btrfs_inc_ref(trans, root, buf, 1);
1037 			BUG_ON(ret); /* -ENOMEM */
1038 
1039 			if (root->root_key.objectid ==
1040 			    BTRFS_TREE_RELOC_OBJECTID) {
1041 				ret = btrfs_dec_ref(trans, root, buf, 0);
1042 				BUG_ON(ret); /* -ENOMEM */
1043 				ret = btrfs_inc_ref(trans, root, cow, 1);
1044 				BUG_ON(ret); /* -ENOMEM */
1045 			}
1046 			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
1047 		} else {
1048 
1049 			if (root->root_key.objectid ==
1050 			    BTRFS_TREE_RELOC_OBJECTID)
1051 				ret = btrfs_inc_ref(trans, root, cow, 1);
1052 			else
1053 				ret = btrfs_inc_ref(trans, root, cow, 0);
1054 			BUG_ON(ret); /* -ENOMEM */
1055 		}
1056 		if (new_flags != 0) {
1057 			int level = btrfs_header_level(buf);
1058 
1059 			ret = btrfs_set_disk_extent_flags(trans, fs_info,
1060 							  buf->start,
1061 							  buf->len,
1062 							  new_flags, level, 0);
1063 			if (ret)
1064 				return ret;
1065 		}
1066 	} else {
1067 		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
1068 			if (root->root_key.objectid ==
1069 			    BTRFS_TREE_RELOC_OBJECTID)
1070 				ret = btrfs_inc_ref(trans, root, cow, 1);
1071 			else
1072 				ret = btrfs_inc_ref(trans, root, cow, 0);
1073 			BUG_ON(ret); /* -ENOMEM */
1074 			ret = btrfs_dec_ref(trans, root, buf, 1);
1075 			BUG_ON(ret); /* -ENOMEM */
1076 		}
1077 		clean_tree_block(trans, fs_info, buf);
1078 		*last_ref = 1;
1079 	}
1080 	return 0;
1081 }
1082 
1083 /*
1084  * does the dirty work in cow of a single block.  The parent block (if
1085  * supplied) is updated to point to the new cow copy.  The new buffer is marked
1086  * dirty and returned locked.  If you modify the block it needs to be marked
1087  * dirty again.
1088  *
1089  * search_start -- an allocation hint for the new block
1090  *
1091  * empty_size -- a hint that you plan on doing more cow.  This is the size in
1092  * bytes the allocator should try to find free next to the block it returns.
1093  * This is just a hint and may be ignored by the allocator.
1094  */
1095 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
1096 			     struct btrfs_root *root,
1097 			     struct extent_buffer *buf,
1098 			     struct extent_buffer *parent, int parent_slot,
1099 			     struct extent_buffer **cow_ret,
1100 			     u64 search_start, u64 empty_size)
1101 {
1102 	struct btrfs_fs_info *fs_info = root->fs_info;
1103 	struct btrfs_disk_key disk_key;
1104 	struct extent_buffer *cow;
1105 	int level, ret;
1106 	int last_ref = 0;
1107 	int unlock_orig = 0;
1108 	u64 parent_start = 0;
1109 
1110 	if (*cow_ret == buf)
1111 		unlock_orig = 1;
1112 
1113 	btrfs_assert_tree_locked(buf);
1114 
1115 	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
1116 		trans->transid != fs_info->running_transaction->transid);
1117 	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
1118 		trans->transid != root->last_trans);
1119 
1120 	level = btrfs_header_level(buf);
1121 
1122 	if (level == 0)
1123 		btrfs_item_key(buf, &disk_key, 0);
1124 	else
1125 		btrfs_node_key(buf, &disk_key, 0);
1126 
1127 	if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
1128 		parent_start = parent->start;
1129 
1130 	cow = btrfs_alloc_tree_block(trans, root, parent_start,
1131 			root->root_key.objectid, &disk_key, level,
1132 			search_start, empty_size);
1133 	if (IS_ERR(cow))
1134 		return PTR_ERR(cow);
1135 
1136 	/* cow is set to blocking by btrfs_init_new_buffer */
1137 
1138 	copy_extent_buffer_full(cow, buf);
1139 	btrfs_set_header_bytenr(cow, cow->start);
1140 	btrfs_set_header_generation(cow, trans->transid);
1141 	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
1142 	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
1143 				     BTRFS_HEADER_FLAG_RELOC);
1144 	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1145 		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
1146 	else
1147 		btrfs_set_header_owner(cow, root->root_key.objectid);
1148 
1149 	write_extent_buffer_fsid(cow, fs_info->fsid);
1150 
1151 	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
1152 	if (ret) {
1153 		btrfs_abort_transaction(trans, ret);
1154 		return ret;
1155 	}
1156 
1157 	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
1158 		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
1159 		if (ret) {
1160 			btrfs_abort_transaction(trans, ret);
1161 			return ret;
1162 		}
1163 	}
1164 
1165 	if (buf == root->node) {
1166 		WARN_ON(parent && parent != buf);
1167 		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1168 		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1169 			parent_start = buf->start;
1170 
1171 		extent_buffer_get(cow);
1172 		tree_mod_log_set_root_pointer(root, cow, 1);
1173 		rcu_assign_pointer(root->node, cow);
1174 
1175 		btrfs_free_tree_block(trans, root, buf, parent_start,
1176 				      last_ref);
1177 		free_extent_buffer(buf);
1178 		add_root_to_dirty_list(root);
1179 	} else {
1180 		WARN_ON(trans->transid != btrfs_header_generation(parent));
1181 		tree_mod_log_insert_key(fs_info, parent, parent_slot,
1182 					MOD_LOG_KEY_REPLACE, GFP_NOFS);
1183 		btrfs_set_node_blockptr(parent, parent_slot,
1184 					cow->start);
1185 		btrfs_set_node_ptr_generation(parent, parent_slot,
1186 					      trans->transid);
1187 		btrfs_mark_buffer_dirty(parent);
1188 		if (last_ref) {
1189 			ret = tree_mod_log_free_eb(fs_info, buf);
1190 			if (ret) {
1191 				btrfs_abort_transaction(trans, ret);
1192 				return ret;
1193 			}
1194 		}
1195 		btrfs_free_tree_block(trans, root, buf, parent_start,
1196 				      last_ref);
1197 	}
1198 	if (unlock_orig)
1199 		btrfs_tree_unlock(buf);
1200 	free_extent_buffer_stale(buf);
1201 	btrfs_mark_buffer_dirty(cow);
1202 	*cow_ret = cow;
1203 	return 0;
1204 }
1205 
1206 /*
1207  * returns the logical address of the oldest predecessor of the given root.
1208  * entries older than time_seq are ignored.
1209  */
1210 static struct tree_mod_elem *
1211 __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
1212 			   struct extent_buffer *eb_root, u64 time_seq)
1213 {
1214 	struct tree_mod_elem *tm;
1215 	struct tree_mod_elem *found = NULL;
1216 	u64 root_logical = eb_root->start;
1217 	int looped = 0;
1218 
1219 	if (!time_seq)
1220 		return NULL;
1221 
1222 	/*
1223 	 * the very last operation that's logged for a root is the
1224 	 * replacement operation (if it is replaced at all). this has
1225 	 * the logical address of the *new* root, making it the very
1226 	 * first operation that's logged for this root.
1227 	 */
1228 	while (1) {
1229 		tm = tree_mod_log_search_oldest(fs_info, root_logical,
1230 						time_seq);
1231 		if (!looped && !tm)
1232 			return NULL;
1233 		/*
1234 		 * if there are no tree operations for the oldest root, we simply
1235 		 * return it. this should only happen if that (old) root is at
1236 		 * level 0.
1237 		 */
1238 		if (!tm)
1239 			break;
1240 
1241 		/*
1242 		 * if there's an operation that's not a root replacement, we
1243 		 * found the oldest version of our root. normally, we'll find a
1244 		 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
1245 		 */
1246 		if (tm->op != MOD_LOG_ROOT_REPLACE)
1247 			break;
1248 
1249 		found = tm;
1250 		root_logical = tm->old_root.logical;
1251 		looped = 1;
1252 	}
1253 
1254 	/* if there's no old root to return, return what we found instead */
1255 	if (!found)
1256 		found = tm;
1257 
1258 	return found;
1259 }
1260 
1261 /*
1262  * tm is a pointer to the first operation to rewind within eb. then, all
1263  * previous operations will be rewound (until we reach something older than
1264  * time_seq).
1265  */
1266 static void
1267 __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1268 		      u64 time_seq, struct tree_mod_elem *first_tm)
1269 {
1270 	u32 n;
1271 	struct rb_node *next;
1272 	struct tree_mod_elem *tm = first_tm;
1273 	unsigned long o_dst;
1274 	unsigned long o_src;
1275 	unsigned long p_size = sizeof(struct btrfs_key_ptr);
1276 
1277 	n = btrfs_header_nritems(eb);
1278 	tree_mod_log_read_lock(fs_info);
1279 	while (tm && tm->seq >= time_seq) {
1280 		/*
1281 		 * all the operations are recorded with the operator used for
1282 		 * the modification. as we're going backwards, we do the
1283 		 * opposite of each operation here.
1284 		 */
1285 		switch (tm->op) {
1286 		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
1287 			BUG_ON(tm->slot < n);
1288 			/* Fallthrough */
1289 		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
1290 		case MOD_LOG_KEY_REMOVE:
1291 			btrfs_set_node_key(eb, &tm->key, tm->slot);
1292 			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1293 			btrfs_set_node_ptr_generation(eb, tm->slot,
1294 						      tm->generation);
1295 			n++;
1296 			break;
1297 		case MOD_LOG_KEY_REPLACE:
1298 			BUG_ON(tm->slot >= n);
1299 			btrfs_set_node_key(eb, &tm->key, tm->slot);
1300 			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1301 			btrfs_set_node_ptr_generation(eb, tm->slot,
1302 						      tm->generation);
1303 			break;
1304 		case MOD_LOG_KEY_ADD:
1305 			/* if a move operation is needed it's in the log */
1306 			n--;
1307 			break;
1308 		case MOD_LOG_MOVE_KEYS:
1309 			o_dst = btrfs_node_key_ptr_offset(tm->slot);
1310 			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
1311 			memmove_extent_buffer(eb, o_dst, o_src,
1312 					      tm->move.nr_items * p_size);
1313 			break;
1314 		case MOD_LOG_ROOT_REPLACE:
1315 			/*
1316 			 * this operation is special. for roots, this must be
1317 			 * handled explicitly before rewinding.
1318 			 * for non-roots, this operation may exist if the node
1319 			 * was a root: root A -> child B; then A gets empty and
1320 			 * B is promoted to the new root. in the mod log, we'll
1321 			 * have a root-replace operation for B, a tree block
1322 			 * that is not a root. we simply ignore that operation.
1323 			 */
1324 			break;
1325 		}
1326 		next = rb_next(&tm->node);
1327 		if (!next)
1328 			break;
1329 		tm = container_of(next, struct tree_mod_elem, node);
1330 		if (tm->logical != first_tm->logical)
1331 			break;
1332 	}
1333 	tree_mod_log_read_unlock(fs_info);
1334 	btrfs_set_header_nritems(eb, n);
1335 }
1336 
1337 /*
1338  * Called with eb read locked. If the buffer cannot be rewound, the same buffer
1339  * is returned. If rewind operations happen, a fresh buffer is returned. The
1340  * returned buffer is always read-locked. If the returned buffer is not the
1341  * input buffer, the lock on the input buffer is released and the input buffer
1342  * is freed (its refcount is decremented).
1343  */
1344 static struct extent_buffer *
1345 tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
1346 		    struct extent_buffer *eb, u64 time_seq)
1347 {
1348 	struct extent_buffer *eb_rewin;
1349 	struct tree_mod_elem *tm;
1350 
1351 	if (!time_seq)
1352 		return eb;
1353 
1354 	if (btrfs_header_level(eb) == 0)
1355 		return eb;
1356 
1357 	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
1358 	if (!tm)
1359 		return eb;
1360 
1361 	btrfs_set_path_blocking(path);
1362 	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1363 
1364 	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1365 		BUG_ON(tm->slot != 0);
1366 		eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
1367 		if (!eb_rewin) {
1368 			btrfs_tree_read_unlock_blocking(eb);
1369 			free_extent_buffer(eb);
1370 			return NULL;
1371 		}
1372 		btrfs_set_header_bytenr(eb_rewin, eb->start);
1373 		btrfs_set_header_backref_rev(eb_rewin,
1374 					     btrfs_header_backref_rev(eb));
1375 		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
1376 		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
1377 	} else {
1378 		eb_rewin = btrfs_clone_extent_buffer(eb);
1379 		if (!eb_rewin) {
1380 			btrfs_tree_read_unlock_blocking(eb);
1381 			free_extent_buffer(eb);
1382 			return NULL;
1383 		}
1384 	}
1385 
1386 	btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
1387 	btrfs_tree_read_unlock_blocking(eb);
1388 	free_extent_buffer(eb);
1389 
1390 	extent_buffer_get(eb_rewin);
1391 	btrfs_tree_read_lock(eb_rewin);
1392 	__tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
1393 	WARN_ON(btrfs_header_nritems(eb_rewin) >
1394 		BTRFS_NODEPTRS_PER_BLOCK(fs_info));
1395 
1396 	return eb_rewin;
1397 }
1398 
1399 /*
1400  * get_old_root() rewinds the state of @root's root node to the given @time_seq
1401  * value. If there are no changes, the current root->node is returned. If
1402  * anything changed in between, there's a fresh buffer allocated on which the
1403  * rewind operations are done. In any case, the returned buffer is read locked.
1404  * Returns NULL on error (with no locks held).
1405  */
1406 static inline struct extent_buffer *
1407 get_old_root(struct btrfs_root *root, u64 time_seq)
1408 {
1409 	struct btrfs_fs_info *fs_info = root->fs_info;
1410 	struct tree_mod_elem *tm;
1411 	struct extent_buffer *eb = NULL;
1412 	struct extent_buffer *eb_root;
1413 	struct extent_buffer *old;
1414 	struct tree_mod_root *old_root = NULL;
1415 	u64 old_generation = 0;
1416 	u64 logical;
1417 
1418 	eb_root = btrfs_read_lock_root_node(root);
1419 	tm = __tree_mod_log_oldest_root(fs_info, eb_root, time_seq);
1420 	if (!tm)
1421 		return eb_root;
1422 
1423 	if (tm->op == MOD_LOG_ROOT_REPLACE) {
1424 		old_root = &tm->old_root;
1425 		old_generation = tm->generation;
1426 		logical = old_root->logical;
1427 	} else {
1428 		logical = eb_root->start;
1429 	}
1430 
1431 	tm = tree_mod_log_search(fs_info, logical, time_seq);
1432 	if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1433 		btrfs_tree_read_unlock(eb_root);
1434 		free_extent_buffer(eb_root);
1435 		old = read_tree_block(fs_info, logical, 0);
1436 		if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
1437 			if (!IS_ERR(old))
1438 				free_extent_buffer(old);
1439 			btrfs_warn(fs_info,
1440 				   "failed to read tree block %llu from get_old_root",
1441 				   logical);
1442 		} else {
1443 			eb = btrfs_clone_extent_buffer(old);
1444 			free_extent_buffer(old);
1445 		}
1446 	} else if (old_root) {
1447 		btrfs_tree_read_unlock(eb_root);
1448 		free_extent_buffer(eb_root);
1449 		eb = alloc_dummy_extent_buffer(fs_info, logical);
1450 	} else {
1451 		btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
1452 		eb = btrfs_clone_extent_buffer(eb_root);
1453 		btrfs_tree_read_unlock_blocking(eb_root);
1454 		free_extent_buffer(eb_root);
1455 	}
1456 
1457 	if (!eb)
1458 		return NULL;
1459 	extent_buffer_get(eb);
1460 	btrfs_tree_read_lock(eb);
1461 	if (old_root) {
1462 		btrfs_set_header_bytenr(eb, eb->start);
1463 		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
1464 		btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
1465 		btrfs_set_header_level(eb, old_root->level);
1466 		btrfs_set_header_generation(eb, old_generation);
1467 	}
1468 	if (tm)
1469 		__tree_mod_log_rewind(fs_info, eb, time_seq, tm);
1470 	else
1471 		WARN_ON(btrfs_header_level(eb) != 0);
1472 	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(fs_info));
1473 
1474 	return eb;
1475 }
1476 
1477 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
1478 {
1479 	struct tree_mod_elem *tm;
1480 	int level;
1481 	struct extent_buffer *eb_root = btrfs_root_node(root);
1482 
1483 	tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1484 	if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
1485 		level = tm->old_root.level;
1486 	} else {
1487 		level = btrfs_header_level(eb_root);
1488 	}
1489 	free_extent_buffer(eb_root);
1490 
1491 	return level;
1492 }
1493 
1494 static inline int should_cow_block(struct btrfs_trans_handle *trans,
1495 				   struct btrfs_root *root,
1496 				   struct extent_buffer *buf)
1497 {
1498 	if (btrfs_is_testing(root->fs_info))
1499 		return 0;
1500 
1501 	/* ensure we can see the force_cow */
1502 	smp_rmb();
1503 
1504 	/*
1505 	 * We do not need to cow a block if
1506 	 * 1) this block was not created or changed in this transaction;
1507 	 * 2) this block does not belong to the TREE_RELOC tree;
1508 	 * 3) the root is not forced COW.
1509 	 *
1510 	 * What is forced COW:
1511 	 *    when we create a snapshot while committing the transaction,
1512 	 *    after we've finished copying the src root, we must COW the
1513 	 *    shared block to ensure metadata consistency.
1514 	 */
1515 	if (btrfs_header_generation(buf) == trans->transid &&
1516 	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
1517 	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
1518 	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
1519 	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
1520 		return 0;
1521 	return 1;
1522 }
1523 
1524 /*
1525  * cows a single block, see __btrfs_cow_block for the real work.
1526  * This version of it has extra checks so that a block isn't COWed more than
1527  * once per transaction, as long as it hasn't been written yet
1528  */
1529 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
1530 		    struct btrfs_root *root, struct extent_buffer *buf,
1531 		    struct extent_buffer *parent, int parent_slot,
1532 		    struct extent_buffer **cow_ret)
1533 {
1534 	struct btrfs_fs_info *fs_info = root->fs_info;
1535 	u64 search_start;
1536 	int ret;
1537 
1538 	if (trans->transaction != fs_info->running_transaction)
1539 		WARN(1, KERN_CRIT "trans %llu running %llu\n",
1540 		       trans->transid,
1541 		       fs_info->running_transaction->transid);
1542 
1543 	if (trans->transid != fs_info->generation)
1544 		WARN(1, KERN_CRIT "trans %llu running %llu\n",
1545 		       trans->transid, fs_info->generation);
1546 
1547 	if (!should_cow_block(trans, root, buf)) {
1548 		trans->dirty = true;
1549 		*cow_ret = buf;
1550 		return 0;
1551 	}
1552 
1553 	search_start = buf->start & ~((u64)SZ_1G - 1);
1554 
1555 	if (parent)
1556 		btrfs_set_lock_blocking(parent);
1557 	btrfs_set_lock_blocking(buf);
1558 
1559 	ret = __btrfs_cow_block(trans, root, buf, parent,
1560 				 parent_slot, cow_ret, search_start, 0);
1561 
1562 	trace_btrfs_cow_block(root, buf, *cow_ret);
1563 
1564 	return ret;
1565 }
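
/*
 * Worked example for the search_start hint above: the mask rounds the
 * buffer's logical address down to a 1GiB boundary, e.g. buf->start ==
 * 0x65432100 gives search_start == 0x40000000, steering the new copy
 * toward the same gigabyte of the logical address space.
 */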
1566 
1567 /*
1568  * helper function for defrag to decide if two blocks pointed to by a
1569  * node are actually close by
1570  */
1571 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
1572 {
1573 	if (blocknr < other && other - (blocknr + blocksize) < 32768)
1574 		return 1;
1575 	if (blocknr > other && blocknr - (other + blocksize) < 32768)
1576 		return 1;
1577 	return 0;
1578 }
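
/*
 * Worked example, assuming a 16KiB nodesize (blocksize == 16384): blocks
 * at 0 and 40960 leave a gap of 40960 - 16384 = 24576 bytes, below the
 * 32768 threshold, so they count as close; a block at 65536 leaves a gap
 * of 49152 bytes and does not.
 */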
1579 
1580 /*
1581  * compare two keys in a memcmp fashion
1582  */
1583 static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
1584 {
1585 	struct btrfs_key k1;
1586 
1587 	btrfs_disk_key_to_cpu(&k1, disk);
1588 
1589 	return btrfs_comp_cpu_keys(&k1, k2);
1590 }
1591 
1592 /*
1593  * same as comp_keys, only with two btrfs_keys
1594  */
1595 int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
1596 {
1597 	if (k1->objectid > k2->objectid)
1598 		return 1;
1599 	if (k1->objectid < k2->objectid)
1600 		return -1;
1601 	if (k1->type > k2->type)
1602 		return 1;
1603 	if (k1->type < k2->type)
1604 		return -1;
1605 	if (k1->offset > k2->offset)
1606 		return 1;
1607 	if (k1->offset < k2->offset)
1608 		return -1;
1609 	return 0;
1610 }
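
/*
 * Worked example of the ordering above: keys compare by objectid, then
 * type, then offset, so (256, BTRFS_INODE_ITEM_KEY, 0) sorts before
 * (256, BTRFS_XATTR_ITEM_KEY, 0) whatever the offsets, because
 * BTRFS_INODE_ITEM_KEY (1) < BTRFS_XATTR_ITEM_KEY (24).
 */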
1611 
1612 /*
1613  * this is used by the defrag code to go through all the
1614  * leaves pointed to by a node and reallocate them so that
1615  * disk order is close to key order
1616  */
1617 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
1618 		       struct btrfs_root *root, struct extent_buffer *parent,
1619 		       int start_slot, u64 *last_ret,
1620 		       struct btrfs_key *progress)
1621 {
1622 	struct btrfs_fs_info *fs_info = root->fs_info;
1623 	struct extent_buffer *cur;
1624 	u64 blocknr;
1625 	u64 gen;
1626 	u64 search_start = *last_ret;
1627 	u64 last_block = 0;
1628 	u64 other;
1629 	u32 parent_nritems;
1630 	int end_slot;
1631 	int i;
1632 	int err = 0;
1633 	int parent_level;
1634 	int uptodate;
1635 	u32 blocksize;
1636 	int progress_passed = 0;
1637 	struct btrfs_disk_key disk_key;
1638 
1639 	parent_level = btrfs_header_level(parent);
1640 
1641 	WARN_ON(trans->transaction != fs_info->running_transaction);
1642 	WARN_ON(trans->transid != fs_info->generation);
1643 
1644 	parent_nritems = btrfs_header_nritems(parent);
1645 	blocksize = fs_info->nodesize;
1646 	end_slot = parent_nritems - 1;
1647 
1648 	if (parent_nritems <= 1)
1649 		return 0;
1650 
1651 	btrfs_set_lock_blocking(parent);
1652 
1653 	for (i = start_slot; i <= end_slot; i++) {
1654 		int close = 1;
1655 
1656 		btrfs_node_key(parent, &disk_key, i);
1657 		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
1658 			continue;
1659 
1660 		progress_passed = 1;
1661 		blocknr = btrfs_node_blockptr(parent, i);
1662 		gen = btrfs_node_ptr_generation(parent, i);
1663 		if (last_block == 0)
1664 			last_block = blocknr;
1665 
1666 		if (i > 0) {
1667 			other = btrfs_node_blockptr(parent, i - 1);
1668 			close = close_blocks(blocknr, other, blocksize);
1669 		}
1670 		if (!close && i < end_slot) {
1671 			other = btrfs_node_blockptr(parent, i + 1);
1672 			close = close_blocks(blocknr, other, blocksize);
1673 		}
1674 		if (close) {
1675 			last_block = blocknr;
1676 			continue;
1677 		}
1678 
1679 		cur = find_extent_buffer(fs_info, blocknr);
1680 		if (cur)
1681 			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
1682 		else
1683 			uptodate = 0;
1684 		if (!cur || !uptodate) {
1685 			if (!cur) {
1686 				cur = read_tree_block(fs_info, blocknr, gen);
1687 				if (IS_ERR(cur)) {
1688 					return PTR_ERR(cur);
1689 				} else if (!extent_buffer_uptodate(cur)) {
1690 					free_extent_buffer(cur);
1691 					return -EIO;
1692 				}
1693 			} else if (!uptodate) {
1694 				err = btrfs_read_buffer(cur, gen);
1695 				if (err) {
1696 					free_extent_buffer(cur);
1697 					return err;
1698 				}
1699 			}
1700 		}
1701 		if (search_start == 0)
1702 			search_start = last_block;
1703 
1704 		btrfs_tree_lock(cur);
1705 		btrfs_set_lock_blocking(cur);
1706 		err = __btrfs_cow_block(trans, root, cur, parent, i,
1707 					&cur, search_start,
1708 					min(16 * blocksize,
1709 					    (end_slot - i) * blocksize));
1710 		if (err) {
1711 			btrfs_tree_unlock(cur);
1712 			free_extent_buffer(cur);
1713 			break;
1714 		}
1715 		search_start = cur->start;
1716 		last_block = cur->start;
1717 		*last_ret = search_start;
1718 		btrfs_tree_unlock(cur);
1719 		free_extent_buffer(cur);
1720 	}
1721 	return err;
1722 }
1723 
1724 /*
1725  * search for key in the extent_buffer.  The items start at offset p,
1726  * and they are item_size apart.  There are 'max' items in p.
1727  *
1728  * the slot in the array is returned via slot, and it points to
1729  * the place where you would insert key if it is not found in
1730  * the array.
1731  *
1732  * slot may point to max if the key is bigger than all of the keys
1733  */
1734 static noinline int generic_bin_search(struct extent_buffer *eb,
1735 				       unsigned long p,
1736 				       int item_size, struct btrfs_key *key,
1737 				       int max, int *slot)
1738 {
1739 	int low = 0;
1740 	int high = max;
1741 	int mid;
1742 	int ret;
1743 	struct btrfs_disk_key *tmp = NULL;
1744 	struct btrfs_disk_key unaligned;
1745 	unsigned long offset;
1746 	char *kaddr = NULL;
1747 	unsigned long map_start = 0;
1748 	unsigned long map_len = 0;
1749 	int err;
1750 
1751 	if (low > high) {
1752 		btrfs_err(eb->fs_info,
1753 		 "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
1754 			  __func__, low, high, eb->start,
1755 			  btrfs_header_owner(eb), btrfs_header_level(eb));
1756 		return -EINVAL;
1757 	}
1758 
1759 	while (low < high) {
1760 		mid = (low + high) / 2;
1761 		offset = p + mid * item_size;
1762 
1763 		if (!kaddr || offset < map_start ||
1764 		    (offset + sizeof(struct btrfs_disk_key)) >
1765 		    map_start + map_len) {
1766 
1767 			err = map_private_extent_buffer(eb, offset,
1768 						sizeof(struct btrfs_disk_key),
1769 						&kaddr, &map_start, &map_len);
1770 
1771 			if (!err) {
1772 				tmp = (struct btrfs_disk_key *)(kaddr + offset -
1773 							map_start);
1774 			} else if (err == 1) {
1775 				read_extent_buffer(eb, &unaligned,
1776 						   offset, sizeof(unaligned));
1777 				tmp = &unaligned;
1778 			} else {
1779 				return err;
1780 			}
1781 
1782 		} else {
1783 			tmp = (struct btrfs_disk_key *)(kaddr + offset -
1784 							map_start);
1785 		}
1786 		ret = comp_keys(tmp, key);
1787 
1788 		if (ret < 0)
1789 			low = mid + 1;
1790 		else if (ret > 0)
1791 			high = mid;
1792 		else {
1793 			*slot = mid;
1794 			return 0;
1795 		}
1796 	}
1797 	*slot = low;
1798 	return 1;
1799 }
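
/*
 * A small example of the return convention above: with three keys
 * {2, 4, 6} and a search key of 5, the loop narrows to *slot = 2 and
 * returns 1 (the insertion point before key 6); searching for 4 returns
 * 0 with *slot = 1.
 */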
1800 
1801 /*
1802  * simple bin_search frontend that does the right thing for
1803  * leaves vs nodes
1804  */
1805 static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1806 		      int level, int *slot)
1807 {
1808 	if (level == 0)
1809 		return generic_bin_search(eb,
1810 					  offsetof(struct btrfs_leaf, items),
1811 					  sizeof(struct btrfs_item),
1812 					  key, btrfs_header_nritems(eb),
1813 					  slot);
1814 	else
1815 		return generic_bin_search(eb,
1816 					  offsetof(struct btrfs_node, ptrs),
1817 					  sizeof(struct btrfs_key_ptr),
1818 					  key, btrfs_header_nritems(eb),
1819 					  slot);
1820 }
1821 
1822 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1823 		     int level, int *slot)
1824 {
1825 	return bin_search(eb, key, level, slot);
1826 }
1827 
1828 static void root_add_used(struct btrfs_root *root, u32 size)
1829 {
1830 	spin_lock(&root->accounting_lock);
1831 	btrfs_set_root_used(&root->root_item,
1832 			    btrfs_root_used(&root->root_item) + size);
1833 	spin_unlock(&root->accounting_lock);
1834 }
1835 
1836 static void root_sub_used(struct btrfs_root *root, u32 size)
1837 {
1838 	spin_lock(&root->accounting_lock);
1839 	btrfs_set_root_used(&root->root_item,
1840 			    btrfs_root_used(&root->root_item) - size);
1841 	spin_unlock(&root->accounting_lock);
1842 }
1843 
1844 /* given a node and slot number, this reads the block it points to.  The
1845  * extent buffer is returned with a reference taken (but unlocked).
1846  */
1847 static noinline struct extent_buffer *
1848 read_node_slot(struct btrfs_fs_info *fs_info, struct extent_buffer *parent,
1849 	       int slot)
1850 {
1851 	int level = btrfs_header_level(parent);
1852 	struct extent_buffer *eb;
1853 
1854 	if (slot < 0 || slot >= btrfs_header_nritems(parent))
1855 		return ERR_PTR(-ENOENT);
1856 
1857 	BUG_ON(level == 0);
1858 
1859 	eb = read_tree_block(fs_info, btrfs_node_blockptr(parent, slot),
1860 			     btrfs_node_ptr_generation(parent, slot));
1861 	if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) {
1862 		free_extent_buffer(eb);
1863 		eb = ERR_PTR(-EIO);
1864 	}
1865 
1866 	return eb;
1867 }
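
/*
 * Illustrative usage note (not part of the original file): a caller of
 * read_node_slot() must check the result with IS_ERR() and drop its
 * reference with free_extent_buffer() once it is done, roughly:
 *
 *	eb = read_node_slot(fs_info, parent, slot);
 *	if (IS_ERR(eb))
 *		return PTR_ERR(eb);
 *	...
 *	free_extent_buffer(eb);
 */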
1868 
1869 /*
1870  * node level balancing, used to make sure nodes are in proper order for
1871  * item deletion.  We balance from the top down, so we have to make sure
1872  * that a deletion won't leave an node completely empty later on.
1873  */
1874 static noinline int balance_level(struct btrfs_trans_handle *trans,
1875 			 struct btrfs_root *root,
1876 			 struct btrfs_path *path, int level)
1877 {
1878 	struct btrfs_fs_info *fs_info = root->fs_info;
1879 	struct extent_buffer *right = NULL;
1880 	struct extent_buffer *mid;
1881 	struct extent_buffer *left = NULL;
1882 	struct extent_buffer *parent = NULL;
1883 	int ret = 0;
1884 	int wret;
1885 	int pslot;
1886 	int orig_slot = path->slots[level];
1887 	u64 orig_ptr;
1888 
1889 	if (level == 0)
1890 		return 0;
1891 
1892 	mid = path->nodes[level];
1893 
1894 	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1895 		path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1896 	WARN_ON(btrfs_header_generation(mid) != trans->transid);
1897 
1898 	orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1899 
1900 	if (level < BTRFS_MAX_LEVEL - 1) {
1901 		parent = path->nodes[level + 1];
1902 		pslot = path->slots[level + 1];
1903 	}
1904 
1905 	/*
1906 	 * deal with the case where there is only one pointer in the root
1907 	 * by promoting the node below to a root
1908 	 */
1909 	if (!parent) {
1910 		struct extent_buffer *child;
1911 
1912 		if (btrfs_header_nritems(mid) != 1)
1913 			return 0;
1914 
1915 		/* promote the child to a root */
1916 		child = read_node_slot(fs_info, mid, 0);
1917 		if (IS_ERR(child)) {
1918 			ret = PTR_ERR(child);
1919 			btrfs_handle_fs_error(fs_info, ret, NULL);
1920 			goto enospc;
1921 		}
1922 
1923 		btrfs_tree_lock(child);
1924 		btrfs_set_lock_blocking(child);
1925 		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1926 		if (ret) {
1927 			btrfs_tree_unlock(child);
1928 			free_extent_buffer(child);
1929 			goto enospc;
1930 		}
1931 
1932 		tree_mod_log_set_root_pointer(root, child, 1);
1933 		rcu_assign_pointer(root->node, child);
1934 
1935 		add_root_to_dirty_list(root);
1936 		btrfs_tree_unlock(child);
1937 
1938 		path->locks[level] = 0;
1939 		path->nodes[level] = NULL;
1940 		clean_tree_block(trans, fs_info, mid);
1941 		btrfs_tree_unlock(mid);
1942 		/* once for the path */
1943 		free_extent_buffer(mid);
1944 
1945 		root_sub_used(root, mid->len);
1946 		btrfs_free_tree_block(trans, root, mid, 0, 1);
1947 		/* once for the root ptr */
1948 		free_extent_buffer_stale(mid);
1949 		return 0;
1950 	}
1951 	if (btrfs_header_nritems(mid) >
1952 	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
1953 		return 0;
1954 
1955 	left = read_node_slot(fs_info, parent, pslot - 1);
1956 	if (IS_ERR(left))
1957 		left = NULL;
1958 
1959 	if (left) {
1960 		btrfs_tree_lock(left);
1961 		btrfs_set_lock_blocking(left);
1962 		wret = btrfs_cow_block(trans, root, left,
1963 				       parent, pslot - 1, &left);
1964 		if (wret) {
1965 			ret = wret;
1966 			goto enospc;
1967 		}
1968 	}
1969 
1970 	right = read_node_slot(fs_info, parent, pslot + 1);
1971 	if (IS_ERR(right))
1972 		right = NULL;
1973 
1974 	if (right) {
1975 		btrfs_tree_lock(right);
1976 		btrfs_set_lock_blocking(right);
1977 		wret = btrfs_cow_block(trans, root, right,
1978 				       parent, pslot + 1, &right);
1979 		if (wret) {
1980 			ret = wret;
1981 			goto enospc;
1982 		}
1983 	}
1984 
1985 	/* first, try to make some room in the middle buffer */
1986 	if (left) {
1987 		orig_slot += btrfs_header_nritems(left);
1988 		wret = push_node_left(trans, fs_info, left, mid, 1);
1989 		if (wret < 0)
1990 			ret = wret;
1991 	}
1992 
1993 	/*
1994 	 * then try to empty the right most buffer into the middle
1995 	 */
1996 	if (right) {
1997 		wret = push_node_left(trans, fs_info, mid, right, 1);
1998 		if (wret < 0 && wret != -ENOSPC)
1999 			ret = wret;
2000 		if (btrfs_header_nritems(right) == 0) {
2001 			clean_tree_block(trans, fs_info, right);
2002 			btrfs_tree_unlock(right);
2003 			del_ptr(root, path, level + 1, pslot + 1);
2004 			root_sub_used(root, right->len);
2005 			btrfs_free_tree_block(trans, root, right, 0, 1);
2006 			free_extent_buffer_stale(right);
2007 			right = NULL;
2008 		} else {
2009 			struct btrfs_disk_key right_key;
2010 			btrfs_node_key(right, &right_key, 0);
2011 			tree_mod_log_set_node_key(fs_info, parent,
2012 						  pslot + 1, 0);
2013 			btrfs_set_node_key(parent, &right_key, pslot + 1);
2014 			btrfs_mark_buffer_dirty(parent);
2015 		}
2016 	}
2017 	if (btrfs_header_nritems(mid) == 1) {
2018 		/*
2019 		 * we're not allowed to leave a node with one item in the
2020 		 * tree during a delete.  A deletion from lower in the tree
2021 		 * could try to delete the only pointer in this node.
2022 		 * So, pull some keys from the left.
2023 		 * There has to be a left pointer at this point because
2024 		 * otherwise we would have pulled some pointers from the
2025 		 * right.
2026 		 */
2027 		if (!left) {
2028 			ret = -EROFS;
2029 			btrfs_handle_fs_error(fs_info, ret, NULL);
2030 			goto enospc;
2031 		}
2032 		wret = balance_node_right(trans, fs_info, mid, left);
2033 		if (wret < 0) {
2034 			ret = wret;
2035 			goto enospc;
2036 		}
2037 		if (wret == 1) {
2038 			wret = push_node_left(trans, fs_info, left, mid, 1);
2039 			if (wret < 0)
2040 				ret = wret;
2041 		}
2042 		BUG_ON(wret == 1);
2043 	}
2044 	if (btrfs_header_nritems(mid) == 0) {
2045 		clean_tree_block(trans, fs_info, mid);
2046 		btrfs_tree_unlock(mid);
2047 		del_ptr(root, path, level + 1, pslot);
2048 		root_sub_used(root, mid->len);
2049 		btrfs_free_tree_block(trans, root, mid, 0, 1);
2050 		free_extent_buffer_stale(mid);
2051 		mid = NULL;
2052 	} else {
2053 		/* update the parent key to reflect our changes */
2054 		struct btrfs_disk_key mid_key;
2055 		btrfs_node_key(mid, &mid_key, 0);
2056 		tree_mod_log_set_node_key(fs_info, parent, pslot, 0);
2057 		btrfs_set_node_key(parent, &mid_key, pslot);
2058 		btrfs_mark_buffer_dirty(parent);
2059 	}
2060 
2061 	/* update the path */
2062 	if (left) {
2063 		if (btrfs_header_nritems(left) > orig_slot) {
2064 			extent_buffer_get(left);
2065 			/* left was locked after cow */
2066 			path->nodes[level] = left;
2067 			path->slots[level + 1] -= 1;
2068 			path->slots[level] = orig_slot;
2069 			if (mid) {
2070 				btrfs_tree_unlock(mid);
2071 				free_extent_buffer(mid);
2072 			}
2073 		} else {
2074 			orig_slot -= btrfs_header_nritems(left);
2075 			path->slots[level] = orig_slot;
2076 		}
2077 	}
2078 	/* double check we haven't messed things up */
2079 	if (orig_ptr !=
2080 	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
2081 		BUG();
2082 enospc:
2083 	if (right) {
2084 		btrfs_tree_unlock(right);
2085 		free_extent_buffer(right);
2086 	}
2087 	if (left) {
2088 		if (path->nodes[level] != left)
2089 			btrfs_tree_unlock(left);
2090 		free_extent_buffer(left);
2091 	}
2092 	return ret;
2093 }
2094 
2095 /* Node balancing for insertion.  Here we only split or push nodes around
2096  * when they are completely full.  This is also done top down, so we
2097  * have to be pessimistic.
2098  */
2099 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
2100 					  struct btrfs_root *root,
2101 					  struct btrfs_path *path, int level)
2102 {
2103 	struct btrfs_fs_info *fs_info = root->fs_info;
2104 	struct extent_buffer *right = NULL;
2105 	struct extent_buffer *mid;
2106 	struct extent_buffer *left = NULL;
2107 	struct extent_buffer *parent = NULL;
2108 	int ret = 0;
2109 	int wret;
2110 	int pslot;
2111 	int orig_slot = path->slots[level];
2112 
2113 	if (level == 0)
2114 		return 1;
2115 
2116 	mid = path->nodes[level];
2117 	WARN_ON(btrfs_header_generation(mid) != trans->transid);
2118 
2119 	if (level < BTRFS_MAX_LEVEL - 1) {
2120 		parent = path->nodes[level + 1];
2121 		pslot = path->slots[level + 1];
2122 	}
2123 
2124 	if (!parent)
2125 		return 1;
2126 
2127 	left = read_node_slot(fs_info, parent, pslot - 1);
2128 	if (IS_ERR(left))
2129 		left = NULL;
2130 
2131 	/* first, try to make some room in the middle buffer */
2132 	if (left) {
2133 		u32 left_nr;
2134 
2135 		btrfs_tree_lock(left);
2136 		btrfs_set_lock_blocking(left);
2137 
2138 		left_nr = btrfs_header_nritems(left);
2139 		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
2140 			wret = 1;
2141 		} else {
2142 			ret = btrfs_cow_block(trans, root, left, parent,
2143 					      pslot - 1, &left);
2144 			if (ret)
2145 				wret = 1;
2146 			else {
2147 				wret = push_node_left(trans, fs_info,
2148 						      left, mid, 0);
2149 			}
2150 		}
2151 		if (wret < 0)
2152 			ret = wret;
2153 		if (wret == 0) {
2154 			struct btrfs_disk_key disk_key;
2155 			orig_slot += left_nr;
2156 			btrfs_node_key(mid, &disk_key, 0);
2157 			tree_mod_log_set_node_key(fs_info, parent, pslot, 0);
2158 			btrfs_set_node_key(parent, &disk_key, pslot);
2159 			btrfs_mark_buffer_dirty(parent);
2160 			if (btrfs_header_nritems(left) > orig_slot) {
2161 				path->nodes[level] = left;
2162 				path->slots[level + 1] -= 1;
2163 				path->slots[level] = orig_slot;
2164 				btrfs_tree_unlock(mid);
2165 				free_extent_buffer(mid);
2166 			} else {
2167 				orig_slot -=
2168 					btrfs_header_nritems(left);
2169 				path->slots[level] = orig_slot;
2170 				btrfs_tree_unlock(left);
2171 				free_extent_buffer(left);
2172 			}
2173 			return 0;
2174 		}
2175 		btrfs_tree_unlock(left);
2176 		free_extent_buffer(left);
2177 	}
2178 	right = read_node_slot(fs_info, parent, pslot + 1);
2179 	if (IS_ERR(right))
2180 		right = NULL;
2181 
2182 	/*
2183 	 * then try to empty the right most buffer into the middle
2184 	 */
2185 	if (right) {
2186 		u32 right_nr;
2187 
2188 		btrfs_tree_lock(right);
2189 		btrfs_set_lock_blocking(right);
2190 
2191 		right_nr = btrfs_header_nritems(right);
2192 		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
2193 			wret = 1;
2194 		} else {
2195 			ret = btrfs_cow_block(trans, root, right,
2196 					      parent, pslot + 1,
2197 					      &right);
2198 			if (ret)
2199 				wret = 1;
2200 			else {
2201 				wret = balance_node_right(trans, fs_info,
2202 							  right, mid);
2203 			}
2204 		}
2205 		if (wret < 0)
2206 			ret = wret;
2207 		if (wret == 0) {
2208 			struct btrfs_disk_key disk_key;
2209 
2210 			btrfs_node_key(right, &disk_key, 0);
2211 			tree_mod_log_set_node_key(fs_info, parent,
2212 						  pslot + 1, 0);
2213 			btrfs_set_node_key(parent, &disk_key, pslot + 1);
2214 			btrfs_mark_buffer_dirty(parent);
2215 
2216 			if (btrfs_header_nritems(mid) <= orig_slot) {
2217 				path->nodes[level] = right;
2218 				path->slots[level + 1] += 1;
2219 				path->slots[level] = orig_slot -
2220 					btrfs_header_nritems(mid);
2221 				btrfs_tree_unlock(mid);
2222 				free_extent_buffer(mid);
2223 			} else {
2224 				btrfs_tree_unlock(right);
2225 				free_extent_buffer(right);
2226 			}
2227 			return 0;
2228 		}
2229 		btrfs_tree_unlock(right);
2230 		free_extent_buffer(right);
2231 	}
2232 	return 1;
2233 }
2234 
2235 /*
2236  * readahead one full node of leaves, finding things that are close
2237  * to the block in 'slot', and triggering readahead on them.
2238  */
2239 static void reada_for_search(struct btrfs_fs_info *fs_info,
2240 			     struct btrfs_path *path,
2241 			     int level, int slot, u64 objectid)
2242 {
2243 	struct extent_buffer *node;
2244 	struct btrfs_disk_key disk_key;
2245 	u32 nritems;
2246 	u64 search;
2247 	u64 target;
2248 	u64 nread = 0;
2249 	struct extent_buffer *eb;
2250 	u32 nr;
2251 	u32 blocksize;
2252 	u32 nscan = 0;
2253 
2254 	if (level != 1)
2255 		return;
2256 
2257 	if (!path->nodes[level])
2258 		return;
2259 
2260 	node = path->nodes[level];
2261 
2262 	search = btrfs_node_blockptr(node, slot);
2263 	blocksize = fs_info->nodesize;
2264 	eb = find_extent_buffer(fs_info, search);
2265 	if (eb) {
2266 		free_extent_buffer(eb);
2267 		return;
2268 	}
2269 
2270 	target = search;
2271 
2272 	nritems = btrfs_header_nritems(node);
2273 	nr = slot;
2274 
2275 	while (1) {
2276 		if (path->reada == READA_BACK) {
2277 			if (nr == 0)
2278 				break;
2279 			nr--;
2280 		} else if (path->reada == READA_FORWARD) {
2281 			nr++;
2282 			if (nr >= nritems)
2283 				break;
2284 		}
2285 		if (path->reada == READA_BACK && objectid) {
2286 			btrfs_node_key(node, &disk_key, nr);
2287 			if (btrfs_disk_key_objectid(&disk_key) != objectid)
2288 				break;
2289 		}
2290 		search = btrfs_node_blockptr(node, nr);
2291 		if ((search <= target && target - search <= 65536) ||
2292 		    (search > target && search - target <= 65536)) {
2293 			readahead_tree_block(fs_info, search);
2294 			nread += blocksize;
2295 		}
2296 		nscan++;
2297 		if ((nread > 65536 || nscan > 32))
2298 			break;
2299 	}
2300 }
2301 
2302 static noinline void reada_for_balance(struct btrfs_fs_info *fs_info,
2303 				       struct btrfs_path *path, int level)
2304 {
2305 	int slot;
2306 	int nritems;
2307 	struct extent_buffer *parent;
2308 	struct extent_buffer *eb;
2309 	u64 gen;
2310 	u64 block1 = 0;
2311 	u64 block2 = 0;
2312 
2313 	parent = path->nodes[level + 1];
2314 	if (!parent)
2315 		return;
2316 
2317 	nritems = btrfs_header_nritems(parent);
2318 	slot = path->slots[level + 1];
2319 
2320 	if (slot > 0) {
2321 		block1 = btrfs_node_blockptr(parent, slot - 1);
2322 		gen = btrfs_node_ptr_generation(parent, slot - 1);
2323 		eb = find_extent_buffer(fs_info, block1);
2324 		/*
2325 		 * if we get -EAGAIN from btrfs_buffer_uptodate, we
2326 		 * don't want to return -EAGAIN here.  That will loop
2327 		 * forever.
2328 		 */
2329 		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2330 			block1 = 0;
2331 		free_extent_buffer(eb);
2332 	}
2333 	if (slot + 1 < nritems) {
2334 		block2 = btrfs_node_blockptr(parent, slot + 1);
2335 		gen = btrfs_node_ptr_generation(parent, slot + 1);
2336 		eb = find_extent_buffer(fs_info, block2);
2337 		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2338 			block2 = 0;
2339 		free_extent_buffer(eb);
2340 	}
2341 
2342 	if (block1)
2343 		readahead_tree_block(fs_info, block1);
2344 	if (block2)
2345 		readahead_tree_block(fs_info, block2);
2346 }
2347 
2348 
2349 /*
2350  * when we walk down the tree, it is usually safe to unlock the higher layers
2351  * in the tree.  The exceptions are when our path goes through slot 0, because
2352  * operations on the tree might require changing key pointers higher up in the
2353  * tree.
2354  *
2355  * callers might also have set path->keep_locks, which tells this code to keep
2356  * the lock if the path points to the last slot in the block.  This is part of
2357  * walking through the tree, and selecting the next slot in the higher block.
2358  *
2359  * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  So
2360  * if lowest_unlock is 1, level 0 won't be unlocked.
2361  */
2362 static noinline void unlock_up(struct btrfs_path *path, int level,
2363 			       int lowest_unlock, int min_write_lock_level,
2364 			       int *write_lock_level)
2365 {
2366 	int i;
2367 	int skip_level = level;
2368 	int no_skips = 0;
2369 	struct extent_buffer *t;
2370 
2371 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2372 		if (!path->nodes[i])
2373 			break;
2374 		if (!path->locks[i])
2375 			break;
2376 		if (!no_skips && path->slots[i] == 0) {
2377 			skip_level = i + 1;
2378 			continue;
2379 		}
2380 		if (!no_skips && path->keep_locks) {
2381 			u32 nritems;
2382 			t = path->nodes[i];
2383 			nritems = btrfs_header_nritems(t);
2384 			if (nritems < 1 || path->slots[i] >= nritems - 1) {
2385 				skip_level = i + 1;
2386 				continue;
2387 			}
2388 		}
2389 		if (skip_level < i && i >= lowest_unlock)
2390 			no_skips = 1;
2391 
2392 		t = path->nodes[i];
2393 		if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
2394 			btrfs_tree_unlock_rw(t, path->locks[i]);
2395 			path->locks[i] = 0;
2396 			if (write_lock_level &&
2397 			    i > min_write_lock_level &&
2398 			    i <= *write_lock_level) {
2399 				*write_lock_level = i - 1;
2400 			}
2401 		}
2402 	}
2403 }
2404 
2405 /*
2406  * This releases any locks held in the path starting at level and
2407  * going all the way up to the root.
2408  *
2409  * btrfs_search_slot will keep the lock held on higher nodes in a few
2410  * corner cases, such as COW of the block at slot zero in the node.  This
2411  * ignores those rules, and it should only be called when there are no
2412  * more updates to be done higher up in the tree.
2413  */
2414 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2415 {
2416 	int i;
2417 
2418 	if (path->keep_locks)
2419 		return;
2420 
2421 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2422 		if (!path->nodes[i])
2423 			continue;
2424 		if (!path->locks[i])
2425 			continue;
2426 		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2427 		path->locks[i] = 0;
2428 	}
2429 }
2430 
2431 /*
2432  * helper function for btrfs_search_slot.  The goal is to find a block
2433  * in cache without setting the path to blocking.  If we find the block
2434  * we return zero and the path is unchanged.
2435  *
2436  * If we can't find the block, we set the path blocking and do some
2437  * reada.  -EAGAIN is returned and the search must be repeated.
2438  */
2439 static int
2440 read_block_for_search(struct btrfs_trans_handle *trans,
2441 		       struct btrfs_root *root, struct btrfs_path *p,
2442 		       struct extent_buffer **eb_ret, int level, int slot,
2443 		       struct btrfs_key *key, u64 time_seq)
2444 {
2445 	struct btrfs_fs_info *fs_info = root->fs_info;
2446 	u64 blocknr;
2447 	u64 gen;
2448 	struct extent_buffer *b = *eb_ret;
2449 	struct extent_buffer *tmp;
2450 	int ret;
2451 
2452 	blocknr = btrfs_node_blockptr(b, slot);
2453 	gen = btrfs_node_ptr_generation(b, slot);
2454 
2455 	tmp = find_extent_buffer(fs_info, blocknr);
2456 	if (tmp) {
2457 		/* first we do an atomic uptodate check */
2458 		if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2459 			*eb_ret = tmp;
2460 			return 0;
2461 		}
2462 
2463 		/* the pages were up to date, but we failed
2464 		 * the generation number check.  Do a full
2465 		 * read for the generation number that is correct.
2466 		 * We must do this without dropping locks so
2467 		 * we can trust our generation number
2468 		 */
2469 		btrfs_set_path_blocking(p);
2470 
2471 		/* now we're allowed to do a blocking uptodate check */
2472 		ret = btrfs_read_buffer(tmp, gen);
2473 		if (!ret) {
2474 			*eb_ret = tmp;
2475 			return 0;
2476 		}
2477 		free_extent_buffer(tmp);
2478 		btrfs_release_path(p);
2479 		return -EIO;
2480 	}
2481 
2482 	/*
2483 	 * reduce lock contention at high levels
2484 	 * of the btree by dropping locks before
2485 	 * we read.  Don't release the lock on the current
2486 	 * level because we need to walk this node to figure
2487 	 * out which blocks to read.
2488 	 */
2489 	btrfs_unlock_up_safe(p, level + 1);
2490 	btrfs_set_path_blocking(p);
2491 
2492 	free_extent_buffer(tmp);
2493 	if (p->reada != READA_NONE)
2494 		reada_for_search(fs_info, p, level, slot, key->objectid);
2495 
2496 	btrfs_release_path(p);
2497 
2498 	ret = -EAGAIN;
2499 	tmp = read_tree_block(fs_info, blocknr, 0);
2500 	if (!IS_ERR(tmp)) {
2501 		/*
2502 		 * If the read above didn't mark this buffer up to date,
2503 		 * it will never end up being up to date.  Set ret to EIO now
2504 		 * and give up so that our caller doesn't loop forever
2505 		 * on our EAGAINs.
2506 		 */
2507 		if (!btrfs_buffer_uptodate(tmp, 0, 0))
2508 			ret = -EIO;
2509 		free_extent_buffer(tmp);
2510 	} else {
2511 		ret = PTR_ERR(tmp);
2512 	}
2513 	return ret;
2514 }
2515 
2516 /*
2517  * helper function for btrfs_search_slot.  This does all of the checks
2518  * for node-level blocks and does any balancing required based on
2519  * the ins_len.
2520  *
2521  * If no extra work was required, zero is returned.  If we had to
2522  * drop the path, -EAGAIN is returned and btrfs_search_slot must
2523  * start over
2524  */
2525 static int
2526 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2527 		       struct btrfs_root *root, struct btrfs_path *p,
2528 		       struct extent_buffer *b, int level, int ins_len,
2529 		       int *write_lock_level)
2530 {
2531 	struct btrfs_fs_info *fs_info = root->fs_info;
2532 	int ret;
2533 
2534 	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2535 	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {
2536 		int sret;
2537 
2538 		if (*write_lock_level < level + 1) {
2539 			*write_lock_level = level + 1;
2540 			btrfs_release_path(p);
2541 			goto again;
2542 		}
2543 
2544 		btrfs_set_path_blocking(p);
2545 		reada_for_balance(fs_info, p, level);
2546 		sret = split_node(trans, root, p, level);
2547 		btrfs_clear_path_blocking(p, NULL, 0);
2548 
2549 		BUG_ON(sret > 0);
2550 		if (sret) {
2551 			ret = sret;
2552 			goto done;
2553 		}
2554 		b = p->nodes[level];
2555 	} else if (ins_len < 0 && btrfs_header_nritems(b) <
2556 		   BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {
2557 		int sret;
2558 
2559 		if (*write_lock_level < level + 1) {
2560 			*write_lock_level = level + 1;
2561 			btrfs_release_path(p);
2562 			goto again;
2563 		}
2564 
2565 		btrfs_set_path_blocking(p);
2566 		reada_for_balance(fs_info, p, level);
2567 		sret = balance_level(trans, root, p, level);
2568 		btrfs_clear_path_blocking(p, NULL, 0);
2569 
2570 		if (sret) {
2571 			ret = sret;
2572 			goto done;
2573 		}
2574 		b = p->nodes[level];
2575 		if (!b) {
2576 			btrfs_release_path(p);
2577 			goto again;
2578 		}
2579 		BUG_ON(btrfs_header_nritems(b) == 1);
2580 	}
2581 	return 0;
2582 
2583 again:
2584 	ret = -EAGAIN;
2585 done:
2586 	return ret;
2587 }
2588 
2589 static void key_search_validate(struct extent_buffer *b,
2590 				struct btrfs_key *key,
2591 				int level)
2592 {
2593 #ifdef CONFIG_BTRFS_ASSERT
2594 	struct btrfs_disk_key disk_key;
2595 
2596 	btrfs_cpu_key_to_disk(&disk_key, key);
2597 
2598 	if (level == 0)
2599 		ASSERT(!memcmp_extent_buffer(b, &disk_key,
2600 		    offsetof(struct btrfs_leaf, items[0].key),
2601 		    sizeof(disk_key)));
2602 	else
2603 		ASSERT(!memcmp_extent_buffer(b, &disk_key,
2604 		    offsetof(struct btrfs_node, ptrs[0].key),
2605 		    sizeof(disk_key)));
2606 #endif
2607 }
2608 
2609 static int key_search(struct extent_buffer *b, struct btrfs_key *key,
2610 		      int level, int *prev_cmp, int *slot)
2611 {
2612 	if (*prev_cmp != 0) {
2613 		*prev_cmp = bin_search(b, key, level, slot);
2614 		return *prev_cmp;
2615 	}
2616 
2617 	key_search_validate(b, key, level);
2618 	*slot = 0;
2619 
2620 	return 0;
2621 }
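
/*
 * Note on the prev_cmp shortcut (illustrative, not part of the original
 * file): once bin_search() has reported an exact match (*prev_cmp == 0)
 * at some level, every lower level must hold the same key in slot 0,
 * because an exact match on a node key means the whole child subtree
 * starts with that key.  key_search() therefore skips the binary search
 * on the way down, and key_search_validate() asserts the shortcut under
 * CONFIG_BTRFS_ASSERT.  btrfs_search_old_slot() resets prev_cmp at every
 * level because tree mod log rewinds can change the buffers it walks.
 */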
2622 
2623 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
2624 		u64 iobjectid, u64 ioff, u8 key_type,
2625 		struct btrfs_key *found_key)
2626 {
2627 	int ret;
2628 	struct btrfs_key key;
2629 	struct extent_buffer *eb;
2630 
2631 	ASSERT(path);
2632 	ASSERT(found_key);
2633 
2634 	key.type = key_type;
2635 	key.objectid = iobjectid;
2636 	key.offset = ioff;
2637 
2638 	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
2639 	if (ret < 0)
2640 		return ret;
2641 
2642 	eb = path->nodes[0];
2643 	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
2644 		ret = btrfs_next_leaf(fs_root, path);
2645 		if (ret)
2646 			return ret;
2647 		eb = path->nodes[0];
2648 	}
2649 
2650 	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
2651 	if (found_key->type != key.type ||
2652 			found_key->objectid != key.objectid)
2653 		return 1;
2654 
2655 	return 0;
2656 }
2657 
2658 /*
2659  * look for key in the tree.  path is filled in with nodes along the way
2660  * if key is found, we return zero and you can find the item in the leaf
2661  * level of the path (level 0)
2662  *
2663  * If the key isn't found, the path points to the slot where it should
2664  * be inserted, and 1 is returned.  If there are other errors during the
2665  * search a negative error number is returned.
2666  *
2667  * if ins_len > 0, nodes and leaves will be split as we walk down the
2668  * tree.  if ins_len < 0, nodes will be merged as we walk down the tree (if
2669  * possible)
2670  */
2671 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
2672 		      *root, struct btrfs_key *key, struct btrfs_path *p, int
2673 		      ins_len, int cow)
2674 {
2675 	struct btrfs_fs_info *fs_info = root->fs_info;
2676 	struct extent_buffer *b;
2677 	int slot;
2678 	int ret;
2679 	int err;
2680 	int level;
2681 	int lowest_unlock = 1;
2682 	int root_lock;
2683 	/* everything at write_lock_level or lower must be write locked */
2684 	int write_lock_level = 0;
2685 	u8 lowest_level = 0;
2686 	int min_write_lock_level;
2687 	int prev_cmp;
2688 
2689 	lowest_level = p->lowest_level;
2690 	WARN_ON(lowest_level && ins_len > 0);
2691 	WARN_ON(p->nodes[0] != NULL);
2692 	BUG_ON(!cow && ins_len);
2693 
2694 	if (ins_len < 0) {
2695 		lowest_unlock = 2;
2696 
2697 		/* when we are removing items, we might have to go up to level
2698 		 * two as we update tree pointers.  Make sure we keep write
2699 		 * locks for those levels as well.
2700 		 */
2701 		write_lock_level = 2;
2702 	} else if (ins_len > 0) {
2703 		/*
2704 		 * for inserting items, make sure we have a write lock on
2705 		 * level 1 so we can update keys
2706 		 */
2707 		write_lock_level = 1;
2708 	}
2709 
2710 	if (!cow)
2711 		write_lock_level = -1;
2712 
2713 	if (cow && (p->keep_locks || p->lowest_level))
2714 		write_lock_level = BTRFS_MAX_LEVEL;
2715 
2716 	min_write_lock_level = write_lock_level;
2717 
2718 again:
2719 	prev_cmp = -1;
2720 	/*
2721 	 * we try very hard to do read locks on the root
2722 	 */
2723 	root_lock = BTRFS_READ_LOCK;
2724 	level = 0;
2725 	if (p->search_commit_root) {
2726 		/*
2727 		 * the commit roots are read only
2728 		 * so we always do read locks
2729 		 */
2730 		if (p->need_commit_sem)
2731 			down_read(&fs_info->commit_root_sem);
2732 		b = root->commit_root;
2733 		extent_buffer_get(b);
2734 		level = btrfs_header_level(b);
2735 		if (p->need_commit_sem)
2736 			up_read(&fs_info->commit_root_sem);
2737 		if (!p->skip_locking)
2738 			btrfs_tree_read_lock(b);
2739 	} else {
2740 		if (p->skip_locking) {
2741 			b = btrfs_root_node(root);
2742 			level = btrfs_header_level(b);
2743 		} else {
2744 			/* we don't know the level of the root node
2745 			 * until we actually have it read locked
2746 			 */
2747 			b = btrfs_read_lock_root_node(root);
2748 			level = btrfs_header_level(b);
2749 			if (level <= write_lock_level) {
2750 				/* whoops, must trade for write lock */
2751 				btrfs_tree_read_unlock(b);
2752 				free_extent_buffer(b);
2753 				b = btrfs_lock_root_node(root);
2754 				root_lock = BTRFS_WRITE_LOCK;
2755 
2756 				/* the level might have changed, check again */
2757 				level = btrfs_header_level(b);
2758 			}
2759 		}
2760 	}
2761 	p->nodes[level] = b;
2762 	if (!p->skip_locking)
2763 		p->locks[level] = root_lock;
2764 
2765 	while (b) {
2766 		level = btrfs_header_level(b);
2767 
2768 		/*
2769 		 * set up the path here so we can release it under lock
2770 		 * contention with the cow code
2771 		 */
2772 		if (cow) {
2773 			/*
2774 			 * if we don't really need to cow this block
2775 			 * then we don't want to set the path blocking,
2776 			 * so we test it here
2777 			 */
2778 			if (!should_cow_block(trans, root, b)) {
2779 				trans->dirty = true;
2780 				goto cow_done;
2781 			}
2782 
2783 			/*
2784 			 * must have write locks on this node and the
2785 			 * parent
2786 			 */
2787 			if (level > write_lock_level ||
2788 			    (level + 1 > write_lock_level &&
2789 			    level + 1 < BTRFS_MAX_LEVEL &&
2790 			    p->nodes[level + 1])) {
2791 				write_lock_level = level + 1;
2792 				btrfs_release_path(p);
2793 				goto again;
2794 			}
2795 
2796 			btrfs_set_path_blocking(p);
2797 			err = btrfs_cow_block(trans, root, b,
2798 					      p->nodes[level + 1],
2799 					      p->slots[level + 1], &b);
2800 			if (err) {
2801 				ret = err;
2802 				goto done;
2803 			}
2804 		}
2805 cow_done:
2806 		p->nodes[level] = b;
2807 		btrfs_clear_path_blocking(p, NULL, 0);
2808 
2809 		/*
2810 		 * we have a lock on b and as long as we aren't changing
2811 		 * the tree, there is no way for the items in b to change.
2812 		 * It is safe to drop the lock on our parent before we
2813 		 * go through the expensive btree search on b.
2814 		 *
2815 		 * If we're inserting or deleting (ins_len != 0), then we might
2816 		 * be changing slot zero, which may require changing the parent.
2817 		 * So, we can't drop the lock until after we know which slot
2818 		 * we're operating on.
2819 		 */
2820 		if (!ins_len && !p->keep_locks) {
2821 			int u = level + 1;
2822 
2823 			if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2824 				btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2825 				p->locks[u] = 0;
2826 			}
2827 		}
2828 
2829 		ret = key_search(b, key, level, &prev_cmp, &slot);
2830 		if (ret < 0)
2831 			goto done;
2832 
2833 		if (level != 0) {
2834 			int dec = 0;
2835 			if (ret && slot > 0) {
2836 				dec = 1;
2837 				slot -= 1;
2838 			}
2839 			p->slots[level] = slot;
2840 			err = setup_nodes_for_search(trans, root, p, b, level,
2841 					     ins_len, &write_lock_level);
2842 			if (err == -EAGAIN)
2843 				goto again;
2844 			if (err) {
2845 				ret = err;
2846 				goto done;
2847 			}
2848 			b = p->nodes[level];
2849 			slot = p->slots[level];
2850 
2851 			/*
2852 			 * slot 0 is special: if we change the key
2853 			 * we have to update the parent pointer
2854 			 * which means we must have a write lock
2855 			 * on the parent
2856 			 */
2857 			if (slot == 0 && ins_len &&
2858 			    write_lock_level < level + 1) {
2859 				write_lock_level = level + 1;
2860 				btrfs_release_path(p);
2861 				goto again;
2862 			}
2863 
2864 			unlock_up(p, level, lowest_unlock,
2865 				  min_write_lock_level, &write_lock_level);
2866 
2867 			if (level == lowest_level) {
2868 				if (dec)
2869 					p->slots[level]++;
2870 				goto done;
2871 			}
2872 
2873 			err = read_block_for_search(trans, root, p,
2874 						    &b, level, slot, key, 0);
2875 			if (err == -EAGAIN)
2876 				goto again;
2877 			if (err) {
2878 				ret = err;
2879 				goto done;
2880 			}
2881 
2882 			if (!p->skip_locking) {
2883 				level = btrfs_header_level(b);
2884 				if (level <= write_lock_level) {
2885 					err = btrfs_try_tree_write_lock(b);
2886 					if (!err) {
2887 						btrfs_set_path_blocking(p);
2888 						btrfs_tree_lock(b);
2889 						btrfs_clear_path_blocking(p, b,
2890 								  BTRFS_WRITE_LOCK);
2891 					}
2892 					p->locks[level] = BTRFS_WRITE_LOCK;
2893 				} else {
2894 					err = btrfs_tree_read_lock_atomic(b);
2895 					if (!err) {
2896 						btrfs_set_path_blocking(p);
2897 						btrfs_tree_read_lock(b);
2898 						btrfs_clear_path_blocking(p, b,
2899 								  BTRFS_READ_LOCK);
2900 					}
2901 					p->locks[level] = BTRFS_READ_LOCK;
2902 				}
2903 				p->nodes[level] = b;
2904 			}
2905 		} else {
2906 			p->slots[level] = slot;
2907 			if (ins_len > 0 &&
2908 			    btrfs_leaf_free_space(fs_info, b) < ins_len) {
2909 				if (write_lock_level < 1) {
2910 					write_lock_level = 1;
2911 					btrfs_release_path(p);
2912 					goto again;
2913 				}
2914 
2915 				btrfs_set_path_blocking(p);
2916 				err = split_leaf(trans, root, key,
2917 						 p, ins_len, ret == 0);
2918 				btrfs_clear_path_blocking(p, NULL, 0);
2919 
2920 				BUG_ON(err > 0);
2921 				if (err) {
2922 					ret = err;
2923 					goto done;
2924 				}
2925 			}
2926 			if (!p->search_for_split)
2927 				unlock_up(p, level, lowest_unlock,
2928 					  min_write_lock_level, &write_lock_level);
2929 			goto done;
2930 		}
2931 	}
2932 	ret = 1;
2933 done:
2934 	/*
2935 	 * we don't really know what they plan on doing with the path
2936 	 * from here on, so for now just mark it as blocking
2937 	 */
2938 	if (!p->leave_spinning)
2939 		btrfs_set_path_blocking(p);
2940 	if (ret < 0 && !p->skip_release_on_error)
2941 		btrfs_release_path(p);
2942 	return ret;
2943 }
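
/*
 * Illustrative sketch (not part of the original file): the typical
 * read-only lookup pattern built on btrfs_search_slot().  Passing a
 * NULL transaction handle with ins_len == 0 and cow == 0 asks for a
 * plain search, exactly as the commit-root and btrfs_find_item paths
 * above do.  The function name and the choice of BTRFS_INODE_ITEM_KEY
 * are just for the example.
 */
static int example_lookup_inode_item(struct btrfs_root *root, u64 objectid)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret == 0) {
		/* exact match at path->nodes[0], slot path->slots[0] */
	} else if (ret == 1) {
		/* not found; path points at the would-be insert position */
	}

	btrfs_free_path(path);
	return ret;
}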
2944 
2945 /*
2946  * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2947  * current state of the tree together with the operations recorded in the tree
2948  * modification log to search for the key in a previous version of this tree, as
2949  * denoted by the time_seq parameter.
2950  *
2951  * Naturally, there is no support for insert, delete or cow operations.
2952  *
2953  * The resulting path and return value will be set up as if we called
2954  * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2955  */
2956 int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
2957 			  struct btrfs_path *p, u64 time_seq)
2958 {
2959 	struct btrfs_fs_info *fs_info = root->fs_info;
2960 	struct extent_buffer *b;
2961 	int slot;
2962 	int ret;
2963 	int err;
2964 	int level;
2965 	int lowest_unlock = 1;
2966 	u8 lowest_level = 0;
2967 	int prev_cmp = -1;
2968 
2969 	lowest_level = p->lowest_level;
2970 	WARN_ON(p->nodes[0] != NULL);
2971 
2972 	if (p->search_commit_root) {
2973 		BUG_ON(time_seq);
2974 		return btrfs_search_slot(NULL, root, key, p, 0, 0);
2975 	}
2976 
2977 again:
2978 	b = get_old_root(root, time_seq);
2979 	level = btrfs_header_level(b);
2980 	p->locks[level] = BTRFS_READ_LOCK;
2981 
2982 	while (b) {
2983 		level = btrfs_header_level(b);
2984 		p->nodes[level] = b;
2985 		btrfs_clear_path_blocking(p, NULL, 0);
2986 
2987 		/*
2988 		 * we have a lock on b and as long as we aren't changing
2989 		 * the tree, there is no way for the items in b to change.
2990 		 * It is safe to drop the lock on our parent before we
2991 		 * go through the expensive btree search on b.
2992 		 */
2993 		btrfs_unlock_up_safe(p, level + 1);
2994 
2995 		/*
2996 		 * Since we can unwind ebs we want to do a real search every
2997 		 * time.
2998 		 */
2999 		prev_cmp = -1;
3000 		ret = key_search(b, key, level, &prev_cmp, &slot);
3001 
3002 		if (level != 0) {
3003 			int dec = 0;
3004 			if (ret && slot > 0) {
3005 				dec = 1;
3006 				slot -= 1;
3007 			}
3008 			p->slots[level] = slot;
3009 			unlock_up(p, level, lowest_unlock, 0, NULL);
3010 
3011 			if (level == lowest_level) {
3012 				if (dec)
3013 					p->slots[level]++;
3014 				goto done;
3015 			}
3016 
3017 			err = read_block_for_search(NULL, root, p, &b, level,
3018 						    slot, key, time_seq);
3019 			if (err == -EAGAIN)
3020 				goto again;
3021 			if (err) {
3022 				ret = err;
3023 				goto done;
3024 			}
3025 
3026 			level = btrfs_header_level(b);
3027 			err = btrfs_tree_read_lock_atomic(b);
3028 			if (!err) {
3029 				btrfs_set_path_blocking(p);
3030 				btrfs_tree_read_lock(b);
3031 				btrfs_clear_path_blocking(p, b,
3032 							  BTRFS_READ_LOCK);
3033 			}
3034 			b = tree_mod_log_rewind(fs_info, p, b, time_seq);
3035 			if (!b) {
3036 				ret = -ENOMEM;
3037 				goto done;
3038 			}
3039 			p->locks[level] = BTRFS_READ_LOCK;
3040 			p->nodes[level] = b;
3041 		} else {
3042 			p->slots[level] = slot;
3043 			unlock_up(p, level, lowest_unlock, 0, NULL);
3044 			goto done;
3045 		}
3046 	}
3047 	ret = 1;
3048 done:
3049 	if (!p->leave_spinning)
3050 		btrfs_set_path_blocking(p);
3051 	if (ret < 0)
3052 		btrfs_release_path(p);
3053 
3054 	return ret;
3055 }
3056 
3057 /*
3058  * helper to use instead of btrfs_search_slot if no exact match is needed but
3059  * instead the next or previous item should be returned.
3060  * When find_higher is true, the next higher item is returned, the next lower
3061  * otherwise.
3062  * When return_any and find_higher are both true, and no higher item is found,
3063  * return the next lower instead.
3064  * When return_any is true and find_higher is false, and no lower item is found,
3065  * return the next higher instead.
3066  * It returns 0 if any item is found, 1 if none is found (tree empty), and
3067  * < 0 on error
3068  */
3069 int btrfs_search_slot_for_read(struct btrfs_root *root,
3070 			       struct btrfs_key *key, struct btrfs_path *p,
3071 			       int find_higher, int return_any)
3072 {
3073 	int ret;
3074 	struct extent_buffer *leaf;
3075 
3076 again:
3077 	ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
3078 	if (ret <= 0)
3079 		return ret;
3080 	/*
3081 	 * a return value of 1 means the path is at the position where the
3082 	 * item should be inserted. Normally this is the next bigger item,
3083 	 * but in case the previous item is the last in a leaf, path points
3084 	 * to the first free slot in the previous leaf, i.e. at an invalid
3085 	 * item.
3086 	 */
3087 	leaf = p->nodes[0];
3088 
3089 	if (find_higher) {
3090 		if (p->slots[0] >= btrfs_header_nritems(leaf)) {
3091 			ret = btrfs_next_leaf(root, p);
3092 			if (ret <= 0)
3093 				return ret;
3094 			if (!return_any)
3095 				return 1;
3096 			/*
3097 			 * no higher item found, return the next
3098 			 * lower instead
3099 			 */
3100 			return_any = 0;
3101 			find_higher = 0;
3102 			btrfs_release_path(p);
3103 			goto again;
3104 		}
3105 	} else {
3106 		if (p->slots[0] == 0) {
3107 			ret = btrfs_prev_leaf(root, p);
3108 			if (ret < 0)
3109 				return ret;
3110 			if (!ret) {
3111 				leaf = p->nodes[0];
3112 				if (p->slots[0] == btrfs_header_nritems(leaf))
3113 					p->slots[0]--;
3114 				return 0;
3115 			}
3116 			if (!return_any)
3117 				return 1;
3118 			/*
3119 			 * no lower item found, return the next
3120 			 * higher instead
3121 			 */
3122 			return_any = 0;
3123 			find_higher = 1;
3124 			btrfs_release_path(p);
3125 			goto again;
3126 		} else {
3127 			--p->slots[0];
3128 		}
3129 	}
3130 	return 0;
3131 }
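
/*
 * Illustrative sketch (not part of the original file): using
 * btrfs_search_slot_for_read() to find the first item at or after a
 * given key, falling back to the next lower item when nothing higher
 * exists.  The function name is made up for the example.
 */
static int example_find_nearest(struct btrfs_root *root,
				struct btrfs_key *key,
				struct btrfs_key *found)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* find_higher == 1, return_any == 1: only fails if the tree is empty */
	ret = btrfs_search_slot_for_read(root, key, path, 1, 1);
	if (ret == 0)
		btrfs_item_key_to_cpu(path->nodes[0], found, path->slots[0]);

	btrfs_free_path(path);
	return ret;
}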
3132 
3133 /*
3134  * adjust the pointers going up the tree, starting at level
3135  * making sure the right key of each node points to 'key'.
3136  * This is used after shifting pointers to the left, so it stops
3137  * fixing up pointers when a given leaf/node is not in slot 0 of the
3138  * higher levels.
3139  *
3140  */
3141 static void fixup_low_keys(struct btrfs_fs_info *fs_info,
3142 			   struct btrfs_path *path,
3143 			   struct btrfs_disk_key *key, int level)
3144 {
3145 	int i;
3146 	struct extent_buffer *t;
3147 
3148 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
3149 		int tslot = path->slots[i];
3150 		if (!path->nodes[i])
3151 			break;
3152 		t = path->nodes[i];
3153 		tree_mod_log_set_node_key(fs_info, t, tslot, 1);
3154 		btrfs_set_node_key(t, key, tslot);
3155 		btrfs_mark_buffer_dirty(path->nodes[i]);
3156 		if (tslot != 0)
3157 			break;
3158 	}
3159 }
3160 
3161 /*
3162  * update item key.
3163  *
3164  * This function isn't completely safe. It's the caller's responsibility
3165  * to ensure that the new key won't break the key order.
3166  */
3167 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
3168 			     struct btrfs_path *path,
3169 			     struct btrfs_key *new_key)
3170 {
3171 	struct btrfs_disk_key disk_key;
3172 	struct extent_buffer *eb;
3173 	int slot;
3174 
3175 	eb = path->nodes[0];
3176 	slot = path->slots[0];
3177 	if (slot > 0) {
3178 		btrfs_item_key(eb, &disk_key, slot - 1);
3179 		BUG_ON(comp_keys(&disk_key, new_key) >= 0);
3180 	}
3181 	if (slot < btrfs_header_nritems(eb) - 1) {
3182 		btrfs_item_key(eb, &disk_key, slot + 1);
3183 		BUG_ON(comp_keys(&disk_key, new_key) <= 0);
3184 	}
3185 
3186 	btrfs_cpu_key_to_disk(&disk_key, new_key);
3187 	btrfs_set_item_key(eb, &disk_key, slot);
3188 	btrfs_mark_buffer_dirty(eb);
3189 	if (slot == 0)
3190 		fixup_low_keys(fs_info, path, &disk_key, 1);
3191 }
3192 
3193 /*
3194  * try to push data from one node into the next node left in the
3195  * tree.
3196  *
3197  * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
3198  * error, and > 0 if there was no room in the left hand block.
3199  */
3200 static int push_node_left(struct btrfs_trans_handle *trans,
3201 			  struct btrfs_fs_info *fs_info,
3202 			  struct extent_buffer *dst,
3203 			  struct extent_buffer *src, int empty)
3204 {
3205 	int push_items = 0;
3206 	int src_nritems;
3207 	int dst_nritems;
3208 	int ret = 0;
3209 
3210 	src_nritems = btrfs_header_nritems(src);
3211 	dst_nritems = btrfs_header_nritems(dst);
3212 	push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
3213 	WARN_ON(btrfs_header_generation(src) != trans->transid);
3214 	WARN_ON(btrfs_header_generation(dst) != trans->transid);
3215 
3216 	if (!empty && src_nritems <= 8)
3217 		return 1;
3218 
3219 	if (push_items <= 0)
3220 		return 1;
3221 
3222 	if (empty) {
3223 		push_items = min(src_nritems, push_items);
3224 		if (push_items < src_nritems) {
3225 			/* leave at least 8 pointers in the node if
3226 			 * we aren't going to empty it
3227 			 */
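			/*
			 * Worked example (illustrative): with src_nritems
			 * == 20 and room for 15 in dst, push_items starts
			 * at 15 and would leave only 5 behind; since
			 * 15 > 8 we shrink the push by 8 to 7, leaving
			 * 13 pointers in src.
			 */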
3228 			if (src_nritems - push_items < 8) {
3229 				if (push_items <= 8)
3230 					return 1;
3231 				push_items -= 8;
3232 			}
3233 		}
3234 	} else
3235 		push_items = min(src_nritems - 8, push_items);
3236 
3237 	ret = tree_mod_log_eb_copy(fs_info, dst, src, dst_nritems, 0,
3238 				   push_items);
3239 	if (ret) {
3240 		btrfs_abort_transaction(trans, ret);
3241 		return ret;
3242 	}
3243 	copy_extent_buffer(dst, src,
3244 			   btrfs_node_key_ptr_offset(dst_nritems),
3245 			   btrfs_node_key_ptr_offset(0),
3246 			   push_items * sizeof(struct btrfs_key_ptr));
3247 
3248 	if (push_items < src_nritems) {
3249 		/*
3250 		 * don't call tree_mod_log_eb_move here, key removal was already
3251 		 * fully logged by tree_mod_log_eb_copy above.
3252 		 */
3253 		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3254 				      btrfs_node_key_ptr_offset(push_items),
3255 				      (src_nritems - push_items) *
3256 				      sizeof(struct btrfs_key_ptr));
3257 	}
3258 	btrfs_set_header_nritems(src, src_nritems - push_items);
3259 	btrfs_set_header_nritems(dst, dst_nritems + push_items);
3260 	btrfs_mark_buffer_dirty(src);
3261 	btrfs_mark_buffer_dirty(dst);
3262 
3263 	return ret;
3264 }
3265 
3266 /*
3267  * try to push data from one node into the next node right in the
3268  * tree.
3269  *
3270  * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3271  * error, and > 0 if there was no room in the right hand block.
3272  *
3273  * this will only push up to 1/2 the contents of the left node over.
3274  */
3275 static int balance_node_right(struct btrfs_trans_handle *trans,
3276 			      struct btrfs_fs_info *fs_info,
3277 			      struct extent_buffer *dst,
3278 			      struct extent_buffer *src)
3279 {
3280 	int push_items = 0;
3281 	int max_push;
3282 	int src_nritems;
3283 	int dst_nritems;
3284 	int ret = 0;
3285 
3286 	WARN_ON(btrfs_header_generation(src) != trans->transid);
3287 	WARN_ON(btrfs_header_generation(dst) != trans->transid);
3288 
3289 	src_nritems = btrfs_header_nritems(src);
3290 	dst_nritems = btrfs_header_nritems(dst);
3291 	push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
3292 	if (push_items <= 0)
3293 		return 1;
3294 
3295 	if (src_nritems < 4)
3296 		return 1;
3297 
3298 	max_push = src_nritems / 2 + 1;
3299 	/* don't try to empty the node */
3300 	if (max_push >= src_nritems)
3301 		return 1;
3302 
3303 	if (max_push < push_items)
3304 		push_items = max_push;
3305 
3306 	tree_mod_log_eb_move(fs_info, dst, push_items, 0, dst_nritems);
3307 	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3308 				      btrfs_node_key_ptr_offset(0),
3309 				      (dst_nritems) *
3310 				      sizeof(struct btrfs_key_ptr));
3311 
3312 	ret = tree_mod_log_eb_copy(fs_info, dst, src, 0,
3313 				   src_nritems - push_items, push_items);
3314 	if (ret) {
3315 		btrfs_abort_transaction(trans, ret);
3316 		return ret;
3317 	}
3318 	copy_extent_buffer(dst, src,
3319 			   btrfs_node_key_ptr_offset(0),
3320 			   btrfs_node_key_ptr_offset(src_nritems - push_items),
3321 			   push_items * sizeof(struct btrfs_key_ptr));
3322 
3323 	btrfs_set_header_nritems(src, src_nritems - push_items);
3324 	btrfs_set_header_nritems(dst, dst_nritems + push_items);
3325 
3326 	btrfs_mark_buffer_dirty(src);
3327 	btrfs_mark_buffer_dirty(dst);
3328 
3329 	return ret;
3330 }
3331 
3332 /*
3333  * helper function to insert a new root level in the tree.
3334  * A new node is allocated, and a single item is inserted to
3335  * point to the existing root
3336  *
3337  * returns zero on success or < 0 on failure.
3338  */
3339 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3340 			   struct btrfs_root *root,
3341 			   struct btrfs_path *path, int level)
3342 {
3343 	struct btrfs_fs_info *fs_info = root->fs_info;
3344 	u64 lower_gen;
3345 	struct extent_buffer *lower;
3346 	struct extent_buffer *c;
3347 	struct extent_buffer *old;
3348 	struct btrfs_disk_key lower_key;
3349 
3350 	BUG_ON(path->nodes[level]);
3351 	BUG_ON(path->nodes[level-1] != root->node);
3352 
3353 	lower = path->nodes[level-1];
3354 	if (level == 1)
3355 		btrfs_item_key(lower, &lower_key, 0);
3356 	else
3357 		btrfs_node_key(lower, &lower_key, 0);
3358 
3359 	c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3360 				   &lower_key, level, root->node->start, 0);
3361 	if (IS_ERR(c))
3362 		return PTR_ERR(c);
3363 
3364 	root_add_used(root, fs_info->nodesize);
3365 
3366 	memzero_extent_buffer(c, 0, sizeof(struct btrfs_header));
3367 	btrfs_set_header_nritems(c, 1);
3368 	btrfs_set_header_level(c, level);
3369 	btrfs_set_header_bytenr(c, c->start);
3370 	btrfs_set_header_generation(c, trans->transid);
3371 	btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
3372 	btrfs_set_header_owner(c, root->root_key.objectid);
3373 
3374 	write_extent_buffer_fsid(c, fs_info->fsid);
3375 	write_extent_buffer_chunk_tree_uuid(c, fs_info->chunk_tree_uuid);
3376 
3377 	btrfs_set_node_key(c, &lower_key, 0);
3378 	btrfs_set_node_blockptr(c, 0, lower->start);
3379 	lower_gen = btrfs_header_generation(lower);
3380 	WARN_ON(lower_gen != trans->transid);
3381 
3382 	btrfs_set_node_ptr_generation(c, 0, lower_gen);
3383 
3384 	btrfs_mark_buffer_dirty(c);
3385 
3386 	old = root->node;
3387 	tree_mod_log_set_root_pointer(root, c, 0);
3388 	rcu_assign_pointer(root->node, c);
3389 
3390 	/* the super has an extra ref to root->node */
3391 	free_extent_buffer(old);
3392 
3393 	add_root_to_dirty_list(root);
3394 	extent_buffer_get(c);
3395 	path->nodes[level] = c;
3396 	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
3397 	path->slots[level] = 0;
3398 	return 0;
3399 }
3400 
3401 /*
3402  * worker function to insert a single pointer in a node.
3403  * the node should have enough room for the pointer already
3404  *
3405  * slot and level indicate where you want the key to go, and
3406  * blocknr is the block the key points to.
3407  */
3408 static void insert_ptr(struct btrfs_trans_handle *trans,
3409 		       struct btrfs_fs_info *fs_info, struct btrfs_path *path,
3410 		       struct btrfs_disk_key *key, u64 bytenr,
3411 		       int slot, int level)
3412 {
3413 	struct extent_buffer *lower;
3414 	int nritems;
3415 	int ret;
3416 
3417 	BUG_ON(!path->nodes[level]);
3418 	btrfs_assert_tree_locked(path->nodes[level]);
3419 	lower = path->nodes[level];
3420 	nritems = btrfs_header_nritems(lower);
3421 	BUG_ON(slot > nritems);
3422 	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(fs_info));
3423 	if (slot != nritems) {
3424 		if (level)
3425 			tree_mod_log_eb_move(fs_info, lower, slot + 1,
3426 					     slot, nritems - slot);
3427 		memmove_extent_buffer(lower,
3428 			      btrfs_node_key_ptr_offset(slot + 1),
3429 			      btrfs_node_key_ptr_offset(slot),
3430 			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
3431 	}
3432 	if (level) {
3433 		ret = tree_mod_log_insert_key(fs_info, lower, slot,
3434 					      MOD_LOG_KEY_ADD, GFP_NOFS);
3435 		BUG_ON(ret < 0);
3436 	}
3437 	btrfs_set_node_key(lower, key, slot);
3438 	btrfs_set_node_blockptr(lower, slot, bytenr);
3439 	WARN_ON(trans->transid == 0);
3440 	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3441 	btrfs_set_header_nritems(lower, nritems + 1);
3442 	btrfs_mark_buffer_dirty(lower);
3443 }
3444 
3445 /*
3446  * split the node at the specified level in path in two.
3447  * The path is corrected to point to the appropriate node after the split
3448  *
3449  * Before splitting this tries to make some room in the node by pushing
3450  * left and right; if either one works, it returns right away.
3451  *
3452  * returns 0 on success and < 0 on failure
3453  */
3454 static noinline int split_node(struct btrfs_trans_handle *trans,
3455 			       struct btrfs_root *root,
3456 			       struct btrfs_path *path, int level)
3457 {
3458 	struct btrfs_fs_info *fs_info = root->fs_info;
3459 	struct extent_buffer *c;
3460 	struct extent_buffer *split;
3461 	struct btrfs_disk_key disk_key;
3462 	int mid;
3463 	int ret;
3464 	u32 c_nritems;
3465 
3466 	c = path->nodes[level];
3467 	WARN_ON(btrfs_header_generation(c) != trans->transid);
3468 	if (c == root->node) {
3469 		/*
3470 		 * trying to split the root, lets make a new one
3471 		 *
3472 		 * tree mod log: we don't log removal of the old root in
3473 		 * insert_new_root, because that root buffer will be kept as a
3474 		 * normal node. We are going to log removal of half of the
3475 		 * elements below with tree_mod_log_eb_copy. We're holding a
3476 		 * tree lock on the buffer, which is why we cannot race with
3477 		 * other tree_mod_log users.
3478 		 */
3479 		ret = insert_new_root(trans, root, path, level + 1);
3480 		if (ret)
3481 			return ret;
3482 	} else {
3483 		ret = push_nodes_for_insert(trans, root, path, level);
3484 		c = path->nodes[level];
3485 		if (!ret && btrfs_header_nritems(c) <
3486 		    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3)
3487 			return 0;
3488 		if (ret < 0)
3489 			return ret;
3490 	}
3491 
3492 	c_nritems = btrfs_header_nritems(c);
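	/*
	 * Illustrative example: with c_nritems == 121, mid becomes 61, so
	 * the lower 61 pointers stay in c and the upper 60 move to split.
	 */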
3493 	mid = (c_nritems + 1) / 2;
3494 	btrfs_node_key(c, &disk_key, mid);
3495 
3496 	split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3497 			&disk_key, level, c->start, 0);
3498 	if (IS_ERR(split))
3499 		return PTR_ERR(split);
3500 
3501 	root_add_used(root, fs_info->nodesize);
3502 
3503 	memzero_extent_buffer(split, 0, sizeof(struct btrfs_header));
3504 	btrfs_set_header_level(split, btrfs_header_level(c));
3505 	btrfs_set_header_bytenr(split, split->start);
3506 	btrfs_set_header_generation(split, trans->transid);
3507 	btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
3508 	btrfs_set_header_owner(split, root->root_key.objectid);
3509 	write_extent_buffer_fsid(split, fs_info->fsid);
3510 	write_extent_buffer_chunk_tree_uuid(split, fs_info->chunk_tree_uuid);
3511 
3512 	ret = tree_mod_log_eb_copy(fs_info, split, c, 0, mid, c_nritems - mid);
3513 	if (ret) {
3514 		btrfs_abort_transaction(trans, ret);
3515 		return ret;
3516 	}
3517 	copy_extent_buffer(split, c,
3518 			   btrfs_node_key_ptr_offset(0),
3519 			   btrfs_node_key_ptr_offset(mid),
3520 			   (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3521 	btrfs_set_header_nritems(split, c_nritems - mid);
3522 	btrfs_set_header_nritems(c, mid);
3523 	ret = 0;
3524 
3525 	btrfs_mark_buffer_dirty(c);
3526 	btrfs_mark_buffer_dirty(split);
3527 
3528 	insert_ptr(trans, fs_info, path, &disk_key, split->start,
3529 		   path->slots[level + 1] + 1, level + 1);
3530 
3531 	if (path->slots[level] >= mid) {
3532 		path->slots[level] -= mid;
3533 		btrfs_tree_unlock(c);
3534 		free_extent_buffer(c);
3535 		path->nodes[level] = split;
3536 		path->slots[level + 1] += 1;
3537 	} else {
3538 		btrfs_tree_unlock(split);
3539 		free_extent_buffer(split);
3540 	}
3541 	return ret;
3542 }
3543 
3544 /*
3545  * how many bytes are required to store the items in a leaf.  start
3546  * and nr indicate which items in the leaf to check.  This totals up the
3547  * space used both by the item structs and the item data
3548  */
3549 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3550 {
3551 	struct btrfs_item *start_item;
3552 	struct btrfs_item *end_item;
3553 	struct btrfs_map_token token;
3554 	int data_len;
3555 	int nritems = btrfs_header_nritems(l);
3556 	int end = min(nritems, start + nr) - 1;
3557 
3558 	if (!nr)
3559 		return 0;
3560 	btrfs_init_map_token(&token);
3561 	start_item = btrfs_item_nr(start);
3562 	end_item = btrfs_item_nr(end);
3563 	data_len = btrfs_token_item_offset(l, start_item, &token) +
3564 		btrfs_token_item_size(l, start_item, &token);
3565 	data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3566 	data_len += sizeof(struct btrfs_item) * nr;
3567 	WARN_ON(data_len < 0);
3568 	return data_len;
3569 }
3570 
3571 /*
3572  * The space between the end of the leaf items and
3573  * the start of the leaf data.  IOW, how much room
3574  * the leaf has left for both items and data
3575  */
3576 noinline int btrfs_leaf_free_space(struct btrfs_fs_info *fs_info,
3577 				   struct extent_buffer *leaf)
3578 {
3579 	int nritems = btrfs_header_nritems(leaf);
3580 	int ret;
3581 
3582 	ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
3583 	if (ret < 0) {
3584 		btrfs_crit(fs_info,
3585 			   "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3586 			   ret,
3587 			   (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
3588 			   leaf_space_used(leaf, 0, nritems), nritems);
3589 	}
3590 	return ret;
3591 }
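
/*
 * Worked example (illustrative, assuming the default 16KiB nodesize):
 * BTRFS_LEAF_DATA_SIZE is the nodesize minus the header, i.e.
 * 16384 - 101 = 16283 bytes.  An item with 100 bytes of data consumes
 * sizeof(struct btrfs_item) + 100 = 25 + 100 = 125 of those bytes, so
 * an empty leaf has room for roughly 130 such items.
 */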
3592 
3593 /*
3594  * min slot controls the lowest index we're willing to push to the
3595  * right.  We'll push up to and including min_slot, but no lower
3596  */
3597 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
3598 				      struct btrfs_fs_info *fs_info,
3599 				      struct btrfs_path *path,
3600 				      int data_size, int empty,
3601 				      struct extent_buffer *right,
3602 				      int free_space, u32 left_nritems,
3603 				      u32 min_slot)
3604 {
3605 	struct extent_buffer *left = path->nodes[0];
3606 	struct extent_buffer *upper = path->nodes[1];
3607 	struct btrfs_map_token token;
3608 	struct btrfs_disk_key disk_key;
3609 	int slot;
3610 	u32 i;
3611 	int push_space = 0;
3612 	int push_items = 0;
3613 	struct btrfs_item *item;
3614 	u32 nr;
3615 	u32 right_nritems;
3616 	u32 data_end;
3617 	u32 this_item_size;
3618 
3619 	btrfs_init_map_token(&token);
3620 
3621 	if (empty)
3622 		nr = 0;
3623 	else
3624 		nr = max_t(u32, 1, min_slot);
3625 
3626 	if (path->slots[0] >= left_nritems)
3627 		push_space += data_size;
3628 
3629 	slot = path->slots[1];
3630 	i = left_nritems - 1;
3631 	while (i >= nr) {
3632 		item = btrfs_item_nr(i);
3633 
3634 		if (!empty && push_items > 0) {
3635 			if (path->slots[0] > i)
3636 				break;
3637 			if (path->slots[0] == i) {
3638 				int space = btrfs_leaf_free_space(fs_info, left);
3639 				if (space + push_space * 2 > free_space)
3640 					break;
3641 			}
3642 		}
3643 
3644 		if (path->slots[0] == i)
3645 			push_space += data_size;
3646 
3647 		this_item_size = btrfs_item_size(left, item);
3648 		if (this_item_size + sizeof(*item) + push_space > free_space)
3649 			break;
3650 
3651 		push_items++;
3652 		push_space += this_item_size + sizeof(*item);
3653 		if (i == 0)
3654 			break;
3655 		i--;
3656 	}
3657 
3658 	if (push_items == 0)
3659 		goto out_unlock;
3660 
3661 	WARN_ON(!empty && push_items == left_nritems);
3662 
3663 	/* push left to right */
3664 	right_nritems = btrfs_header_nritems(right);
3665 
3666 	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3667 	push_space -= leaf_data_end(fs_info, left);
3668 
3669 	/* make room in the right data area */
3670 	data_end = leaf_data_end(fs_info, right);
3671 	memmove_extent_buffer(right,
3672 			      btrfs_leaf_data(right) + data_end - push_space,
3673 			      btrfs_leaf_data(right) + data_end,
3674 			      BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
3675 
3676 	/* copy from the left data area */
3677 	copy_extent_buffer(right, left, btrfs_leaf_data(right) +
3678 		     BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3679 		     btrfs_leaf_data(left) + leaf_data_end(fs_info, left),
3680 		     push_space);
3681 
3682 	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3683 			      btrfs_item_nr_offset(0),
3684 			      right_nritems * sizeof(struct btrfs_item));
3685 
3686 	/* copy the items from left to right */
3687 	copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3688 		   btrfs_item_nr_offset(left_nritems - push_items),
3689 		   push_items * sizeof(struct btrfs_item));
3690 
3691 	/* update the item pointers */
3692 	right_nritems += push_items;
3693 	btrfs_set_header_nritems(right, right_nritems);
3694 	push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3695 	for (i = 0; i < right_nritems; i++) {
3696 		item = btrfs_item_nr(i);
3697 		push_space -= btrfs_token_item_size(right, item, &token);
3698 		btrfs_set_token_item_offset(right, item, push_space, &token);
3699 	}
3700 
3701 	left_nritems -= push_items;
3702 	btrfs_set_header_nritems(left, left_nritems);
3703 
3704 	if (left_nritems)
3705 		btrfs_mark_buffer_dirty(left);
3706 	else
3707 		clean_tree_block(trans, fs_info, left);
3708 
3709 	btrfs_mark_buffer_dirty(right);
3710 
3711 	btrfs_item_key(right, &disk_key, 0);
3712 	btrfs_set_node_key(upper, &disk_key, slot + 1);
3713 	btrfs_mark_buffer_dirty(upper);
3714 
3715 	/* then fixup the leaf pointer in the path */
3716 	if (path->slots[0] >= left_nritems) {
3717 		path->slots[0] -= left_nritems;
3718 		if (btrfs_header_nritems(path->nodes[0]) == 0)
3719 			clean_tree_block(trans, fs_info, path->nodes[0]);
3720 		btrfs_tree_unlock(path->nodes[0]);
3721 		free_extent_buffer(path->nodes[0]);
3722 		path->nodes[0] = right;
3723 		path->slots[1] += 1;
3724 	} else {
3725 		btrfs_tree_unlock(right);
3726 		free_extent_buffer(right);
3727 	}
3728 	return 0;
3729 
3730 out_unlock:
3731 	btrfs_tree_unlock(right);
3732 	free_extent_buffer(right);
3733 	return 1;
3734 }
3735 
3736 /*
3737  * push some data in the path leaf to the right, trying to free up at
3738  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3739  *
3740  * returns 1 if the push failed because the other node didn't have enough
3741  * room, 0 if everything worked out and < 0 if there were major errors.
3742  *
3743  * this will push starting from min_slot to the end of the leaf.  It won't
3744  * push any slot lower than min_slot
3745  */
3746 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3747 			   *root, struct btrfs_path *path,
3748 			   int min_data_size, int data_size,
3749 			   int empty, u32 min_slot)
3750 {
3751 	struct btrfs_fs_info *fs_info = root->fs_info;
3752 	struct extent_buffer *left = path->nodes[0];
3753 	struct extent_buffer *right;
3754 	struct extent_buffer *upper;
3755 	int slot;
3756 	int free_space;
3757 	u32 left_nritems;
3758 	int ret;
3759 
3760 	if (!path->nodes[1])
3761 		return 1;
3762 
3763 	slot = path->slots[1];
3764 	upper = path->nodes[1];
3765 	if (slot >= btrfs_header_nritems(upper) - 1)
3766 		return 1;
3767 
3768 	btrfs_assert_tree_locked(path->nodes[1]);
3769 
3770 	right = read_node_slot(fs_info, upper, slot + 1);
3771 	/*
3772 	 * slot + 1 is not valid or we fail to read the right node,
3773 	 * no big deal, just return.
3774 	 */
3775 	if (IS_ERR(right))
3776 		return 1;
3777 
3778 	btrfs_tree_lock(right);
3779 	btrfs_set_lock_blocking(right);
3780 
3781 	free_space = btrfs_leaf_free_space(fs_info, right);
3782 	if (free_space < data_size)
3783 		goto out_unlock;
3784 
3785 	/* cow and double check */
3786 	ret = btrfs_cow_block(trans, root, right, upper,
3787 			      slot + 1, &right);
3788 	if (ret)
3789 		goto out_unlock;
3790 
3791 	free_space = btrfs_leaf_free_space(fs_info, right);
3792 	if (free_space < data_size)
3793 		goto out_unlock;
3794 
3795 	left_nritems = btrfs_header_nritems(left);
3796 	if (left_nritems == 0)
3797 		goto out_unlock;
3798 
3799 	if (path->slots[0] == left_nritems && !empty) {
3800 		/* Key greater than all keys in the leaf, right neighbor has
3801 		 * enough room for it and we're not emptying our leaf to delete
3802 		 * it, therefore use right neighbor to insert the new item and
3803 		 * no need to touch/dirty our left leaf. */
3804 		btrfs_tree_unlock(left);
3805 		free_extent_buffer(left);
3806 		path->nodes[0] = right;
3807 		path->slots[0] = 0;
3808 		path->slots[1]++;
3809 		return 0;
3810 	}
3811 
3812 	return __push_leaf_right(trans, fs_info, path, min_data_size, empty,
3813 				right, free_space, left_nritems, min_slot);
3814 out_unlock:
3815 	btrfs_tree_unlock(right);
3816 	free_extent_buffer(right);
3817 	return 1;
3818 }
3819 
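/*
 * Condensed sketch of how split_leaf() further below consumes the
 * return convention above: 1 from one side means "no room there, try
 * the other side", < 0 is a hard error, and after the pushes the caller
 * simply re-checks its own (possibly replaced) leaf for space.
 */
#if 0	/* example only, variables as in split_leaf() */
	wret = push_leaf_right(trans, root, path, space_needed,
			       space_needed, 0, 0);
	if (wret < 0)
		return wret;
	if (wret) {
		wret = push_leaf_left(trans, root, path, space_needed,
				      space_needed, 0, (u32)-1);
		if (wret < 0)
			return wret;
	}
	/* did the pushes free enough room in the leaf we now point at? */
	if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= data_size)
		return 0;
#endif
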
3820 /*
3821  * push some data in the path leaf to the left, trying to free up at
3822  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3823  *
3824  * max_slot can put a limit on how far into the leaf we'll push items.  The
3825  * item at 'max_slot' won't be touched.  Use (u32)-1 to make us do all the
3826  * items
3827  */
3828 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
3829 				     struct btrfs_fs_info *fs_info,
3830 				     struct btrfs_path *path, int data_size,
3831 				     int empty, struct extent_buffer *left,
3832 				     int free_space, u32 right_nritems,
3833 				     u32 max_slot)
3834 {
3835 	struct btrfs_disk_key disk_key;
3836 	struct extent_buffer *right = path->nodes[0];
3837 	int i;
3838 	int push_space = 0;
3839 	int push_items = 0;
3840 	struct btrfs_item *item;
3841 	u32 old_left_nritems;
3842 	u32 nr;
3843 	int ret = 0;
3844 	u32 this_item_size;
3845 	u32 old_left_item_size;
3846 	struct btrfs_map_token token;
3847 
3848 	btrfs_init_map_token(&token);
3849 
3850 	if (empty)
3851 		nr = min(right_nritems, max_slot);
3852 	else
3853 		nr = min(right_nritems - 1, max_slot);
3854 
3855 	for (i = 0; i < nr; i++) {
3856 		item = btrfs_item_nr(i);
3857 
3858 		if (!empty && push_items > 0) {
3859 			if (path->slots[0] < i)
3860 				break;
3861 			if (path->slots[0] == i) {
3862 				int space = btrfs_leaf_free_space(fs_info, right);
3863 				if (space + push_space * 2 > free_space)
3864 					break;
3865 			}
3866 		}
3867 
3868 		if (path->slots[0] == i)
3869 			push_space += data_size;
3870 
3871 		this_item_size = btrfs_item_size(right, item);
3872 		if (this_item_size + sizeof(*item) + push_space > free_space)
3873 			break;
3874 
3875 		push_items++;
3876 		push_space += this_item_size + sizeof(*item);
3877 	}
3878 
3879 	if (push_items == 0) {
3880 		ret = 1;
3881 		goto out;
3882 	}
3883 	WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3884 
3885 	/* push data from right to left */
3886 	copy_extent_buffer(left, right,
3887 			   btrfs_item_nr_offset(btrfs_header_nritems(left)),
3888 			   btrfs_item_nr_offset(0),
3889 			   push_items * sizeof(struct btrfs_item));
3890 
3891 	push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
3892 		     btrfs_item_offset_nr(right, push_items - 1);
3893 
3894 	copy_extent_buffer(left, right, btrfs_leaf_data(left) +
3895 		     leaf_data_end(fs_info, left) - push_space,
3896 		     btrfs_leaf_data(right) +
3897 		     btrfs_item_offset_nr(right, push_items - 1),
3898 		     push_space);
3899 	old_left_nritems = btrfs_header_nritems(left);
3900 	BUG_ON(old_left_nritems <= 0);
3901 
3902 	old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3903 	for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3904 		u32 ioff;
3905 
3906 		item = btrfs_item_nr(i);
3907 
3908 		ioff = btrfs_token_item_offset(left, item, &token);
3909 		btrfs_set_token_item_offset(left, item,
3910 		      ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size),
3911 		      &token);
3912 	}
3913 	btrfs_set_header_nritems(left, old_left_nritems + push_items);
3914 
3915 	/* fixup right node */
3916 	if (push_items > right_nritems)
3917 		WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3918 		       right_nritems);
3919 
3920 	if (push_items < right_nritems) {
3921 		push_space = btrfs_item_offset_nr(right, push_items - 1) -
3922 						  leaf_data_end(fs_info, right);
3923 		memmove_extent_buffer(right, btrfs_leaf_data(right) +
3924 				      BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3925 				      btrfs_leaf_data(right) +
3926 				      leaf_data_end(fs_info, right), push_space);
3927 
3928 		memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3929 			      btrfs_item_nr_offset(push_items),
3930 			     (btrfs_header_nritems(right) - push_items) *
3931 			     sizeof(struct btrfs_item));
3932 	}
3933 	right_nritems -= push_items;
3934 	btrfs_set_header_nritems(right, right_nritems);
3935 	push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3936 	for (i = 0; i < right_nritems; i++) {
3937 		item = btrfs_item_nr(i);
3938 
3939 		push_space = push_space - btrfs_token_item_size(right,
3940 								item, &token);
3941 		btrfs_set_token_item_offset(right, item, push_space, &token);
3942 	}
3943 
3944 	btrfs_mark_buffer_dirty(left);
3945 	if (right_nritems)
3946 		btrfs_mark_buffer_dirty(right);
3947 	else
3948 		clean_tree_block(trans, fs_info, right);
3949 
3950 	btrfs_item_key(right, &disk_key, 0);
3951 	fixup_low_keys(fs_info, path, &disk_key, 1);
3952 
3953 	/* then fixup the leaf pointer in the path */
3954 	if (path->slots[0] < push_items) {
3955 		path->slots[0] += old_left_nritems;
3956 		btrfs_tree_unlock(path->nodes[0]);
3957 		free_extent_buffer(path->nodes[0]);
3958 		path->nodes[0] = left;
3959 		path->slots[1] -= 1;
3960 	} else {
3961 		btrfs_tree_unlock(left);
3962 		free_extent_buffer(left);
3963 		path->slots[0] -= push_items;
3964 	}
3965 	BUG_ON(path->slots[0] < 0);
3966 	return ret;
3967 out:
3968 	btrfs_tree_unlock(left);
3969 	free_extent_buffer(left);
3970 	return ret;
3971 }
3972 
3973 /*
3974  * push some data in the path leaf to the left, trying to free up at
3975  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3976  *
3977  * max_slot can put a limit on how far into the leaf we'll push items.  The
3978  * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
3979  * items
3980  */
3981 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3982 			  *root, struct btrfs_path *path, int min_data_size,
3983 			  int data_size, int empty, u32 max_slot)
3984 {
3985 	struct btrfs_fs_info *fs_info = root->fs_info;
3986 	struct extent_buffer *right = path->nodes[0];
3987 	struct extent_buffer *left;
3988 	int slot;
3989 	int free_space;
3990 	u32 right_nritems;
3991 	int ret = 0;
3992 
3993 	slot = path->slots[1];
3994 	if (slot == 0)
3995 		return 1;
3996 	if (!path->nodes[1])
3997 		return 1;
3998 
3999 	right_nritems = btrfs_header_nritems(right);
4000 	if (right_nritems == 0)
4001 		return 1;
4002 
4003 	btrfs_assert_tree_locked(path->nodes[1]);
4004 
4005 	left = read_node_slot(fs_info, path->nodes[1], slot - 1);
4006 	/*
4007 	 * slot - 1 is not valid or we fail to read the left node,
4008 	 * no big deal, just return.
4009 	 */
4010 	if (IS_ERR(left))
4011 		return 1;
4012 
4013 	btrfs_tree_lock(left);
4014 	btrfs_set_lock_blocking(left);
4015 
4016 	free_space = btrfs_leaf_free_space(fs_info, left);
4017 	if (free_space < data_size) {
4018 		ret = 1;
4019 		goto out;
4020 	}
4021 
4022 	/* cow and double check */
4023 	ret = btrfs_cow_block(trans, root, left,
4024 			      path->nodes[1], slot - 1, &left);
4025 	if (ret) {
4026 		/* we hit -ENOSPC, but it isn't fatal here */
4027 		if (ret == -ENOSPC)
4028 			ret = 1;
4029 		goto out;
4030 	}
4031 
4032 	free_space = btrfs_leaf_free_space(fs_info, left);
4033 	if (free_space < data_size) {
4034 		ret = 1;
4035 		goto out;
4036 	}
4037 
4038 	return __push_leaf_left(trans, fs_info, path, min_data_size,
4039 			       empty, left, free_space, right_nritems,
4040 			       max_slot);
4041 out:
4042 	btrfs_tree_unlock(left);
4043 	free_extent_buffer(left);
4044 	return ret;
4045 }
4046 
4047 /*
4048  * split the path's leaf in two, making sure there is at least data_size
4049  * available for the resulting leaf level of the path.
4050  */
4051 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
4052 				    struct btrfs_fs_info *fs_info,
4053 				    struct btrfs_path *path,
4054 				    struct extent_buffer *l,
4055 				    struct extent_buffer *right,
4056 				    int slot, int mid, int nritems)
4057 {
4058 	int data_copy_size;
4059 	int rt_data_off;
4060 	int i;
4061 	struct btrfs_disk_key disk_key;
4062 	struct btrfs_map_token token;
4063 
4064 	btrfs_init_map_token(&token);
4065 
4066 	nritems = nritems - mid;
4067 	btrfs_set_header_nritems(right, nritems);
4068 	data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(fs_info, l);
4069 
4070 	copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
4071 			   btrfs_item_nr_offset(mid),
4072 			   nritems * sizeof(struct btrfs_item));
4073 
4074 	copy_extent_buffer(right, l,
4075 		     btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(fs_info) -
4076 		     data_copy_size, btrfs_leaf_data(l) +
4077 		     leaf_data_end(fs_info, l), data_copy_size);
4078 
4079 	rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid);
4080 
4081 	for (i = 0; i < nritems; i++) {
4082 		struct btrfs_item *item = btrfs_item_nr(i);
4083 		u32 ioff;
4084 
4085 		ioff = btrfs_token_item_offset(right, item, &token);
4086 		btrfs_set_token_item_offset(right, item,
4087 					    ioff + rt_data_off, &token);
4088 	}
4089 
4090 	btrfs_set_header_nritems(l, mid);
4091 	btrfs_item_key(right, &disk_key, 0);
4092 	insert_ptr(trans, fs_info, path, &disk_key, right->start,
4093 		   path->slots[1] + 1, 1);
4094 
4095 	btrfs_mark_buffer_dirty(right);
4096 	btrfs_mark_buffer_dirty(l);
4097 	BUG_ON(path->slots[0] != slot);
4098 
4099 	if (mid <= slot) {
4100 		btrfs_tree_unlock(path->nodes[0]);
4101 		free_extent_buffer(path->nodes[0]);
4102 		path->nodes[0] = right;
4103 		path->slots[0] -= mid;
4104 		path->slots[1] += 1;
4105 	} else {
4106 		btrfs_tree_unlock(right);
4107 		free_extent_buffer(right);
4108 	}
4109 
4110 	BUG_ON(path->slots[0] < 0);
4111 }
4112 
4113 /*
4114  * double splits happen when we need to insert a big item in the middle
4115  * of a leaf.  A double split can leave us with 3 mostly empty leaves:
4116  * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
4117  *          A                 B                 C
4118  *
4119  * We avoid this by trying to push the items on either side of our target
4120  * into the adjacent leaves.  If all goes well we can avoid the double split
4121  * completely.
4122  */
4123 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
4124 					  struct btrfs_root *root,
4125 					  struct btrfs_path *path,
4126 					  int data_size)
4127 {
4128 	struct btrfs_fs_info *fs_info = root->fs_info;
4129 	int ret;
4130 	int progress = 0;
4131 	int slot;
4132 	u32 nritems;
4133 	int space_needed = data_size;
4134 
4135 	slot = path->slots[0];
4136 	if (slot < btrfs_header_nritems(path->nodes[0]))
4137 		space_needed -= btrfs_leaf_free_space(fs_info, path->nodes[0]);
4138 
4139 	/*
4140 	 * try to push all the items after our slot into the
4141 	 * right leaf
4142 	 */
4143 	ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
4144 	if (ret < 0)
4145 		return ret;
4146 
4147 	if (ret == 0)
4148 		progress++;
4149 
4150 	nritems = btrfs_header_nritems(path->nodes[0]);
4151 	/*
4152 	 * our goal is to get our slot at the start or end of a leaf.  If
4153 	 * we've done so, we're done.
4154 	 */
4155 	if (path->slots[0] == 0 || path->slots[0] == nritems)
4156 		return 0;
4157 
4158 	if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= data_size)
4159 		return 0;
4160 
4161 	/* try to push all the items before our slot into the left leaf */
4162 	slot = path->slots[0];
4163 	ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
4164 	if (ret < 0)
4165 		return ret;
4166 
4167 	if (ret == 0)
4168 		progress++;
4169 
4170 	if (progress)
4171 		return 0;
4172 	return 1;
4173 }
4174 
4175 /*
4176  * split the path's leaf in two, making sure there is at least data_size
4177  * available for the resulting leaf level of the path.
4178  *
4179  * returns 0 if all went well and < 0 on failure.
4180  */
4181 static noinline int split_leaf(struct btrfs_trans_handle *trans,
4182 			       struct btrfs_root *root,
4183 			       struct btrfs_key *ins_key,
4184 			       struct btrfs_path *path, int data_size,
4185 			       int extend)
4186 {
4187 	struct btrfs_disk_key disk_key;
4188 	struct extent_buffer *l;
4189 	u32 nritems;
4190 	int mid;
4191 	int slot;
4192 	struct extent_buffer *right;
4193 	struct btrfs_fs_info *fs_info = root->fs_info;
4194 	int ret = 0;
4195 	int wret;
4196 	int split;
4197 	int num_doubles = 0;
4198 	int tried_avoid_double = 0;
4199 
4200 	l = path->nodes[0];
4201 	slot = path->slots[0];
4202 	if (extend && data_size + btrfs_item_size_nr(l, slot) +
4203 	    sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info))
4204 		return -EOVERFLOW;
4205 
4206 	/* first try to make some room by pushing left and right */
4207 	if (data_size && path->nodes[1]) {
4208 		int space_needed = data_size;
4209 
4210 		if (slot < btrfs_header_nritems(l))
4211 			space_needed -= btrfs_leaf_free_space(fs_info, l);
4212 
4213 		wret = push_leaf_right(trans, root, path, space_needed,
4214 				       space_needed, 0, 0);
4215 		if (wret < 0)
4216 			return wret;
4217 		if (wret) {
4218 			wret = push_leaf_left(trans, root, path, space_needed,
4219 					      space_needed, 0, (u32)-1);
4220 			if (wret < 0)
4221 				return wret;
4222 		}
4223 		l = path->nodes[0];
4224 
4225 		/* did the pushes work? */
4226 		if (btrfs_leaf_free_space(fs_info, l) >= data_size)
4227 			return 0;
4228 	}
4229 
4230 	if (!path->nodes[1]) {
4231 		ret = insert_new_root(trans, root, path, 1);
4232 		if (ret)
4233 			return ret;
4234 	}
4235 again:
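	/*
	 * split == 1 is a normal single split at 'mid'.  The checks below
	 * may change it: 0 means no existing items move at all (the new
	 * item gets a freshly allocated, empty leaf), while 2 means even
	 * the chosen split point leaves too little room and a second split
	 * will be needed (counted by num_doubles).
	 */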
4236 	split = 1;
4237 	l = path->nodes[0];
4238 	slot = path->slots[0];
4239 	nritems = btrfs_header_nritems(l);
4240 	mid = (nritems + 1) / 2;
4241 
4242 	if (mid <= slot) {
4243 		if (nritems == 1 ||
4244 		    leaf_space_used(l, mid, nritems - mid) + data_size >
4245 			BTRFS_LEAF_DATA_SIZE(fs_info)) {
4246 			if (slot >= nritems) {
4247 				split = 0;
4248 			} else {
4249 				mid = slot;
4250 				if (mid != nritems &&
4251 				    leaf_space_used(l, mid, nritems - mid) +
4252 				    data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
4253 					if (data_size && !tried_avoid_double)
4254 						goto push_for_double;
4255 					split = 2;
4256 				}
4257 			}
4258 		}
4259 	} else {
4260 		if (leaf_space_used(l, 0, mid) + data_size >
4261 			BTRFS_LEAF_DATA_SIZE(fs_info)) {
4262 			if (!extend && data_size && slot == 0) {
4263 				split = 0;
4264 			} else if ((extend || !data_size) && slot == 0) {
4265 				mid = 1;
4266 			} else {
4267 				mid = slot;
4268 				if (mid != nritems &&
4269 				    leaf_space_used(l, mid, nritems - mid) +
4270 				    data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
4271 					if (data_size && !tried_avoid_double)
4272 						goto push_for_double;
4273 					split = 2;
4274 				}
4275 			}
4276 		}
4277 	}
4278 
4279 	if (split == 0)
4280 		btrfs_cpu_key_to_disk(&disk_key, ins_key);
4281 	else
4282 		btrfs_item_key(l, &disk_key, mid);
4283 
4284 	right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
4285 			&disk_key, 0, l->start, 0);
4286 	if (IS_ERR(right))
4287 		return PTR_ERR(right);
4288 
4289 	root_add_used(root, fs_info->nodesize);
4290 
4291 	memzero_extent_buffer(right, 0, sizeof(struct btrfs_header));
4292 	btrfs_set_header_bytenr(right, right->start);
4293 	btrfs_set_header_generation(right, trans->transid);
4294 	btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
4295 	btrfs_set_header_owner(right, root->root_key.objectid);
4296 	btrfs_set_header_level(right, 0);
4297 	write_extent_buffer_fsid(right, fs_info->fsid);
4298 	write_extent_buffer_chunk_tree_uuid(right, fs_info->chunk_tree_uuid);
4299 
4300 	if (split == 0) {
4301 		if (mid <= slot) {
4302 			btrfs_set_header_nritems(right, 0);
4303 			insert_ptr(trans, fs_info, path, &disk_key,
4304 				   right->start, path->slots[1] + 1, 1);
4305 			btrfs_tree_unlock(path->nodes[0]);
4306 			free_extent_buffer(path->nodes[0]);
4307 			path->nodes[0] = right;
4308 			path->slots[0] = 0;
4309 			path->slots[1] += 1;
4310 		} else {
4311 			btrfs_set_header_nritems(right, 0);
4312 			insert_ptr(trans, fs_info, path, &disk_key,
4313 				   right->start, path->slots[1], 1);
4314 			btrfs_tree_unlock(path->nodes[0]);
4315 			free_extent_buffer(path->nodes[0]);
4316 			path->nodes[0] = right;
4317 			path->slots[0] = 0;
4318 			if (path->slots[1] == 0)
4319 				fixup_low_keys(fs_info, path, &disk_key, 1);
4320 		}
4321 		/*
4322 		 * We create a new leaf 'right' with room for the required
4323 		 * ins_len; btrfs_mark_buffer_dirty() will be called on it
4324 		 * once the new item's content has been copied into 'right'.
4325 		 */
4326 		return ret;
4327 	}
4328 
4329 	copy_for_split(trans, fs_info, path, l, right, slot, mid, nritems);
4330 
4331 	if (split == 2) {
4332 		BUG_ON(num_doubles != 0);
4333 		num_doubles++;
4334 		goto again;
4335 	}
4336 
4337 	return 0;
4338 
4339 push_for_double:
4340 	push_for_double_split(trans, root, path, data_size);
4341 	tried_avoid_double = 1;
4342 	if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= data_size)
4343 		return 0;
4344 	goto again;
4345 }
4346 
4347 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4348 					 struct btrfs_root *root,
4349 					 struct btrfs_path *path, int ins_len)
4350 {
4351 	struct btrfs_fs_info *fs_info = root->fs_info;
4352 	struct btrfs_key key;
4353 	struct extent_buffer *leaf;
4354 	struct btrfs_file_extent_item *fi;
4355 	u64 extent_len = 0;
4356 	u32 item_size;
4357 	int ret;
4358 
4359 	leaf = path->nodes[0];
4360 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4361 
4362 	BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4363 	       key.type != BTRFS_EXTENT_CSUM_KEY);
4364 
4365 	if (btrfs_leaf_free_space(fs_info, leaf) >= ins_len)
4366 		return 0;
4367 
4368 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4369 	if (key.type == BTRFS_EXTENT_DATA_KEY) {
4370 		fi = btrfs_item_ptr(leaf, path->slots[0],
4371 				    struct btrfs_file_extent_item);
4372 		extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4373 	}
4374 	btrfs_release_path(path);
4375 
4376 	path->keep_locks = 1;
4377 	path->search_for_split = 1;
4378 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4379 	path->search_for_split = 0;
4380 	if (ret > 0)
4381 		ret = -EAGAIN;
4382 	if (ret < 0)
4383 		goto err;
4384 
4385 	ret = -EAGAIN;
4386 	leaf = path->nodes[0];
4387 	/* if our item isn't there, return now */
4388 	if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4389 		goto err;
4390 
4391 	/* the leaf has changed, it now has room.  return now */
4392 	if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= ins_len)
4393 		goto err;
4394 
4395 	if (key.type == BTRFS_EXTENT_DATA_KEY) {
4396 		fi = btrfs_item_ptr(leaf, path->slots[0],
4397 				    struct btrfs_file_extent_item);
4398 		if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4399 			goto err;
4400 	}
4401 
4402 	btrfs_set_path_blocking(path);
4403 	ret = split_leaf(trans, root, &key, path, ins_len, 1);
4404 	if (ret)
4405 		goto err;
4406 
4407 	path->keep_locks = 0;
4408 	btrfs_unlock_up_safe(path, 1);
4409 	return 0;
4410 err:
4411 	path->keep_locks = 0;
4412 	return ret;
4413 }
4414 
4415 static noinline int split_item(struct btrfs_trans_handle *trans,
4416 			       struct btrfs_fs_info *fs_info,
4417 			       struct btrfs_path *path,
4418 			       struct btrfs_key *new_key,
4419 			       unsigned long split_offset)
4420 {
4421 	struct extent_buffer *leaf;
4422 	struct btrfs_item *item;
4423 	struct btrfs_item *new_item;
4424 	int slot;
4425 	char *buf;
4426 	u32 nritems;
4427 	u32 item_size;
4428 	u32 orig_offset;
4429 	struct btrfs_disk_key disk_key;
4430 
4431 	leaf = path->nodes[0];
4432 	BUG_ON(btrfs_leaf_free_space(fs_info, leaf) < sizeof(struct btrfs_item));
4433 
4434 	btrfs_set_path_blocking(path);
4435 
4436 	item = btrfs_item_nr(path->slots[0]);
4437 	orig_offset = btrfs_item_offset(leaf, item);
4438 	item_size = btrfs_item_size(leaf, item);
4439 
4440 	buf = kmalloc(item_size, GFP_NOFS);
4441 	if (!buf)
4442 		return -ENOMEM;
4443 
4444 	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4445 			    path->slots[0]), item_size);
4446 
4447 	slot = path->slots[0] + 1;
4448 	nritems = btrfs_header_nritems(leaf);
4449 	if (slot != nritems) {
4450 		/* shift the items */
4451 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4452 				btrfs_item_nr_offset(slot),
4453 				(nritems - slot) * sizeof(struct btrfs_item));
4454 	}
4455 
4456 	btrfs_cpu_key_to_disk(&disk_key, new_key);
4457 	btrfs_set_item_key(leaf, &disk_key, slot);
4458 
4459 	new_item = btrfs_item_nr(slot);
4460 
4461 	btrfs_set_item_offset(leaf, new_item, orig_offset);
4462 	btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4463 
4464 	btrfs_set_item_offset(leaf, item,
4465 			      orig_offset + item_size - split_offset);
4466 	btrfs_set_item_size(leaf, item, split_offset);
4467 
4468 	btrfs_set_header_nritems(leaf, nritems + 1);
4469 
4470 	/* write the data for the start of the original item */
4471 	write_extent_buffer(leaf, buf,
4472 			    btrfs_item_ptr_offset(leaf, path->slots[0]),
4473 			    split_offset);
4474 
4475 	/* write the data for the new item */
4476 	write_extent_buffer(leaf, buf + split_offset,
4477 			    btrfs_item_ptr_offset(leaf, slot),
4478 			    item_size - split_offset);
4479 	btrfs_mark_buffer_dirty(leaf);
4480 
4481 	BUG_ON(btrfs_leaf_free_space(fs_info, leaf) < 0);
4482 	kfree(buf);
4483 	return 0;
4484 }
4485 
4486 /*
4487  * This function splits a single item into two items,
4488  * giving 'new_key' to the new item and splitting the
4489  * old one at split_offset (from the start of the item).
4490  *
4491  * The path may be released by this operation.  After
4492  * the split, the path is pointing to the old item.  The
4493  * new item is going to be in the same node as the old one.
4494  *
4495  * Note, the item being split must be small enough to live alone on
4496  * a tree block with room for one extra struct btrfs_item
4497  *
4498  * This allows us to split the item in place, keeping a lock on the
4499  * leaf the entire time.
4500  */
4501 int btrfs_split_item(struct btrfs_trans_handle *trans,
4502 		     struct btrfs_root *root,
4503 		     struct btrfs_path *path,
4504 		     struct btrfs_key *new_key,
4505 		     unsigned long split_offset)
4506 {
4507 	int ret;
4508 	ret = setup_leaf_for_split(trans, root, path,
4509 				   sizeof(struct btrfs_item));
4510 	if (ret)
4511 		return ret;
4512 
4513 	ret = split_item(trans, root->fs_info, path, new_key, split_offset);
4514 	return ret;
4515 }
4516 
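/*
 * Illustrative sketch (hypothetical fragment): splitting a 32 byte item
 * at byte 16.  Afterwards the path still points at the front half
 * (bytes 0..15) while a new item under new_key holds bytes 16..31 in
 * the next slot.  An -EAGAIN return means the leaf changed while the
 * path was released and the whole operation should be retried.
 */
#if 0	/* example only */
	struct btrfs_key new_key;

	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset += 16;	/* hypothetical key for the tail item */
	ret = btrfs_split_item(trans, root, path, &new_key, 16);
	if (ret == -EAGAIN)
		goto again;	/* assumes a retry label in the caller */
#endif
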
4517 /*
4518  * This function duplicates an item, giving 'new_key' to the new item.
4519  * It guarantees both items live in the same tree leaf and the new item
4520  * is contiguous with the original item.
4521  *
4522  * This allows us to split a file extent in place, keeping a lock on the
4523  * leaf the entire time.
4524  */
4525 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4526 			 struct btrfs_root *root,
4527 			 struct btrfs_path *path,
4528 			 struct btrfs_key *new_key)
4529 {
4530 	struct extent_buffer *leaf;
4531 	int ret;
4532 	u32 item_size;
4533 
4534 	leaf = path->nodes[0];
4535 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4536 	ret = setup_leaf_for_split(trans, root, path,
4537 				   item_size + sizeof(struct btrfs_item));
4538 	if (ret)
4539 		return ret;
4540 
4541 	path->slots[0]++;
4542 	setup_items_for_insert(root, path, new_key, &item_size,
4543 			       item_size, item_size +
4544 			       sizeof(struct btrfs_item), 1);
4545 	leaf = path->nodes[0];
4546 	memcpy_extent_buffer(leaf,
4547 			     btrfs_item_ptr_offset(leaf, path->slots[0]),
4548 			     btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4549 			     item_size);
4550 	return 0;
4551 }
4552 
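/*
 * Illustrative sketch (hypothetical fragment): duplicating a file
 * extent item so the two copies can then be trimmed independently;
 * split_pos is a hypothetical file offset inside the extent.  On
 * success the path points at the new copy, one slot past the original.
 */
#if 0	/* example only */
	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset = split_pos;
	ret = btrfs_duplicate_item(trans, root, path, &new_key);
#endif
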
4553 /*
4554  * make the item pointed to by the path smaller.  new_size indicates
4555  * how small to make it, and from_end tells us if we just chop bytes
4556  * off the end of the item or if we shift the item to chop bytes off
4557  * the front.
4558  */
4559 void btrfs_truncate_item(struct btrfs_fs_info *fs_info,
4560 			 struct btrfs_path *path, u32 new_size, int from_end)
4561 {
4562 	int slot;
4563 	struct extent_buffer *leaf;
4564 	struct btrfs_item *item;
4565 	u32 nritems;
4566 	unsigned int data_end;
4567 	unsigned int old_data_start;
4568 	unsigned int old_size;
4569 	unsigned int size_diff;
4570 	int i;
4571 	struct btrfs_map_token token;
4572 
4573 	btrfs_init_map_token(&token);
4574 
4575 	leaf = path->nodes[0];
4576 	slot = path->slots[0];
4577 
4578 	old_size = btrfs_item_size_nr(leaf, slot);
4579 	if (old_size == new_size)
4580 		return;
4581 
4582 	nritems = btrfs_header_nritems(leaf);
4583 	data_end = leaf_data_end(fs_info, leaf);
4584 
4585 	old_data_start = btrfs_item_offset_nr(leaf, slot);
4586 
4587 	size_diff = old_size - new_size;
4588 
4589 	BUG_ON(slot < 0);
4590 	BUG_ON(slot >= nritems);
4591 
4592 	/*
4593 	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4594 	 */
4595 	/* first correct the data pointers */
4596 	for (i = slot; i < nritems; i++) {
4597 		u32 ioff;
4598 		item = btrfs_item_nr(i);
4599 
4600 		ioff = btrfs_token_item_offset(leaf, item, &token);
4601 		btrfs_set_token_item_offset(leaf, item,
4602 					    ioff + size_diff, &token);
4603 	}
4604 
4605 	/* shift the data */
4606 	if (from_end) {
4607 		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4608 			      data_end + size_diff, btrfs_leaf_data(leaf) +
4609 			      data_end, old_data_start + new_size - data_end);
4610 	} else {
4611 		struct btrfs_disk_key disk_key;
4612 		u64 offset;
4613 
4614 		btrfs_item_key(leaf, &disk_key, slot);
4615 
4616 		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4617 			unsigned long ptr;
4618 			struct btrfs_file_extent_item *fi;
4619 
4620 			fi = btrfs_item_ptr(leaf, slot,
4621 					    struct btrfs_file_extent_item);
4622 			fi = (struct btrfs_file_extent_item *)(
4623 			     (unsigned long)fi - size_diff);
4624 
4625 			if (btrfs_file_extent_type(leaf, fi) ==
4626 			    BTRFS_FILE_EXTENT_INLINE) {
4627 				ptr = btrfs_item_ptr_offset(leaf, slot);
4628 				memmove_extent_buffer(leaf, ptr,
4629 				      (unsigned long)fi,
4630 				      BTRFS_FILE_EXTENT_INLINE_DATA_START);
4631 			}
4632 		}
4633 
4634 		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4635 			      data_end + size_diff, btrfs_leaf_data(leaf) +
4636 			      data_end, old_data_start - data_end);
4637 
4638 		offset = btrfs_disk_key_offset(&disk_key);
4639 		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4640 		btrfs_set_item_key(leaf, &disk_key, slot);
4641 		if (slot == 0)
4642 			fixup_low_keys(fs_info, path, &disk_key, 1);
4643 	}
4644 
4645 	item = btrfs_item_nr(slot);
4646 	btrfs_set_item_size(leaf, item, new_size);
4647 	btrfs_mark_buffer_dirty(leaf);
4648 
4649 	if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
4650 		btrfs_print_leaf(fs_info, leaf);
4651 		BUG();
4652 	}
4653 }
4654 
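/*
 * Illustrative sketch (hypothetical sizes): shrinking a 100 byte item
 * to 80 bytes.  With from_end == 1 the last 20 bytes are chopped off;
 * with from_end == 0 the first 20 bytes go instead and, as shown above,
 * the item's key offset is bumped by the 20 byte difference.
 */
#if 0	/* example only */
	btrfs_truncate_item(fs_info, path, 80, 1);	/* drop the tail */
	btrfs_truncate_item(fs_info, path, 80, 0);	/* drop the front */
#endif
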
4655 /*
4656  * make the item pointed to by the path bigger, data_size is the added size.
4657  */
4658 void btrfs_extend_item(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
4659 		       u32 data_size)
4660 {
4661 	int slot;
4662 	struct extent_buffer *leaf;
4663 	struct btrfs_item *item;
4664 	u32 nritems;
4665 	unsigned int data_end;
4666 	unsigned int old_data;
4667 	unsigned int old_size;
4668 	int i;
4669 	struct btrfs_map_token token;
4670 
4671 	btrfs_init_map_token(&token);
4672 
4673 	leaf = path->nodes[0];
4674 
4675 	nritems = btrfs_header_nritems(leaf);
4676 	data_end = leaf_data_end(fs_info, leaf);
4677 
4678 	if (btrfs_leaf_free_space(fs_info, leaf) < data_size) {
4679 		btrfs_print_leaf(fs_info, leaf);
4680 		BUG();
4681 	}
4682 	slot = path->slots[0];
4683 	old_data = btrfs_item_end_nr(leaf, slot);
4684 
4685 	BUG_ON(slot < 0);
4686 	if (slot >= nritems) {
4687 		btrfs_print_leaf(fs_info, leaf);
4688 		btrfs_crit(fs_info, "slot %d too large, nritems %d",
4689 			   slot, nritems);
4690 		BUG_ON(1);
4691 	}
4692 
4693 	/*
4694 	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4695 	 */
4696 	/* first correct the data pointers */
4697 	for (i = slot; i < nritems; i++) {
4698 		u32 ioff;
4699 		item = btrfs_item_nr(i);
4700 
4701 		ioff = btrfs_token_item_offset(leaf, item, &token);
4702 		btrfs_set_token_item_offset(leaf, item,
4703 					    ioff - data_size, &token);
4704 	}
4705 
4706 	/* shift the data */
4707 	memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4708 		      data_end - data_size, btrfs_leaf_data(leaf) +
4709 		      data_end, old_data - data_end);
4710 
4711 	data_end = old_data;
4712 	old_size = btrfs_item_size_nr(leaf, slot);
4713 	item = btrfs_item_nr(slot);
4714 	btrfs_set_item_size(leaf, item, old_size + data_size);
4715 	btrfs_mark_buffer_dirty(leaf);
4716 
4717 	if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
4718 		btrfs_print_leaf(fs_info, leaf);
4719 		BUG();
4720 	}
4721 }
4722 
4723 /*
4724  * this is a helper for btrfs_insert_empty_items; the main goal here is
4725  * to save stack depth by doing the bulk of the work in a function
4726  * that doesn't call btrfs_search_slot
4727  */
4728 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4729 			    struct btrfs_key *cpu_key, u32 *data_size,
4730 			    u32 total_data, u32 total_size, int nr)
4731 {
4732 	struct btrfs_fs_info *fs_info = root->fs_info;
4733 	struct btrfs_item *item;
4734 	int i;
4735 	u32 nritems;
4736 	unsigned int data_end;
4737 	struct btrfs_disk_key disk_key;
4738 	struct extent_buffer *leaf;
4739 	int slot;
4740 	struct btrfs_map_token token;
4741 
4742 	if (path->slots[0] == 0) {
4743 		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4744 		fixup_low_keys(fs_info, path, &disk_key, 1);
4745 	}
4746 	btrfs_unlock_up_safe(path, 1);
4747 
4748 	btrfs_init_map_token(&token);
4749 
4750 	leaf = path->nodes[0];
4751 	slot = path->slots[0];
4752 
4753 	nritems = btrfs_header_nritems(leaf);
4754 	data_end = leaf_data_end(fs_info, leaf);
4755 
4756 	if (btrfs_leaf_free_space(fs_info, leaf) < total_size) {
4757 		btrfs_print_leaf(fs_info, leaf);
4758 		btrfs_crit(fs_info, "not enough freespace need %u have %d",
4759 			   total_size, btrfs_leaf_free_space(fs_info, leaf));
4760 		BUG();
4761 	}
4762 
4763 	if (slot != nritems) {
4764 		unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4765 
4766 		if (old_data < data_end) {
4767 			btrfs_print_leaf(fs_info, leaf);
4768 			btrfs_crit(fs_info, "slot %d old_data %d data_end %d",
4769 				   slot, old_data, data_end);
4770 			BUG_ON(1);
4771 		}
4772 		/*
4773 		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4774 		 */
4775 		/* first correct the data pointers */
4776 		for (i = slot; i < nritems; i++) {
4777 			u32 ioff;
4778 
4779 			item = btrfs_item_nr(i);
4780 			ioff = btrfs_token_item_offset(leaf, item, &token);
4781 			btrfs_set_token_item_offset(leaf, item,
4782 						    ioff - total_data, &token);
4783 		}
4784 		/* shift the items */
4785 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4786 			      btrfs_item_nr_offset(slot),
4787 			      (nritems - slot) * sizeof(struct btrfs_item));
4788 
4789 		/* shift the data */
4790 		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4791 			      data_end - total_data, btrfs_leaf_data(leaf) +
4792 			      data_end, old_data - data_end);
4793 		data_end = old_data;
4794 	}
4795 
4796 	/* setup the item for the new data */
4797 	for (i = 0; i < nr; i++) {
4798 		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4799 		btrfs_set_item_key(leaf, &disk_key, slot + i);
4800 		item = btrfs_item_nr(slot + i);
4801 		btrfs_set_token_item_offset(leaf, item,
4802 					    data_end - data_size[i], &token);
4803 		data_end -= data_size[i];
4804 		btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4805 	}
4806 
4807 	btrfs_set_header_nritems(leaf, nritems + nr);
4808 	btrfs_mark_buffer_dirty(leaf);
4809 
4810 	if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
4811 		btrfs_print_leaf(fs_info, leaf);
4812 		BUG();
4813 	}
4814 }
4815 
4816 /*
4817  * Given a key and some data, insert items into the tree.
4818  * This does all the path init required, making room in the tree if needed.
4819  */
4820 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4821 			    struct btrfs_root *root,
4822 			    struct btrfs_path *path,
4823 			    struct btrfs_key *cpu_key, u32 *data_size,
4824 			    int nr)
4825 {
4826 	int ret = 0;
4827 	int slot;
4828 	int i;
4829 	u32 total_size = 0;
4830 	u32 total_data = 0;
4831 
4832 	for (i = 0; i < nr; i++)
4833 		total_data += data_size[i];
4834 
4835 	total_size = total_data + (nr * sizeof(struct btrfs_item));
4836 	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4837 	if (ret == 0)
4838 		return -EEXIST;
4839 	if (ret < 0)
4840 		return ret;
4841 
4842 	slot = path->slots[0];
4843 	BUG_ON(slot < 0);
4844 
4845 	setup_items_for_insert(root, path, cpu_key, data_size,
4846 			       total_data, total_size, nr);
4847 	return 0;
4848 }
4849 
4850 /*
4851  * Given a key and some data, insert an item into the tree.
4852  * This does all the path init required, making room in the tree if needed.
4853  */
4854 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
4855 		      *root, struct btrfs_key *cpu_key, void *data, u32
4856 		      data_size)
4857 {
4858 	int ret = 0;
4859 	struct btrfs_path *path;
4860 	struct extent_buffer *leaf;
4861 	unsigned long ptr;
4862 
4863 	path = btrfs_alloc_path();
4864 	if (!path)
4865 		return -ENOMEM;
4866 	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4867 	if (!ret) {
4868 		leaf = path->nodes[0];
4869 		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4870 		write_extent_buffer(leaf, data, ptr, data_size);
4871 		btrfs_mark_buffer_dirty(leaf);
4872 	}
4873 	btrfs_free_path(path);
4874 	return ret;
4875 }
4876 
4877 /*
4878  * delete the pointer from a given node.
4879  *
4880  * the tree should have been previously balanced so the deletion does not
4881  * empty a node.
4882  */
4883 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4884 		    int level, int slot)
4885 {
4886 	struct btrfs_fs_info *fs_info = root->fs_info;
4887 	struct extent_buffer *parent = path->nodes[level];
4888 	u32 nritems;
4889 	int ret;
4890 
4891 	nritems = btrfs_header_nritems(parent);
4892 	if (slot != nritems - 1) {
4893 		if (level)
4894 			tree_mod_log_eb_move(fs_info, parent, slot,
4895 					     slot + 1, nritems - slot - 1);
4896 		memmove_extent_buffer(parent,
4897 			      btrfs_node_key_ptr_offset(slot),
4898 			      btrfs_node_key_ptr_offset(slot + 1),
4899 			      sizeof(struct btrfs_key_ptr) *
4900 			      (nritems - slot - 1));
4901 	} else if (level) {
4902 		ret = tree_mod_log_insert_key(fs_info, parent, slot,
4903 					      MOD_LOG_KEY_REMOVE, GFP_NOFS);
4904 		BUG_ON(ret < 0);
4905 	}
4906 
4907 	nritems--;
4908 	btrfs_set_header_nritems(parent, nritems);
4909 	if (nritems == 0 && parent == root->node) {
4910 		BUG_ON(btrfs_header_level(root->node) != 1);
4911 		/* just turn the root into a leaf and break */
4912 		btrfs_set_header_level(root->node, 0);
4913 	} else if (slot == 0) {
4914 		struct btrfs_disk_key disk_key;
4915 
4916 		btrfs_node_key(parent, &disk_key, 0);
4917 		fixup_low_keys(fs_info, path, &disk_key, level + 1);
4918 	}
4919 	btrfs_mark_buffer_dirty(parent);
4920 }
4921 
4922 /*
4923  * a helper function to delete the leaf pointed to by path->slots[1] and
4924  * path->nodes[1].
4925  *
4926  * This deletes the pointer in path->nodes[1] and frees the leaf
4927  * block extent.  zero is returned if it all worked out, < 0 otherwise.
4928  *
4929  * The path must have already been setup for deleting the leaf, including
4930  * all the proper balancing.  path->nodes[1] must be locked.
4931  */
4932 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4933 				    struct btrfs_root *root,
4934 				    struct btrfs_path *path,
4935 				    struct extent_buffer *leaf)
4936 {
4937 	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4938 	del_ptr(root, path, 1, path->slots[1]);
4939 
4940 	/*
4941 	 * btrfs_free_extent is expensive, we want to make sure we
4942 	 * aren't holding any locks when we call it
4943 	 */
4944 	btrfs_unlock_up_safe(path, 0);
4945 
4946 	root_sub_used(root, leaf->len);
4947 
4948 	extent_buffer_get(leaf);
4949 	btrfs_free_tree_block(trans, root, leaf, 0, 1);
4950 	free_extent_buffer_stale(leaf);
4951 }
4952 /*
4953  * delete the item at the leaf level in path.  If that empties
4954  * the leaf, remove it from the tree
4955  */
4956 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4957 		    struct btrfs_path *path, int slot, int nr)
4958 {
4959 	struct btrfs_fs_info *fs_info = root->fs_info;
4960 	struct extent_buffer *leaf;
4961 	struct btrfs_item *item;
4962 	u32 last_off;
4963 	u32 dsize = 0;
4964 	int ret = 0;
4965 	int wret;
4966 	int i;
4967 	u32 nritems;
4968 	struct btrfs_map_token token;
4969 
4970 	btrfs_init_map_token(&token);
4971 
4972 	leaf = path->nodes[0];
4973 	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4974 
4975 	for (i = 0; i < nr; i++)
4976 		dsize += btrfs_item_size_nr(leaf, slot + i);
4977 
4978 	nritems = btrfs_header_nritems(leaf);
4979 
4980 	if (slot + nr != nritems) {
4981 		int data_end = leaf_data_end(fs_info, leaf);
4982 
4983 		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4984 			      data_end + dsize,
4985 			      btrfs_leaf_data(leaf) + data_end,
4986 			      last_off - data_end);
4987 
4988 		for (i = slot + nr; i < nritems; i++) {
4989 			u32 ioff;
4990 
4991 			item = btrfs_item_nr(i);
4992 			ioff = btrfs_token_item_offset(leaf, item, &token);
4993 			btrfs_set_token_item_offset(leaf, item,
4994 						    ioff + dsize, &token);
4995 		}
4996 
4997 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4998 			      btrfs_item_nr_offset(slot + nr),
4999 			      sizeof(struct btrfs_item) *
5000 			      (nritems - slot - nr));
5001 	}
5002 	btrfs_set_header_nritems(leaf, nritems - nr);
5003 	nritems -= nr;
5004 
5005 	/* delete the leaf if we've emptied it */
5006 	if (nritems == 0) {
5007 		if (leaf == root->node) {
5008 			btrfs_set_header_level(leaf, 0);
5009 		} else {
5010 			btrfs_set_path_blocking(path);
5011 			clean_tree_block(trans, fs_info, leaf);
5012 			btrfs_del_leaf(trans, root, path, leaf);
5013 		}
5014 	} else {
5015 		int used = leaf_space_used(leaf, 0, nritems);
5016 		if (slot == 0) {
5017 			struct btrfs_disk_key disk_key;
5018 
5019 			btrfs_item_key(leaf, &disk_key, 0);
5020 			fixup_low_keys(fs_info, path, &disk_key, 1);
5021 		}
5022 
5023 		/* delete the leaf if it is mostly empty */
5024 		if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
5025 			/* push_leaf_left fixes the path.
5026 			 * make sure the path still points to our leaf
5027 			 * for a possible call to del_ptr below
5028 			 */
5029 			slot = path->slots[1];
5030 			extent_buffer_get(leaf);
5031 
5032 			btrfs_set_path_blocking(path);
5033 			wret = push_leaf_left(trans, root, path, 1, 1,
5034 					      1, (u32)-1);
5035 			if (wret < 0 && wret != -ENOSPC)
5036 				ret = wret;
5037 
5038 			if (path->nodes[0] == leaf &&
5039 			    btrfs_header_nritems(leaf)) {
5040 				wret = push_leaf_right(trans, root, path, 1,
5041 						       1, 1, 0);
5042 				if (wret < 0 && wret != -ENOSPC)
5043 					ret = wret;
5044 			}
5045 
5046 			if (btrfs_header_nritems(leaf) == 0) {
5047 				path->slots[1] = slot;
5048 				btrfs_del_leaf(trans, root, path, leaf);
5049 				free_extent_buffer(leaf);
5050 				ret = 0;
5051 			} else {
5052 				/* if we're still in the path, make sure
5053 				 * we're dirty.  Otherwise, one of the
5054 				 * push_leaf functions must have already
5055 				 * dirtied this buffer
5056 				 */
5057 				if (path->nodes[0] == leaf)
5058 					btrfs_mark_buffer_dirty(leaf);
5059 				free_extent_buffer(leaf);
5060 			}
5061 		} else {
5062 			btrfs_mark_buffer_dirty(leaf);
5063 		}
5064 	}
5065 	return ret;
5066 }
5067 
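/*
 * Illustrative sketch: deleting just the item the path points at; this
 * is effectively what the single-item btrfs_del_item() helper boils
 * down to.
 */
#if 0	/* example only */
	ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
#endif
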
5068 /*
5069  * search the tree again to find a leaf with lesser keys
5070  * returns 0 if it found something or 1 if there are no lesser leaves.
5071  * returns < 0 on io errors.
5072  *
5073  * This may release the path, and so you may lose any locks held at the
5074  * time you call it.
5075  */
5076 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
5077 {
5078 	struct btrfs_key key;
5079 	struct btrfs_disk_key found_key;
5080 	int ret;
5081 
5082 	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
5083 
5084 	if (key.offset > 0) {
5085 		key.offset--;
5086 	} else if (key.type > 0) {
5087 		key.type--;
5088 		key.offset = (u64)-1;
5089 	} else if (key.objectid > 0) {
5090 		key.objectid--;
5091 		key.type = (u8)-1;
5092 		key.offset = (u64)-1;
5093 	} else {
5094 		return 1;
5095 	}
5096 
5097 	btrfs_release_path(path);
5098 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5099 	if (ret < 0)
5100 		return ret;
5101 	btrfs_item_key(path->nodes[0], &found_key, 0);
5102 	ret = comp_keys(&found_key, &key);
5103 	/*
5104 	 * We might have had an item with the previous key in the tree right
5105 	 * before we released our path. And after we released our path, that
5106 	 * item might have been pushed to the first slot (0) of the leaf we
5107 	 * were holding due to a tree balance. Alternatively, an item with the
5108 	 * previous key can exist as the only element of a leaf (big fat item).
5109 	 * Therefore account for these 2 cases, so that our callers (like
5110 	 * btrfs_previous_item) don't miss an existing item with a key matching
5111 	 * the previous key we computed above.
5112 	 */
5113 	if (ret <= 0)
5114 		return 0;
5115 	return 1;
5116 }
5117 
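/*
 * Worked example of the key decrement above (illustrative numbers):
 * starting from (objectid 257, type 1, offset 0), the offset cannot be
 * decremented, so the search key becomes (257, 0, (u64)-1) -- the
 * largest key that still sorts strictly before the original one -- and
 * the following search lands on the last slot at or before it.
 */
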
5118 /*
5119  * A helper function to walk down the tree starting at min_key, looking
5120  * for nodes or leaves that have a generation of at least min_trans.
5121  * This is used by the btree defrag code and by tree logging.
5122  *
5123  * This does not cow, but it does stuff the starting key it finds back
5124  * into min_key, so you can call btrfs_search_slot with cow=1 on the
5125  * key and get a writable path.
5126  *
5127  * This does lock as it descends, and path->keep_locks should be set
5128  * to 1 by the caller.
5129  *
5130  * This honors path->lowest_level to prevent descent past a given level
5131  * of the tree.
5132  *
5133  * min_trans indicates the oldest transaction that you are interested
5134  * in walking through.  Any nodes or leaves older than min_trans are
5135  * skipped over (without reading them).
5136  *
5137  * returns zero if something useful was found, < 0 on error and 1 if there
5138  * was nothing in the tree that matched the search criteria.
5139  */
5140 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
5141 			 struct btrfs_path *path,
5142 			 u64 min_trans)
5143 {
5144 	struct btrfs_fs_info *fs_info = root->fs_info;
5145 	struct extent_buffer *cur;
5146 	struct btrfs_key found_key;
5147 	int slot;
5148 	int sret;
5149 	u32 nritems;
5150 	int level;
5151 	int ret = 1;
5152 	int keep_locks = path->keep_locks;
5153 
5154 	path->keep_locks = 1;
5155 again:
5156 	cur = btrfs_read_lock_root_node(root);
5157 	level = btrfs_header_level(cur);
5158 	WARN_ON(path->nodes[level]);
5159 	path->nodes[level] = cur;
5160 	path->locks[level] = BTRFS_READ_LOCK;
5161 
5162 	if (btrfs_header_generation(cur) < min_trans) {
5163 		ret = 1;
5164 		goto out;
5165 	}
5166 	while (1) {
5167 		nritems = btrfs_header_nritems(cur);
5168 		level = btrfs_header_level(cur);
5169 		sret = bin_search(cur, min_key, level, &slot);
5170 
5171 		/* at the lowest level, we're done, setup the path and exit */
5172 		if (level == path->lowest_level) {
5173 			if (slot >= nritems)
5174 				goto find_next_key;
5175 			ret = 0;
5176 			path->slots[level] = slot;
5177 			btrfs_item_key_to_cpu(cur, &found_key, slot);
5178 			goto out;
5179 		}
5180 		if (sret && slot > 0)
5181 			slot--;
5182 		/*
5183 		 * check this node pointer against the min_trans parameter.
5184 		 * If it is too old, skip to the next one.
5185 		 */
5186 		while (slot < nritems) {
5187 			u64 gen;
5188 
5189 			gen = btrfs_node_ptr_generation(cur, slot);
5190 			if (gen < min_trans) {
5191 				slot++;
5192 				continue;
5193 			}
5194 			break;
5195 		}
5196 find_next_key:
5197 		/*
5198 		 * we didn't find a candidate key in this node, walk forward
5199 		 * and find another one
5200 		 */
5201 		if (slot >= nritems) {
5202 			path->slots[level] = slot;
5203 			btrfs_set_path_blocking(path);
5204 			sret = btrfs_find_next_key(root, path, min_key, level,
5205 						  min_trans);
5206 			if (sret == 0) {
5207 				btrfs_release_path(path);
5208 				goto again;
5209 			} else {
5210 				goto out;
5211 			}
5212 		}
5213 		/* save our key for returning back */
5214 		btrfs_node_key_to_cpu(cur, &found_key, slot);
5215 		path->slots[level] = slot;
5216 		if (level == path->lowest_level) {
5217 			ret = 0;
5218 			goto out;
5219 		}
5220 		btrfs_set_path_blocking(path);
5221 		cur = read_node_slot(fs_info, cur, slot);
5222 		if (IS_ERR(cur)) {
5223 			ret = PTR_ERR(cur);
5224 			goto out;
5225 		}
5226 
5227 		btrfs_tree_read_lock(cur);
5228 
5229 		path->locks[level - 1] = BTRFS_READ_LOCK;
5230 		path->nodes[level - 1] = cur;
5231 		unlock_up(path, level, 1, 0, NULL);
5232 		btrfs_clear_path_blocking(path, NULL, 0);
5233 	}
5234 out:
5235 	path->keep_locks = keep_locks;
5236 	if (ret == 0) {
5237 		btrfs_unlock_up_safe(path, path->lowest_level + 1);
5238 		btrfs_set_path_blocking(path);
5239 		memcpy(min_key, &found_key, sizeof(found_key));
5240 	}
5241 	return ret;
5242 }
5243 
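/*
 * Illustrative caller sketch (hypothetical fragment), in the style of
 * the defrag and tree-log users mentioned above: visit every leaf slot
 * whose block is at least as new as min_trans, stepping the search key
 * forward after each hit.  The simple offset-only advance is an
 * assumption made for brevity, not how the real callers iterate.
 */
#if 0	/* example only */
	struct btrfs_key min_key = { .objectid = 0, .type = 0, .offset = 0 };

	path->keep_locks = 1;
	while (btrfs_search_forward(root, &min_key, path, min_trans) == 0) {
		/* process path->nodes[0], slot path->slots[0] here */
		btrfs_release_path(path);
		if (min_key.offset == (u64)-1)
			break;		/* cannot step past the last key */
		min_key.offset++;	/* search strictly after this key */
	}
#endif
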
5244 static int tree_move_down(struct btrfs_fs_info *fs_info,
5245 			   struct btrfs_path *path,
5246 			   int *level, int root_level)
5247 {
5248 	struct extent_buffer *eb;
5249 
5250 	BUG_ON(*level == 0);
5251 	eb = read_node_slot(fs_info, path->nodes[*level], path->slots[*level]);
5252 	if (IS_ERR(eb))
5253 		return PTR_ERR(eb);
5254 
5255 	path->nodes[*level - 1] = eb;
5256 	path->slots[*level - 1] = 0;
5257 	(*level)--;
5258 	return 0;
5259 }
5260 
5261 static int tree_move_next_or_upnext(struct btrfs_fs_info *fs_info,
5262 				    struct btrfs_path *path,
5263 				    int *level, int root_level)
5264 {
5265 	int ret = 0;
5266 	int nritems;
5267 	nritems = btrfs_header_nritems(path->nodes[*level]);
5268 
5269 	path->slots[*level]++;
5270 
5271 	while (path->slots[*level] >= nritems) {
5272 		if (*level == root_level)
5273 			return -1;
5274 
5275 		/* move upnext */
5276 		path->slots[*level] = 0;
5277 		free_extent_buffer(path->nodes[*level]);
5278 		path->nodes[*level] = NULL;
5279 		(*level)++;
5280 		path->slots[*level]++;
5281 
5282 		nritems = btrfs_header_nritems(path->nodes[*level]);
5283 		ret = 1;
5284 	}
5285 	return ret;
5286 }
5287 
5288 /*
5289  * Returns 1 if it had to move up and next, 0 if it moved only next or
5290  * down, -1 at the end of the tree, and other negative values on read errors.
5291  */
5292 static int tree_advance(struct btrfs_fs_info *fs_info,
5293 			struct btrfs_path *path,
5294 			int *level, int root_level,
5295 			int allow_down,
5296 			struct btrfs_key *key)
5297 {
5298 	int ret;
5299 
5300 	if (*level == 0 || !allow_down) {
5301 		ret = tree_move_next_or_upnext(fs_info, path, level,
5302 					       root_level);
5303 	} else {
5304 		ret = tree_move_down(fs_info, path, level, root_level);
5305 	}
5306 	if (ret >= 0) {
5307 		if (*level == 0)
5308 			btrfs_item_key_to_cpu(path->nodes[*level], key,
5309 					path->slots[*level]);
5310 		else
5311 			btrfs_node_key_to_cpu(path->nodes[*level], key,
5312 					path->slots[*level]);
5313 	}
5314 	return ret;
5315 }
5316 
5317 static int tree_compare_item(struct btrfs_path *left_path,
5318 			     struct btrfs_path *right_path,
5319 			     char *tmp_buf)
5320 {
5321 	int cmp;
5322 	int len1, len2;
5323 	unsigned long off1, off2;
5324 
5325 	len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
5326 	len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
5327 	if (len1 != len2)
5328 		return 1;
5329 
5330 	off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
5331 	off2 = btrfs_item_ptr_offset(right_path->nodes[0],
5332 				right_path->slots[0]);
5333 
5334 	read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
5335 
5336 	cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
5337 	if (cmp)
5338 		return 1;
5339 	return 0;
5340 }
5341 
5342 #define ADVANCE 1
5343 #define ADVANCE_ONLY_NEXT -1
5344 
5345 /*
5346  * This function compares two trees and calls the provided callback for
5347  * every changed/new/deleted item it finds.
5348  * If shared tree blocks are encountered, whole subtrees are skipped, making
5349  * the compare pretty fast on snapshotted subvolumes.
5350  *
5351  * This currently works on commit roots only. As commit roots are read only,
5352  * we don't do any locking. The commit roots are protected with transactions.
5353  * Transactions are ended and rejoined when a commit is tried in between.
5354  *
5355  * This function checks for modifications done to the trees while comparing.
5356  * If it detects a change, it aborts immediately.
5357  */
5358 int btrfs_compare_trees(struct btrfs_root *left_root,
5359 			struct btrfs_root *right_root,
5360 			btrfs_changed_cb_t changed_cb, void *ctx)
5361 {
5362 	struct btrfs_fs_info *fs_info = left_root->fs_info;
5363 	int ret;
5364 	int cmp;
5365 	struct btrfs_path *left_path = NULL;
5366 	struct btrfs_path *right_path = NULL;
5367 	struct btrfs_key left_key;
5368 	struct btrfs_key right_key;
5369 	char *tmp_buf = NULL;
5370 	int left_root_level;
5371 	int right_root_level;
5372 	int left_level;
5373 	int right_level;
5374 	int left_end_reached;
5375 	int right_end_reached;
5376 	int advance_left;
5377 	int advance_right;
5378 	u64 left_blockptr;
5379 	u64 right_blockptr;
5380 	u64 left_gen;
5381 	u64 right_gen;
5382 
5383 	left_path = btrfs_alloc_path();
5384 	if (!left_path) {
5385 		ret = -ENOMEM;
5386 		goto out;
5387 	}
5388 	right_path = btrfs_alloc_path();
5389 	if (!right_path) {
5390 		ret = -ENOMEM;
5391 		goto out;
5392 	}
5393 
5394 	tmp_buf = kmalloc(fs_info->nodesize, GFP_KERNEL | __GFP_NOWARN);
5395 	if (!tmp_buf) {
5396 		tmp_buf = vmalloc(fs_info->nodesize);
5397 		if (!tmp_buf) {
5398 			ret = -ENOMEM;
5399 			goto out;
5400 		}
5401 	}
5402 
5403 	left_path->search_commit_root = 1;
5404 	left_path->skip_locking = 1;
5405 	right_path->search_commit_root = 1;
5406 	right_path->skip_locking = 1;
5407 
5408 	/*
5409 	 * Strategy: Go to the first items of both trees. Then do
5410 	 *
5411 	 * If both trees are at level 0
5412 	 *   Compare keys of current items
5413 	 *     If left < right treat left item as new, advance left tree
5414 	 *       and repeat
5415 	 *     If left > right treat right item as deleted, advance right tree
5416 	 *       and repeat
5417 	 *     If left == right do deep compare of items, treat as changed if
5418 	 *       needed, advance both trees and repeat
5419 	 * If both trees are at the same level but not at level 0
5420 	 *   Compare keys of current nodes/leaves
5421 	 *     If left < right advance left tree and repeat
5422 	 *     If left > right advance right tree and repeat
5423 	 *     If left == right compare blockptrs of the next nodes/leaves
5424 	 *       If they match advance both trees but stay at the same level
5425 	 *         and repeat
5426 	 *       If they don't match advance both trees while allowing to go
5427 	 *         deeper and repeat
5428 	 * If tree levels are different
5429 	 *   Advance the tree that needs it and repeat
5430 	 *
5431 	 * Advancing a tree means:
5432 	 *   If we are at level 0, try to go to the next slot. If that's not
5433 	 *   possible, go one level up and repeat. Stop when we find a level
5434 	 *   where we can go to the next slot. We may at this point be on a
5435 	 *   node or a leaf.
5436 	 *
5437 	 *   If we are not at level 0 and not on shared tree blocks, go one
5438 	 *   level deeper.
5439 	 *
5440 	 *   If we are not at level 0 and on shared tree blocks, go one slot to
5441 	 *   the right if possible or go up and right.
5442 	 */
5443 
5444 	down_read(&fs_info->commit_root_sem);
5445 	left_level = btrfs_header_level(left_root->commit_root);
5446 	left_root_level = left_level;
5447 	left_path->nodes[left_level] = left_root->commit_root;
5448 	extent_buffer_get(left_path->nodes[left_level]);
5449 
5450 	right_level = btrfs_header_level(right_root->commit_root);
5451 	right_root_level = right_level;
5452 	right_path->nodes[right_level] = right_root->commit_root;
5453 	extent_buffer_get(right_path->nodes[right_level]);
5454 	up_read(&fs_info->commit_root_sem);
5455 
5456 	if (left_level == 0)
5457 		btrfs_item_key_to_cpu(left_path->nodes[left_level],
5458 				&left_key, left_path->slots[left_level]);
5459 	else
5460 		btrfs_node_key_to_cpu(left_path->nodes[left_level],
5461 				&left_key, left_path->slots[left_level]);
5462 	if (right_level == 0)
5463 		btrfs_item_key_to_cpu(right_path->nodes[right_level],
5464 				&right_key, right_path->slots[right_level]);
5465 	else
5466 		btrfs_node_key_to_cpu(right_path->nodes[right_level],
5467 				&right_key, right_path->slots[right_level]);
5468 
5469 	left_end_reached = right_end_reached = 0;
5470 	advance_left = advance_right = 0;
5471 
5472 	while (1) {
5473 		if (advance_left && !left_end_reached) {
5474 			ret = tree_advance(fs_info, left_path, &left_level,
5475 					left_root_level,
5476 					advance_left != ADVANCE_ONLY_NEXT,
5477 					&left_key);
5478 			if (ret == -1)
5479 				left_end_reached = ADVANCE;
5480 			else if (ret < 0)
5481 				goto out;
5482 			advance_left = 0;
5483 		}
5484 		if (advance_right && !right_end_reached) {
5485 			ret = tree_advance(fs_info, right_path, &right_level,
5486 					right_root_level,
5487 					advance_right != ADVANCE_ONLY_NEXT,
5488 					&right_key);
5489 			if (ret == -1)
5490 				right_end_reached = ADVANCE;
5491 			else if (ret < 0)
5492 				goto out;
5493 			advance_right = 0;
5494 		}
5495 
5496 		if (left_end_reached && right_end_reached) {
5497 			ret = 0;
5498 			goto out;
5499 		} else if (left_end_reached) {
5500 			if (right_level == 0) {
5501 				ret = changed_cb(left_root, right_root,
5502 						left_path, right_path,
5503 						&right_key,
5504 						BTRFS_COMPARE_TREE_DELETED,
5505 						ctx);
5506 				if (ret < 0)
5507 					goto out;
5508 			}
5509 			advance_right = ADVANCE;
5510 			continue;
5511 		} else if (right_end_reached) {
5512 			if (left_level == 0) {
5513 				ret = changed_cb(left_root, right_root,
5514 						left_path, right_path,
5515 						&left_key,
5516 						BTRFS_COMPARE_TREE_NEW,
5517 						ctx);
5518 				if (ret < 0)
5519 					goto out;
5520 			}
5521 			advance_left = ADVANCE;
5522 			continue;
5523 		}
5524 
5525 		if (left_level == 0 && right_level == 0) {
5526 			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5527 			if (cmp < 0) {
5528 				ret = changed_cb(left_root, right_root,
5529 						left_path, right_path,
5530 						&left_key,
5531 						BTRFS_COMPARE_TREE_NEW,
5532 						ctx);
5533 				if (ret < 0)
5534 					goto out;
5535 				advance_left = ADVANCE;
5536 			} else if (cmp > 0) {
5537 				ret = changed_cb(left_root, right_root,
5538 						left_path, right_path,
5539 						&right_key,
5540 						BTRFS_COMPARE_TREE_DELETED,
5541 						ctx);
5542 				if (ret < 0)
5543 					goto out;
5544 				advance_right = ADVANCE;
5545 			} else {
5546 				enum btrfs_compare_tree_result result;
5547 
5548 				WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5549 				ret = tree_compare_item(left_path, right_path,
5550 							tmp_buf);
5551 				if (ret)
5552 					result = BTRFS_COMPARE_TREE_CHANGED;
5553 				else
5554 					result = BTRFS_COMPARE_TREE_SAME;
5555 				ret = changed_cb(left_root, right_root,
5556 						 left_path, right_path,
5557 						 &left_key, result, ctx);
5558 				if (ret < 0)
5559 					goto out;
5560 				advance_left = ADVANCE;
5561 				advance_right = ADVANCE;
5562 			}
5563 		} else if (left_level == right_level) {
5564 			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5565 			if (cmp < 0) {
5566 				advance_left = ADVANCE;
5567 			} else if (cmp > 0) {
5568 				advance_right = ADVANCE;
5569 			} else {
5570 				left_blockptr = btrfs_node_blockptr(
5571 						left_path->nodes[left_level],
5572 						left_path->slots[left_level]);
5573 				right_blockptr = btrfs_node_blockptr(
5574 						right_path->nodes[right_level],
5575 						right_path->slots[right_level]);
5576 				left_gen = btrfs_node_ptr_generation(
5577 						left_path->nodes[left_level],
5578 						left_path->slots[left_level]);
5579 				right_gen = btrfs_node_ptr_generation(
5580 						right_path->nodes[right_level],
5581 						right_path->slots[right_level]);
5582 				if (left_blockptr == right_blockptr &&
5583 				    left_gen == right_gen) {
5584 					/*
5585 					 * As we're on a shared block, don't
5586 					 * allow going any deeper.
5587 					 */
5588 					advance_left = ADVANCE_ONLY_NEXT;
5589 					advance_right = ADVANCE_ONLY_NEXT;
5590 				} else {
5591 					advance_left = ADVANCE;
5592 					advance_right = ADVANCE;
5593 				}
5594 			}
5595 		} else if (left_level < right_level) {
5596 			advance_right = ADVANCE;
5597 		} else {
5598 			advance_left = ADVANCE;
5599 		}
5600 	}
5601 
5602 out:
5603 	btrfs_free_path(left_path);
5604 	btrfs_free_path(right_path);
5605 	kvfree(tmp_buf);
5606 	return ret;
5607 }
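
/*
 * Illustration only (not part of this file): at level 0 the strategy
 * in btrfs_compare_trees() reduces to an ordinary two-pointer merge
 * over two sorted key sequences. A minimal user-space sketch, with
 * ints standing in for btrfs keys and printf() standing in for
 * changed_cb():
 */
#include <stdio.h>

static void compare_sorted(const int *left, int nl, const int *right, int nr)
{
	int i = 0, j = 0;

	while (i < nl || j < nr) {
		if (j >= nr || (i < nl && left[i] < right[j])) {
			/* key only in the left tree: report it as new */
			printf("%d: new\n", left[i++]);
		} else if (i >= nl || left[i] > right[j]) {
			/* key only in the right tree: report it as deleted */
			printf("%d: deleted\n", right[j++]);
		} else {
			/* same key on both sides: deep-compare the items */
			printf("%d: same or changed\n", left[i]);
			i++;
			j++;
		}
	}
}

int main(void)
{
	const int left[] = { 1, 3, 4 };
	const int right[] = { 1, 2, 4 };

	compare_sorted(left, 3, right, 3);
	return 0;
}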
5608 
5609 /*
5610  * this is similar to btrfs_next_leaf, but does not try to preserve
5611  * and fix up the path.  It looks for and returns the next key in the
5612  * tree based on the current path and the min_trans parameter.
5613  *
5614  * 0 is returned if another key is found, < 0 if there are any errors,
5615  * and 1 is returned if there are no higher keys in the tree.
5616  *
5617  * path->keep_locks should be set to 1 on the search made before
5618  * calling this function.
5619  */
5620 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5621 			struct btrfs_key *key, int level, u64 min_trans)
5622 {
5623 	int slot;
5624 	struct extent_buffer *c;
5625 
5626 	WARN_ON(!path->keep_locks);
5627 	while (level < BTRFS_MAX_LEVEL) {
5628 		if (!path->nodes[level])
5629 			return 1;
5630 
5631 		slot = path->slots[level] + 1;
5632 		c = path->nodes[level];
5633 next:
5634 		if (slot >= btrfs_header_nritems(c)) {
5635 			int ret;
5636 			int orig_lowest;
5637 			struct btrfs_key cur_key;
5638 			if (level + 1 >= BTRFS_MAX_LEVEL ||
5639 			    !path->nodes[level + 1])
5640 				return 1;
5641 
5642 			if (path->locks[level + 1]) {
5643 				level++;
5644 				continue;
5645 			}
5646 
5647 			slot = btrfs_header_nritems(c) - 1;
5648 			if (level == 0)
5649 				btrfs_item_key_to_cpu(c, &cur_key, slot);
5650 			else
5651 				btrfs_node_key_to_cpu(c, &cur_key, slot);
5652 
5653 			orig_lowest = path->lowest_level;
5654 			btrfs_release_path(path);
5655 			path->lowest_level = level;
5656 			ret = btrfs_search_slot(NULL, root, &cur_key, path,
5657 						0, 0);
5658 			path->lowest_level = orig_lowest;
5659 			if (ret < 0)
5660 				return ret;
5661 
5662 			c = path->nodes[level];
5663 			slot = path->slots[level];
5664 			if (ret == 0)
5665 				slot++;
5666 			goto next;
5667 		}
5668 
5669 		if (level == 0)
5670 			btrfs_item_key_to_cpu(c, key, slot);
5671 		else {
5672 			u64 gen = btrfs_node_ptr_generation(c, slot);
5673 
5674 			if (gen < min_trans) {
5675 				slot++;
5676 				goto next;
5677 			}
5678 			btrfs_node_key_to_cpu(c, key, slot);
5679 		}
5680 		return 0;
5681 	}
5682 	return 1;
5683 }
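
/*
 * Hypothetical caller sketch (peek_next_key() is made up for
 * illustration and not part of this file): btrfs_find_next_key()
 * expects a path positioned by a search done with keep_locks set.
 */
static int peek_next_key(struct btrfs_root *root,
			 struct btrfs_key *search_key,
			 struct btrfs_key *next_key, u64 min_trans)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* keep_locks must be set on the search preceding the call */
	path->keep_locks = 1;
	ret = btrfs_search_slot(NULL, root, search_key, path, 0, 0);
	if (ret >= 0)
		ret = btrfs_find_next_key(root, path, next_key, 0, min_trans);
	btrfs_free_path(path);
	/* 0: next_key was filled in, 1: no higher key, < 0: error */
	return ret;
}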
5684 
5685 /*
5686  * search the tree again to find a leaf with greater keys
5687  * returns 0 if it found something or 1 if there are no greater leaves.
5688  * returns < 0 on io errors.
5689  */
5690 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5691 {
5692 	return btrfs_next_old_leaf(root, path, 0);
5693 }
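
/*
 * Hypothetical iteration sketch (walk_items() is made up for
 * illustration): the usual pattern for visiting every item of a tree
 * combines btrfs_search_slot() with btrfs_next_leaf().
 */
static int walk_items(struct btrfs_root *root, struct btrfs_path *path)
{
	struct extent_buffer *leaf;
	struct btrfs_key key = { 0 };	/* start from the smallest key */
	int ret;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)	/* 1: no more leaves, < 0: error */
				return ret < 0 ? ret : 0;
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		/* ... process the item at path->slots[0] here ... */
		path->slots[0]++;
	}
}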
5694 
5695 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5696 			u64 time_seq)
5697 {
5698 	int slot;
5699 	int level;
5700 	struct extent_buffer *c;
5701 	struct extent_buffer *next;
5702 	struct btrfs_key key;
5703 	u32 nritems;
5704 	int ret;
5705 	int old_spinning = path->leave_spinning;
5706 	int next_rw_lock = 0;
5707 
5708 	nritems = btrfs_header_nritems(path->nodes[0]);
5709 	if (nritems == 0)
5710 		return 1;
5711 
5712 	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5713 again:
5714 	level = 1;
5715 	next = NULL;
5716 	next_rw_lock = 0;
5717 	btrfs_release_path(path);
5718 
5719 	path->keep_locks = 1;
5720 	path->leave_spinning = 1;
5721 
5722 	if (time_seq)
5723 		ret = btrfs_search_old_slot(root, &key, path, time_seq);
5724 	else
5725 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5726 	path->keep_locks = 0;
5727 
5728 	if (ret < 0)
5729 		return ret;
5730 
5731 	nritems = btrfs_header_nritems(path->nodes[0]);
5732 	/*
5733 	 * by releasing the path above we dropped all our locks.  A balance
5734 	 * could have added more items next to the key that used to be
5735 	 * at the very end of the block.  So, check again here and
5736 	 * advance the path if there are now more items available.
5737 	 */
5738 	if (nritems > 0 && path->slots[0] < nritems - 1) {
5739 		if (ret == 0)
5740 			path->slots[0]++;
5741 		ret = 0;
5742 		goto done;
5743 	}
5744 	/*
5745 	 * So the above check misses one case:
5746 	 * - after releasing the path above, someone has removed the item that
5747 	 *   used to be at the very end of the block, and balance between leaves
5748 	 *   gets another one with a bigger key.offset to replace it.
5749 	 *
5750 	 * This one should be returned as well, or we can get leaf corruption
5751 	 * later (esp. in __btrfs_drop_extents()).
5752 	 *
5753 	 * A bit more explanation of this check:
5754 	 * with ret > 0, the key wasn't found, and the path points to the slot
5755 	 * where it would be inserted, so the item at path->slots[0] must be
5756 	 * the bigger one.
5757 	 */
5758 	if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
5759 		ret = 0;
5760 		goto done;
5761 	}
5762 
5763 	while (level < BTRFS_MAX_LEVEL) {
5764 		if (!path->nodes[level]) {
5765 			ret = 1;
5766 			goto done;
5767 		}
5768 
5769 		slot = path->slots[level] + 1;
5770 		c = path->nodes[level];
5771 		if (slot >= btrfs_header_nritems(c)) {
5772 			level++;
5773 			if (level == BTRFS_MAX_LEVEL) {
5774 				ret = 1;
5775 				goto done;
5776 			}
5777 			continue;
5778 		}
5779 
5780 		if (next) {
5781 			btrfs_tree_unlock_rw(next, next_rw_lock);
5782 			free_extent_buffer(next);
5783 		}
5784 
5785 		next = c;
5786 		next_rw_lock = path->locks[level];
5787 		ret = read_block_for_search(NULL, root, path, &next, level,
5788 					    slot, &key, 0);
5789 		if (ret == -EAGAIN)
5790 			goto again;
5791 
5792 		if (ret < 0) {
5793 			btrfs_release_path(path);
5794 			goto done;
5795 		}
5796 
5797 		if (!path->skip_locking) {
5798 			ret = btrfs_try_tree_read_lock(next);
5799 			if (!ret && time_seq) {
5800 				/*
5801 				 * If we don't get the lock, we may be racing
5802 				 * with push_leaf_left, which holds that lock
5803 				 * while itself waiting for the leaf we
5804 				 * currently have locked. To resolve this, we
5805 				 * give up our lock and cycle.
5806 				 */
5807 				free_extent_buffer(next);
5808 				btrfs_release_path(path);
5809 				cond_resched();
5810 				goto again;
5811 			}
5812 			if (!ret) {
5813 				btrfs_set_path_blocking(path);
5814 				btrfs_tree_read_lock(next);
5815 				btrfs_clear_path_blocking(path, next,
5816 							  BTRFS_READ_LOCK);
5817 			}
5818 			next_rw_lock = BTRFS_READ_LOCK;
5819 		}
5820 		break;
5821 	}
5822 	path->slots[level] = slot;
5823 	while (1) {
5824 		level--;
5825 		c = path->nodes[level];
5826 		if (path->locks[level])
5827 			btrfs_tree_unlock_rw(c, path->locks[level]);
5828 
5829 		free_extent_buffer(c);
5830 		path->nodes[level] = next;
5831 		path->slots[level] = 0;
5832 		if (!path->skip_locking)
5833 			path->locks[level] = next_rw_lock;
5834 		if (!level)
5835 			break;
5836 
5837 		ret = read_block_for_search(NULL, root, path, &next, level,
5838 					    0, &key, 0);
5839 		if (ret == -EAGAIN)
5840 			goto again;
5841 
5842 		if (ret < 0) {
5843 			btrfs_release_path(path);
5844 			goto done;
5845 		}
5846 
5847 		if (!path->skip_locking) {
5848 			ret = btrfs_try_tree_read_lock(next);
5849 			if (!ret) {
5850 				btrfs_set_path_blocking(path);
5851 				btrfs_tree_read_lock(next);
5852 				btrfs_clear_path_blocking(path, next,
5853 							  BTRFS_READ_LOCK);
5854 			}
5855 			next_rw_lock = BTRFS_READ_LOCK;
5856 		}
5857 	}
5858 	ret = 0;
5859 done:
5860 	unlock_up(path, 0, 1, 0, NULL);
5861 	path->leave_spinning = old_spinning;
5862 	if (!old_spinning)
5863 		btrfs_set_path_blocking(path);
5864 
5865 	return ret;
5866 }
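
/*
 * Hypothetical time-travel sketch (walk_old_tree() is made up for
 * illustration, assuming the tree mod log helpers declared in ctree.h):
 * a non-zero time_seq must be pinned via btrfs_get_tree_mod_seq() so
 * the old tree state stays reconstructible while we walk it.
 */
static int walk_old_tree(struct btrfs_root *root, struct btrfs_key *first_key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct seq_list elem = SEQ_LIST_INIT(elem);
	struct btrfs_path *path;
	u64 time_seq;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	time_seq = btrfs_get_tree_mod_seq(fs_info, &elem);
	ret = btrfs_search_old_slot(root, first_key, path, time_seq);
	while (ret == 0) {
		/* ... process the items of path->nodes[0] here ... */
		ret = btrfs_next_old_leaf(root, path, time_seq);
	}
	btrfs_put_tree_mod_seq(fs_info, &elem);
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}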
5867 
5868 /*
5869  * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5870  * searching until it gets past min_objectid or finds an item of 'type'
5871  *
5872  * returns 0 if something is found, 1 if nothing was found and < 0 on error
5873  */
5874 int btrfs_previous_item(struct btrfs_root *root,
5875 			struct btrfs_path *path, u64 min_objectid,
5876 			int type)
5877 {
5878 	struct btrfs_key found_key;
5879 	struct extent_buffer *leaf;
5880 	u32 nritems;
5881 	int ret;
5882 
5883 	while (1) {
5884 		if (path->slots[0] == 0) {
5885 			btrfs_set_path_blocking(path);
5886 			ret = btrfs_prev_leaf(root, path);
5887 			if (ret != 0)
5888 				return ret;
5889 		} else {
5890 			path->slots[0]--;
5891 		}
5892 		leaf = path->nodes[0];
5893 		nritems = btrfs_header_nritems(leaf);
5894 		if (nritems == 0)
5895 			return 1;
5896 		if (path->slots[0] == nritems)
5897 			path->slots[0]--;
5898 
5899 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5900 		if (found_key.objectid < min_objectid)
5901 			break;
5902 		if (found_key.type == type)
5903 			return 0;
5904 		if (found_key.objectid == min_objectid &&
5905 		    found_key.type < type)
5906 			break;
5907 	}
5908 	return 1;
5909 }
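
/*
 * Hypothetical usage sketch (find_prev_of_type() is made up for
 * illustration): position the path at an upper bound, then walk
 * backwards to the closest preceding item of the wanted type.
 */
static int find_prev_of_type(struct btrfs_root *root, struct btrfs_path *path,
			     u64 objectid, int type)
{
	struct btrfs_key key;
	int ret;

	key.objectid = objectid;
	key.type = (u8)type;
	key.offset = (u64)-1;	/* largest offset: land just past the item */

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	/* the search landed at or after the key; step back to the item */
	return btrfs_previous_item(root, path, objectid, type);
}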
5910 
5911 /*
5912  * search in the extent tree to find a previous Metadata/Data extent item
5913  * with a minimum objectid.
5914  *
5915  * returns 0 if something is found, 1 if nothing was found and < 0 on error
5916  */
5917 int btrfs_previous_extent_item(struct btrfs_root *root,
5918 			struct btrfs_path *path, u64 min_objectid)
5919 {
5920 	struct btrfs_key found_key;
5921 	struct extent_buffer *leaf;
5922 	u32 nritems;
5923 	int ret;
5924 
5925 	while (1) {
5926 		if (path->slots[0] == 0) {
5927 			btrfs_set_path_blocking(path);
5928 			ret = btrfs_prev_leaf(root, path);
5929 			if (ret != 0)
5930 				return ret;
5931 		} else {
5932 			path->slots[0]--;
5933 		}
5934 		leaf = path->nodes[0];
5935 		nritems = btrfs_header_nritems(leaf);
5936 		if (nritems == 0)
5937 			return 1;
5938 		if (path->slots[0] == nritems)
5939 			path->slots[0]--;
5940 
5941 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5942 		if (found_key.objectid < min_objectid)
5943 			break;
5944 		if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
5945 		    found_key.type == BTRFS_METADATA_ITEM_KEY)
5946 			return 0;
5947 		if (found_key.objectid == min_objectid &&
5948 		    found_key.type < BTRFS_EXTENT_ITEM_KEY)
5949 			break;
5950 	}
5951 	return 1;
5952 }
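
/*
 * Hypothetical usage sketch (find_covering_extent() is made up for
 * illustration): a search for a byte number may land just past the
 * extent item that covers it; walking backwards yields the candidate,
 * which the caller still has to range-check against bytenr.
 */
static int find_covering_extent(struct btrfs_root *extent_root,
				struct btrfs_path *path, u64 bytenr)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	/* step back to the closest preceding extent/metadata item */
	return btrfs_previous_extent_item(extent_root, path, 0);
}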
5953