/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *ins_key,
		      struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot);
static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb);

struct btrfs_path *btrfs_alloc_path(void)
{
	struct btrfs_path *path;
	path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
	return path;
}
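
/*
 * Illustrative usage sketch (not a call site in this file): a typical
 * caller pairs btrfs_alloc_path() with btrfs_free_path(), which drops
 * any locks and extent buffer references via btrfs_release_path():
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
 *	... examine path->nodes[0] / path->slots[0] ...
 *	btrfs_free_path(path);
 */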

/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i] || !p->locks[i])
			continue;
		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
		if (p->locks[i] == BTRFS_READ_LOCK)
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		else if (p->locks[i] == BTRFS_WRITE_LOCK)
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
	}
}

/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy: when lockdep is enabled,
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held, int held_rw)
{
	int i;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* lockdep really cares that we take all of these spinlocks
	 * in the right order.  If any of the locks in the path are not
	 * currently blocking, it is going to complain.  So, make really
	 * really sure by forcing the path to blocking before we clear
	 * the path blocking.
	 */
	if (held) {
		btrfs_set_lock_blocking_rw(held, held_rw);
		if (held_rw == BTRFS_WRITE_LOCK)
			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
		else if (held_rw == BTRFS_READ_LOCK)
			held_rw = BTRFS_READ_LOCK_BLOCKING;
	}
	btrfs_set_path_blocking(p);
#endif

	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i]) {
			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
				p->locks[i] = BTRFS_WRITE_LOCK;
			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
				p->locks[i] = BTRFS_READ_LOCK;
		}
	}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (held)
		btrfs_clear_lock_blocking_rw(held, held_rw);
#endif
}

/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}

/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that have no locks or extent buffers held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}

/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was cow'ed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}

/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/* loop around taking references on and read locking the root node of
 * the tree until you end up with a read lock on the root.  A read
 * locked buffer is returned, with a reference held.
 */
static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/* cow-only roots (everything that is not a reference counted cow
 * subvolume) just get put onto a simple dirty list.  transaction.c
 * walks this to make sure they get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	if (test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state) &&
	    list_empty(&root->dirty_list)) {
		list_add(&root->dirty_list,
			 &root->fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&root->fs_info->trans_lock);
}

/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
				     new_root_objectid, &disk_key, level,
				     buf->start, 0);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
			    BTRFS_FSID_SIZE);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);

	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
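
/*
 * Illustrative call sketch (an assumption about the caller shape, not a
 * verbatim call site; the real caller is the snapshot creation code in
 * transaction.c):
 *
 *	struct extent_buffer *old = btrfs_lock_root_node(root);
 *	ret = btrfs_copy_root(trans, root, old, &new_root_node, objectid);
 *	btrfs_tree_unlock(old);
 *	free_extent_buffer(old);
 */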

enum mod_log_op {
	MOD_LOG_KEY_REPLACE,
	MOD_LOG_KEY_ADD,
	MOD_LOG_KEY_REMOVE,
	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
	MOD_LOG_MOVE_KEYS,
	MOD_LOG_ROOT_REPLACE,
};

struct tree_mod_move {
	int dst_slot;
	int nr_items;
};

struct tree_mod_root {
	u64 logical;
	u8 level;
};

struct tree_mod_elem {
	struct rb_node node;
	u64 index;		/* shifted logical */
	u64 seq;
	enum mod_log_op op;

	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
	int slot;

	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
	u64 generation;

	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* this is used for op == MOD_LOG_MOVE_KEYS */
	struct tree_mod_move move;

	/* this is used for op == MOD_LOG_ROOT_REPLACE */
	struct tree_mod_root old_root;
};
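
/*
 * Illustrative sketch of how tree_mod_elem entries cluster in the rb tree:
 * entries are keyed by (index, seq), so all modifications of one block sit
 * next to each other and are ordered by time sequence, e.g.
 *
 *	(index = 5, seq = 11)  MOD_LOG_KEY_REMOVE
 *	(index = 5, seq = 12)  MOD_LOG_MOVE_KEYS
 *	(index = 7, seq = 10)  MOD_LOG_ROOT_REPLACE
 *
 * (values above are made up for illustration)
 */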

static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
{
	read_lock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
{
	read_unlock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
{
	write_lock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
{
	write_unlock(&fs_info->tree_mod_log_lock);
}

/*
 * Pull a new tree mod seq number for our operation.
 */
static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
{
	return atomic64_inc_return(&fs_info->tree_mod_seq);
}

/*
 * This adds a new blocker to the tree mod log's blocker list if the @elem
 * passed does not already have a sequence number set. So when a caller expects
 * to record tree modifications, it should set elem->seq to zero before
 * calling btrfs_get_tree_mod_seq.
 * Returns a fresh, unused tree log modification sequence number, even if no new
 * blocker was added.
 */
u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			   struct seq_list *elem)
{
	tree_mod_log_write_lock(fs_info);
	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!elem->seq) {
		elem->seq = btrfs_inc_tree_mod_seq(fs_info);
		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);
	tree_mod_log_write_unlock(fs_info);

	return elem->seq;
}
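
/*
 * Sketch of the intended get/put pairing (an assumed caller shape, not a
 * call site in this file): a reader that needs a stable view of the tree
 * takes a sequence number and drops it when done, which allows older log
 * entries to be pruned again:
 *
 *	struct seq_list elem = {};	(seq must start out as zero)
 *	seq = btrfs_get_tree_mod_seq(fs_info, &elem);
 *	... use seq as time_seq for rewinding ...
 *	btrfs_put_tree_mod_seq(fs_info, &elem);
 */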

void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct rb_node *next;
	struct seq_list *cur_elem;
	struct tree_mod_elem *tm;
	u64 min_seq = (u64)-1;
	u64 seq_putting = elem->seq;

	if (!seq_putting)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	list_del(&elem->list);
	elem->seq = 0;

	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
		if (cur_elem->seq < min_seq) {
			if (seq_putting > cur_elem->seq) {
				/*
				 * blocker with lower sequence number exists, we
				 * cannot remove anything from the log
				 */
				spin_unlock(&fs_info->tree_mod_seq_lock);
				return;
			}
			min_seq = cur_elem->seq;
		}
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	/*
	 * anything that's lower than the lowest existing (read: blocked)
	 * sequence number can be removed from the tree.
	 */
	tree_mod_log_write_lock(fs_info);
	tm_root = &fs_info->tree_mod_log;
	for (node = rb_first(tm_root); node; node = next) {
		next = rb_next(node);
		tm = container_of(node, struct tree_mod_elem, node);
		if (tm->seq > min_seq)
			continue;
		rb_erase(node, tm_root);
		kfree(tm);
	}
	tree_mod_log_write_unlock(fs_info);
}

/*
 * key order of the log:
 *       index -> sequence
 *
 * the index is the shifted logical of the *new* root node for root replace
 * operations, or the shifted logical of the affected block for all other
 * operations.
 *
 * Note: must be called with write lock (tree_mod_log_write_lock).
 */
static noinline int
__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
{
	struct rb_root *tm_root;
	struct rb_node **new;
	struct rb_node *parent = NULL;
	struct tree_mod_elem *cur;

	BUG_ON(!tm);

	tm->seq = btrfs_inc_tree_mod_seq(fs_info);

	tm_root = &fs_info->tree_mod_log;
	new = &tm_root->rb_node;
	while (*new) {
		cur = container_of(*new, struct tree_mod_elem, node);
		parent = *new;
		if (cur->index < tm->index)
			new = &((*new)->rb_left);
		else if (cur->index > tm->index)
			new = &((*new)->rb_right);
		else if (cur->seq < tm->seq)
			new = &((*new)->rb_left);
		else if (cur->seq > tm->seq)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&tm->node, parent, new);
	rb_insert_color(&tm->node, tm_root);
	return 0;
}

/*
 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
 * returns zero with the tree_mod_log_lock acquired. The caller must hold
 * this until all tree mod log insertions are recorded in the rb tree and then
 * call tree_mod_log_write_unlock() to release.
 */
static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb)
{
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 1;
	if (eb && btrfs_header_level(eb) == 0)
		return 1;

	tree_mod_log_write_lock(fs_info);
	if (list_empty(&(fs_info)->tree_mod_seq_list)) {
		tree_mod_log_write_unlock(fs_info);
		return 1;
	}

	return 0;
}

/* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb)
{
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 0;
	if (eb && btrfs_header_level(eb) == 0)
		return 0;

	return 1;
}
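
/*
 * The two helpers above are used in a two-phase pattern, sketched here
 * (this mirrors tree_mod_log_insert_key() below):
 *
 *	if (!tree_mod_need_log(fs_info, eb))	(cheap, lockless check)
 *		return 0;
 *	tm = alloc_tree_mod_elem(...);		(allocate outside the lock)
 *	if (tree_mod_dont_log(fs_info, eb)) {	(recheck; takes write lock)
 *		kfree(tm);
 *		return 0;
 *	}
 *	ret = __tree_mod_log_insert(fs_info, tm);
 *	tree_mod_log_write_unlock(fs_info);
 */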

static struct tree_mod_elem *
alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
		    enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm)
		return NULL;

	tm->index = eb->start >> PAGE_CACHE_SHIFT;
	if (op != MOD_LOG_KEY_ADD) {
		btrfs_node_key(eb, &tm->key, slot);
		tm->blockptr = btrfs_node_blockptr(eb, slot);
	}
	tm->op = op;
	tm->slot = slot;
	tm->generation = btrfs_node_ptr_generation(eb, slot);
	RB_CLEAR_NODE(&tm->node);

	return tm;
}

static noinline int
tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
			struct extent_buffer *eb, int slot,
			enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;

	if (!tree_mod_need_log(fs_info, eb))
		return 0;

	tm = alloc_tree_mod_elem(eb, slot, op, flags);
	if (!tm)
		return -ENOMEM;

	if (tree_mod_dont_log(fs_info, eb)) {
		kfree(tm);
		return 0;
	}

	ret = __tree_mod_log_insert(fs_info, tm);
	tree_mod_log_write_unlock(fs_info);
	if (ret)
		kfree(tm);

	return ret;
}

static noinline int
tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *eb, int dst_slot, int src_slot,
			 int nr_items, gfp_t flags)
{
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int ret = 0;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(fs_info, eb))
		return 0;

	tm_list = kzalloc(nr_items * sizeof(struct tree_mod_elem *), flags);
	if (!tm_list)
		return -ENOMEM;

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->index = eb->start >> PAGE_CACHE_SHIFT;
	tm->slot = src_slot;
	tm->move.dst_slot = dst_slot;
	tm->move.nr_items = nr_items;
	tm->op = MOD_LOG_MOVE_KEYS;

	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
		    MOD_LOG_KEY_REMOVE_WHILE_MOVING, flags);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, eb))
		goto free_tms;
	locked = 1;

	/*
	 * When we overwrite something during the move, we log these removals.
	 * This can only happen when we move towards the beginning of the
	 * buffer, i.e. dst_slot < src_slot.
	 */
	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
		if (ret)
			goto free_tms;
	}

	ret = __tree_mod_log_insert(fs_info, tm);
	if (ret)
		goto free_tms;
	tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);

	return 0;
free_tms:
	for (i = 0; i < nr_items; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);
	kfree(tm);

	return ret;
}

static inline int
__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
		       struct tree_mod_elem **tm_list,
		       int nritems)
{
	int i, j;
	int ret;

	for (i = nritems - 1; i >= 0; i--) {
		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
		if (ret) {
			for (j = nritems - 1; j > i; j--)
				rb_erase(&tm_list[j]->node,
					 &fs_info->tree_mod_log);
			return ret;
		}
	}

	return 0;
}

static noinline int
tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *old_root,
			 struct extent_buffer *new_root, gfp_t flags,
			 int log_removal)
{
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int ret = 0;
	int i;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (log_removal && btrfs_header_level(old_root) > 0) {
		nritems = btrfs_header_nritems(old_root);
		tm_list = kzalloc(nritems * sizeof(struct tree_mod_elem *),
				  flags);
		if (!tm_list) {
			ret = -ENOMEM;
			goto free_tms;
		}
		for (i = 0; i < nritems; i++) {
			tm_list[i] = alloc_tree_mod_elem(old_root, i,
			    MOD_LOG_KEY_REMOVE_WHILE_FREEING, flags);
			if (!tm_list[i]) {
				ret = -ENOMEM;
				goto free_tms;
			}
		}
	}

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->index = new_root->start >> PAGE_CACHE_SHIFT;
	tm->old_root.logical = old_root->start;
	tm->old_root.level = btrfs_header_level(old_root);
	tm->generation = btrfs_header_generation(old_root);
	tm->op = MOD_LOG_ROOT_REPLACE;

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;

	if (tm_list)
		ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
	if (!ret)
		ret = __tree_mod_log_insert(fs_info, tm);

	tree_mod_log_write_unlock(fs_info);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return ret;

free_tms:
	if (tm_list) {
		for (i = 0; i < nritems; i++)
			kfree(tm_list[i]);
		kfree(tm_list);
	}
	kfree(tm);

	return ret;
}

static struct tree_mod_elem *
__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
		      int smallest)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct tree_mod_elem *cur = NULL;
	struct tree_mod_elem *found = NULL;
	u64 index = start >> PAGE_CACHE_SHIFT;

	tree_mod_log_read_lock(fs_info);
	tm_root = &fs_info->tree_mod_log;
	node = tm_root->rb_node;
	while (node) {
		cur = container_of(node, struct tree_mod_elem, node);
		if (cur->index < index) {
			node = node->rb_left;
		} else if (cur->index > index) {
			node = node->rb_right;
		} else if (cur->seq < min_seq) {
			node = node->rb_left;
		} else if (!smallest) {
			/* we want the node with the highest seq */
			if (found)
				BUG_ON(found->seq > cur->seq);
			found = cur;
			node = node->rb_left;
		} else if (cur->seq > min_seq) {
			/* we want the node with the smallest seq */
			if (found)
				BUG_ON(found->seq < cur->seq);
			found = cur;
			node = node->rb_right;
		} else {
			found = cur;
			break;
		}
	}
	tree_mod_log_read_unlock(fs_info);

	return found;
}

/*
 * this returns the element from the log with the smallest time sequence
 * value that's in the log (the oldest log item). any element with a time
 * sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
			   u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 1);
}

/*
 * this returns the element from the log with the largest time sequence
 * value that's in the log (the most recent log item). any element with
 * a time sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 0);
}

static noinline int
tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     struct extent_buffer *src, unsigned long dst_offset,
		     unsigned long src_offset, int nr_items)
{
	int ret = 0;
	struct tree_mod_elem **tm_list = NULL;
	struct tree_mod_elem **tm_list_add, **tm_list_rem;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
		return 0;

	tm_list = kzalloc(nr_items * 2 * sizeof(struct tree_mod_elem *),
			  GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	tm_list_add = tm_list;
	tm_list_rem = tm_list + nr_items;
	for (i = 0; i < nr_items; i++) {
		tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
		    MOD_LOG_KEY_REMOVE, GFP_NOFS);
		if (!tm_list_rem[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}

		tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
		    MOD_LOG_KEY_ADD, GFP_NOFS);
		if (!tm_list_add[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;
	locked = 1;

	for (i = 0; i < nr_items; i++) {
		ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
		if (ret)
			goto free_tms;
		ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
		if (ret)
			goto free_tms;
	}

	tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nr_items * 2; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);

	return ret;
}

static inline void
tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     int dst_offset, int src_offset, int nr_items)
{
	int ret;
	ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
				       nr_items, GFP_NOFS);
	BUG_ON(ret < 0);
}

static noinline void
tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
			  struct extent_buffer *eb, int slot, int atomic)
{
	int ret;

	ret = tree_mod_log_insert_key(fs_info, eb, slot,
					MOD_LOG_KEY_REPLACE,
					atomic ? GFP_ATOMIC : GFP_NOFS);
	BUG_ON(ret < 0);
}

static noinline int
tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
{
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int i;
	int ret = 0;

	if (btrfs_header_level(eb) == 0)
		return 0;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	nritems = btrfs_header_nritems(eb);
	tm_list = kzalloc(nritems * sizeof(struct tree_mod_elem *),
			  GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	for (i = 0; i < nritems; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i,
		    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, eb))
		goto free_tms;

	ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
	tree_mod_log_write_unlock(fs_info);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nritems; i++)
		kfree(tm_list[i]);
	kfree(tm_list);

	return ret;
}

static noinline void
tree_mod_log_set_root_pointer(struct btrfs_root *root,
			      struct extent_buffer *new_root_node,
			      int log_removal)
{
	int ret;
	ret = tree_mod_log_insert_root(root->fs_info, root->node,
				       new_root_node, GFP_NOFS, log_removal);
	BUG_ON(ret < 0);
}

/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared. If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		return 1;
#endif
	return 0;
}

static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree blocks
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in the tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in the tree block.
	 * The reason for this is that some operations (such as drop tree)
	 * are only allowed on blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, root, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1);
			BUG_ON(ret); /* -ENOMEM */

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0);
				BUG_ON(ret); /* -ENOMEM */
				ret = btrfs_inc_ref(trans, root, cow, 1);
				BUG_ON(ret); /* -ENOMEM */
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			BUG_ON(ret); /* -ENOMEM */
		}
		if (new_flags != 0) {
			int level = btrfs_header_level(buf);

			ret = btrfs_set_disk_extent_flags(trans, root,
							  buf->start,
							  buf->len,
							  new_flags, level, 0);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			BUG_ON(ret); /* -ENOMEM */
			ret = btrfs_dec_ref(trans, root, buf, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		clean_tree_block(trans, root, buf);
		*last_ref = 1;
	}
	return 0;
}

/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent)
			parent_start = parent->start;
		else
			parent_start = 0;
	} else
		parent_start = 0;

	cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
				     root->root_key.objectid, &disk_key,
				     level, search_start, empty_size);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
			    BTRFS_FSID_SIZE);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}

	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
		if (ret)
			return ret;
	}

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;
		else
			parent_start = 0;

		extent_buffer_get(cow);
		tree_mod_log_set_root_pointer(root, cow, 1);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
			parent_start = parent->start;
		else
			parent_start = 0;

		WARN_ON(trans->transid != btrfs_header_generation(parent));
		tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
					MOD_LOG_KEY_REPLACE, GFP_NOFS);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		if (last_ref) {
			ret = tree_mod_log_free_eb(root->fs_info, buf);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				return ret;
			}
		}
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

/*
 * returns the logical address of the oldest predecessor of the given root.
 * entries older than time_seq are ignored.
 */
static struct tree_mod_elem *
__tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
			   struct extent_buffer *eb_root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct tree_mod_elem *found = NULL;
	u64 root_logical = eb_root->start;
	int looped = 0;

	if (!time_seq)
		return NULL;

	/*
	 * the very last operation that's logged for a root is the replacement
	 * operation (if it is replaced at all). this has the index of the *new*
	 * root, making it the very first operation that's logged for this root.
	 */
	while (1) {
		tm = tree_mod_log_search_oldest(fs_info, root_logical,
						time_seq);
		if (!looped && !tm)
			return NULL;
		/*
		 * if there are no tree operations for the oldest root, we simply
		 * return it. this should only happen if that (old) root is at
		 * level 0.
		 */
		if (!tm)
			break;

		/*
		 * if there's an operation that's not a root replacement, we
		 * found the oldest version of our root. normally, we'll find a
		 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
		 */
		if (tm->op != MOD_LOG_ROOT_REPLACE)
			break;

		found = tm;
		root_logical = tm->old_root.logical;
		looped = 1;
	}

	/* if there's no old root to return, return what we found instead */
	if (!found)
		found = tm;

	return found;
}

/*
 * tm is a pointer to the first operation to rewind within eb. then, all
 * previous operations will be rewound (until we reach something older than
 * time_seq).
 */
static void
__tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
		      u64 time_seq, struct tree_mod_elem *first_tm)
{
	u32 n;
	struct rb_node *next;
	struct tree_mod_elem *tm = first_tm;
	unsigned long o_dst;
	unsigned long o_src;
	unsigned long p_size = sizeof(struct btrfs_key_ptr);

	n = btrfs_header_nritems(eb);
	tree_mod_log_read_lock(fs_info);
	while (tm && tm->seq >= time_seq) {
		/*
		 * all the operations are recorded with the operator used for
		 * the modification. as we're going backwards, we do the
		 * opposite of each operation here.
		 */
		switch (tm->op) {
		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
			BUG_ON(tm->slot < n);
			/* Fallthrough */
		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
		case MOD_LOG_KEY_REMOVE:
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			n++;
			break;
		case MOD_LOG_KEY_REPLACE:
			BUG_ON(tm->slot >= n);
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			break;
		case MOD_LOG_KEY_ADD:
			/* if a move operation is needed it's in the log */
			n--;
			break;
		case MOD_LOG_MOVE_KEYS:
			o_dst = btrfs_node_key_ptr_offset(tm->slot);
			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
			memmove_extent_buffer(eb, o_dst, o_src,
					      tm->move.nr_items * p_size);
			break;
		case MOD_LOG_ROOT_REPLACE:
			/*
			 * this operation is special. for roots, this must be
			 * handled explicitly before rewinding.
			 * for non-roots, this operation may exist if the node
			 * was a root: root A -> child B; then A gets empty and
			 * B is promoted to the new root. in the mod log, we'll
			 * have a root-replace operation for B, a tree block
			 * that is not a root. we simply ignore that operation.
			 */
			break;
		}
		next = rb_next(&tm->node);
		if (!next)
			break;
		tm = container_of(next, struct tree_mod_elem, node);
		if (tm->index != first_tm->index)
			break;
	}
	tree_mod_log_read_unlock(fs_info);
	btrfs_set_header_nritems(eb, n);
}

/*
 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
 * is returned. If rewind operations happen, a fresh buffer is returned. The
 * returned buffer is always read-locked. If the returned buffer is not the
 * input buffer, the lock on the input buffer is released and the input buffer
 * is freed (its refcount is decremented).
 */
static struct extent_buffer *
tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		    struct extent_buffer *eb, u64 time_seq)
{
	struct extent_buffer *eb_rewin;
	struct tree_mod_elem *tm;

	if (!time_seq)
		return eb;

	if (btrfs_header_level(eb) == 0)
		return eb;

	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
	if (!tm)
		return eb;

	btrfs_set_path_blocking(path);
	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);

	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		BUG_ON(tm->slot != 0);
		eb_rewin = alloc_dummy_extent_buffer(eb->start,
						fs_info->tree_root->nodesize);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
		btrfs_set_header_bytenr(eb_rewin, eb->start);
		btrfs_set_header_backref_rev(eb_rewin,
					     btrfs_header_backref_rev(eb));
		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
	} else {
		eb_rewin = btrfs_clone_extent_buffer(eb);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
	}

	btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
	btrfs_tree_read_unlock_blocking(eb);
	free_extent_buffer(eb);

	extent_buffer_get(eb_rewin);
	btrfs_tree_read_lock(eb_rewin);
	__tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
	WARN_ON(btrfs_header_nritems(eb_rewin) >
		BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));

	return eb_rewin;
}

/*
 * get_old_root() rewinds the state of @root's root node to the given @time_seq
 * value. If there are no changes, the current root->root_node is returned. If
 * anything changed in between, there's a fresh buffer allocated on which the
 * rewind operations are done. In any case, the returned buffer is read locked.
 * Returns NULL on error (with no locks held).
 */
static inline struct extent_buffer *
get_old_root(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct extent_buffer *eb = NULL;
	struct extent_buffer *eb_root;
	struct extent_buffer *old;
	struct tree_mod_root *old_root = NULL;
	u64 old_generation = 0;
	u64 logical;
	u32 blocksize;

	eb_root = btrfs_read_lock_root_node(root);
	tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
	if (!tm)
		return eb_root;

	if (tm->op == MOD_LOG_ROOT_REPLACE) {
		old_root = &tm->old_root;
		old_generation = tm->generation;
		logical = old_root->logical;
	} else {
		logical = eb_root->start;
	}

	tm = tree_mod_log_search(root->fs_info, logical, time_seq);
	if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		blocksize = btrfs_level_size(root, old_root->level);
		old = read_tree_block(root, logical, blocksize, 0);
		if (WARN_ON(!old || !extent_buffer_uptodate(old))) {
			free_extent_buffer(old);
			btrfs_warn(root->fs_info,
				"failed to read tree block %llu from get_old_root", logical);
		} else {
			eb = btrfs_clone_extent_buffer(old);
			free_extent_buffer(old);
		}
	} else if (old_root) {
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		eb = alloc_dummy_extent_buffer(logical, root->nodesize);
	} else {
		btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
		eb = btrfs_clone_extent_buffer(eb_root);
		btrfs_tree_read_unlock_blocking(eb_root);
		free_extent_buffer(eb_root);
	}

	if (!eb)
		return NULL;
	extent_buffer_get(eb);
	btrfs_tree_read_lock(eb);
	if (old_root) {
		btrfs_set_header_bytenr(eb, eb->start);
		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
		btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
		btrfs_set_header_level(eb, old_root->level);
		btrfs_set_header_generation(eb, old_generation);
	}
	if (tm)
		__tree_mod_log_rewind(root->fs_info, eb, time_seq, tm);
	else
		WARN_ON(btrfs_header_level(eb) != 0);
	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));

	return eb;
}

int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	int level;
	struct extent_buffer *eb_root = btrfs_root_node(root);

	tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
	if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
		level = tm->old_root.level;
	} else {
		level = btrfs_header_level(eb_root);
	}
	free_extent_buffer(eb_root);

	return level;
}

static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (unlikely(test_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state)))
		return 0;
#endif
	/* ensure we can see the force_cow */
	smp_rmb();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create a snapshot during committing the transaction,
	 *    after we've finished copying the src root, we must COW the
	 *    shared block to ensure metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
		return 0;
	return 1;
}

/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't cow'd more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	u64 search_start;
	int ret;

	if (trans->transaction != root->fs_info->running_transaction)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid,
		       root->fs_info->running_transaction->transid);

	if (trans->transid != root->fs_info->generation)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid, root->fs_info->generation);

	if (!should_cow_block(trans, root, buf)) {
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);

	if (parent)
		btrfs_set_lock_blocking(parent);
	btrfs_set_lock_blocking(buf);

	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}

/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}
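
/*
 * Worked example (numbers made up for illustration): with a 16KiB
 * blocksize, blocks at 100000 and 120000 are "close", because
 * 120000 - (100000 + 16384) = 3616 < 32768, while blocks at 100000 and
 * 200000 are not, because 200000 - (100000 + 16384) = 83616.
 */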

/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}

/*
 * same as comp_keys only with two btrfs_key's
 */
int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
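
/*
 * Example of the resulting ordering (a sketch): keys compare by objectid
 * first, then type, then offset, so
 *
 *	(256, BTRFS_INODE_ITEM_KEY, 0)
 * sorts before
 *	(256, BTRFS_INODE_REF_KEY, 0)
 * which sorts before
 *	(257, BTRFS_INODE_ITEM_KEY, 0)
 *
 * regardless of the offsets involved.
 */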

/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct extent_buffer *cur;
	u64 blocknr;
	u64 gen;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	parent_level = btrfs_header_level(parent);

	WARN_ON(trans->transaction != root->fs_info->running_transaction);
	WARN_ON(trans->transid != root->fs_info->generation);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = btrfs_level_size(root, parent_level - 1);
	end_slot = parent_nritems;

	if (parent_nritems == 1)
		return 0;

	btrfs_set_lock_blocking(parent);

	for (i = start_slot; i < end_slot; i++) {
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot - 2) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = btrfs_find_tree_block(root, blocknr, blocksize);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (!cur) {
				cur = read_tree_block(root, blocknr,
							 blocksize, gen);
				if (!cur || !extent_buffer_uptodate(cur)) {
					free_extent_buffer(cur);
					return -EIO;
				}
			} else if (!uptodate) {
				err = btrfs_read_buffer(cur, gen);
				if (err) {
					free_extent_buffer(cur);
					return err;
				}
			}
		}
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}

/*
 * The leaf data grows from end-to-front in the node.
 * this returns the address of the start of the last item,
 * which is the end of the leaf data stack
 */
static inline unsigned int leaf_data_end(struct btrfs_root *root,
					 struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);
	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(root);
	return btrfs_item_offset_nr(leaf, nr - 1);
}
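
/*
 * Leaf layout sketch (illustrative): item headers grow forward from the
 * leaf header while item data grows backward from the end of the block,
 * so leaf_data_end() is the boundary where free space ends and the data
 * of the last item begins:
 *
 *	[header][item 0][item 1]...[free space][data N-1]...[data 1][data 0]
 *	                                       ^
 *	                                       leaf_data_end()
 */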


/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p,
				       int item_size, struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
	struct btrfs_disk_key *tmp = NULL;
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *kaddr = NULL;
	unsigned long map_start = 0;
	unsigned long map_len = 0;
	int err;

	while (low < high) {
		mid = (low + high) / 2;
		offset = p + mid * item_size;

		if (!kaddr || offset < map_start ||
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {

			err = map_private_extent_buffer(eb, offset,
						sizeof(struct btrfs_disk_key),
						&kaddr, &map_start, &map_len);

			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
			} else {
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			}

		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}
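
/*
 * Behavior sketch (made-up values): with keys [4, 8, 15] in the buffer,
 * searching for 8 returns 0 with *slot = 1 (exact match); searching for
 * 9 returns 1 with *slot = 2, the slot where 9 would be inserted; and
 * searching for 99 returns 1 with *slot = 3 == max.
 */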

/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		      int level, int *slot)
{
	if (level == 0)
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	else
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
}

int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		     int level, int *slot)
{
	return bin_search(eb, key, level, slot);
}

static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}

1857 /* given a node and slot number, this reads the block it points to.  The
1858  * extent buffer is returned with a reference taken (but unlocked).
1859  * NULL is returned on error.
1860  */
1861 static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
1862 				   struct extent_buffer *parent, int slot)
1863 {
1864 	int level = btrfs_header_level(parent);
1865 	struct extent_buffer *eb;
1866 
1867 	if (slot < 0)
1868 		return NULL;
1869 	if (slot >= btrfs_header_nritems(parent))
1870 		return NULL;
1871 
1872 	BUG_ON(level == 0);
1873 
1874 	eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
1875 			     btrfs_level_size(root, level - 1),
1876 			     btrfs_node_ptr_generation(parent, slot));
1877 	if (eb && !extent_buffer_uptodate(eb)) {
1878 		free_extent_buffer(eb);
1879 		eb = NULL;
1880 	}
1881 
1882 	return eb;
1883 }
1884 
1885 /*
1886  * node level balancing, used to make sure nodes are in proper order for
1887  * item deletion.  We balance from the top down, so we have to make sure
1888  * that a deletion won't leave a node completely empty later on.
1889  */
1890 static noinline int balance_level(struct btrfs_trans_handle *trans,
1891 			 struct btrfs_root *root,
1892 			 struct btrfs_path *path, int level)
1893 {
1894 	struct extent_buffer *right = NULL;
1895 	struct extent_buffer *mid;
1896 	struct extent_buffer *left = NULL;
1897 	struct extent_buffer *parent = NULL;
1898 	int ret = 0;
1899 	int wret;
1900 	int pslot;
1901 	int orig_slot = path->slots[level];
1902 	u64 orig_ptr;
1903 
1904 	if (level == 0)
1905 		return 0;
1906 
1907 	mid = path->nodes[level];
1908 
1909 	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1910 		path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1911 	WARN_ON(btrfs_header_generation(mid) != trans->transid);
1912 
1913 	orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1914 
1915 	if (level < BTRFS_MAX_LEVEL - 1) {
1916 		parent = path->nodes[level + 1];
1917 		pslot = path->slots[level + 1];
1918 	}
1919 
1920 	/*
1921 	 * deal with the case where there is only one pointer in the root
1922 	 * by promoting the node below to a root
1923 	 */
1924 	if (!parent) {
1925 		struct extent_buffer *child;
1926 
1927 		if (btrfs_header_nritems(mid) != 1)
1928 			return 0;
1929 
1930 		/* promote the child to a root */
1931 		child = read_node_slot(root, mid, 0);
1932 		if (!child) {
1933 			ret = -EROFS;
1934 			btrfs_std_error(root->fs_info, ret);
1935 			goto enospc;
1936 		}
1937 
1938 		btrfs_tree_lock(child);
1939 		btrfs_set_lock_blocking(child);
1940 		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1941 		if (ret) {
1942 			btrfs_tree_unlock(child);
1943 			free_extent_buffer(child);
1944 			goto enospc;
1945 		}
1946 
1947 		tree_mod_log_set_root_pointer(root, child, 1);
1948 		rcu_assign_pointer(root->node, child);
1949 
1950 		add_root_to_dirty_list(root);
1951 		btrfs_tree_unlock(child);
1952 
1953 		path->locks[level] = 0;
1954 		path->nodes[level] = NULL;
1955 		clean_tree_block(trans, root, mid);
1956 		btrfs_tree_unlock(mid);
1957 		/* once for the path */
1958 		free_extent_buffer(mid);
1959 
1960 		root_sub_used(root, mid->len);
1961 		btrfs_free_tree_block(trans, root, mid, 0, 1);
1962 		/* once for the root ptr */
1963 		free_extent_buffer_stale(mid);
1964 		return 0;
1965 	}
1966 	if (btrfs_header_nritems(mid) >
1967 	    BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
1968 		return 0;
1969 
1970 	left = read_node_slot(root, parent, pslot - 1);
1971 	if (left) {
1972 		btrfs_tree_lock(left);
1973 		btrfs_set_lock_blocking(left);
1974 		wret = btrfs_cow_block(trans, root, left,
1975 				       parent, pslot - 1, &left);
1976 		if (wret) {
1977 			ret = wret;
1978 			goto enospc;
1979 		}
1980 	}
1981 	right = read_node_slot(root, parent, pslot + 1);
1982 	if (right) {
1983 		btrfs_tree_lock(right);
1984 		btrfs_set_lock_blocking(right);
1985 		wret = btrfs_cow_block(trans, root, right,
1986 				       parent, pslot + 1, &right);
1987 		if (wret) {
1988 			ret = wret;
1989 			goto enospc;
1990 		}
1991 	}
1992 
1993 	/* first, try to make some room in the middle buffer */
1994 	if (left) {
1995 		orig_slot += btrfs_header_nritems(left);
1996 		wret = push_node_left(trans, root, left, mid, 1);
1997 		if (wret < 0)
1998 			ret = wret;
1999 	}
2000 
2001 	/*
2002 	 * then try to empty the right most buffer into the middle
2003 	 */
2004 	if (right) {
2005 		wret = push_node_left(trans, root, mid, right, 1);
2006 		if (wret < 0 && wret != -ENOSPC)
2007 			ret = wret;
2008 		if (btrfs_header_nritems(right) == 0) {
2009 			clean_tree_block(trans, root, right);
2010 			btrfs_tree_unlock(right);
2011 			del_ptr(root, path, level + 1, pslot + 1);
2012 			root_sub_used(root, right->len);
2013 			btrfs_free_tree_block(trans, root, right, 0, 1);
2014 			free_extent_buffer_stale(right);
2015 			right = NULL;
2016 		} else {
2017 			struct btrfs_disk_key right_key;
2018 			btrfs_node_key(right, &right_key, 0);
2019 			tree_mod_log_set_node_key(root->fs_info, parent,
2020 						  pslot + 1, 0);
2021 			btrfs_set_node_key(parent, &right_key, pslot + 1);
2022 			btrfs_mark_buffer_dirty(parent);
2023 		}
2024 	}
2025 	if (btrfs_header_nritems(mid) == 1) {
2026 		/*
2027 		 * we're not allowed to leave a node with one item in the
2028 		 * tree during a delete.  A deletion from lower in the tree
2029 		 * could try to delete the only pointer in this node.
2030 		 * So, pull some keys from the left.
2031 		 * There has to be a left pointer at this point because
2032 		 * otherwise we would have pulled some pointers from the
2033 		 * right
2034 		 */
2035 		if (!left) {
2036 			ret = -EROFS;
2037 			btrfs_std_error(root->fs_info, ret);
2038 			goto enospc;
2039 		}
2040 		wret = balance_node_right(trans, root, mid, left);
2041 		if (wret < 0) {
2042 			ret = wret;
2043 			goto enospc;
2044 		}
2045 		if (wret == 1) {
2046 			wret = push_node_left(trans, root, left, mid, 1);
2047 			if (wret < 0)
2048 				ret = wret;
2049 		}
2050 		BUG_ON(wret == 1);
2051 	}
2052 	if (btrfs_header_nritems(mid) == 0) {
2053 		clean_tree_block(trans, root, mid);
2054 		btrfs_tree_unlock(mid);
2055 		del_ptr(root, path, level + 1, pslot);
2056 		root_sub_used(root, mid->len);
2057 		btrfs_free_tree_block(trans, root, mid, 0, 1);
2058 		free_extent_buffer_stale(mid);
2059 		mid = NULL;
2060 	} else {
2061 		/* update the parent key to reflect our changes */
2062 		struct btrfs_disk_key mid_key;
2063 		btrfs_node_key(mid, &mid_key, 0);
2064 		tree_mod_log_set_node_key(root->fs_info, parent,
2065 					  pslot, 0);
2066 		btrfs_set_node_key(parent, &mid_key, pslot);
2067 		btrfs_mark_buffer_dirty(parent);
2068 	}
2069 
2070 	/* update the path */
2071 	if (left) {
2072 		if (btrfs_header_nritems(left) > orig_slot) {
2073 			extent_buffer_get(left);
2074 			/* left was locked after cow */
2075 			path->nodes[level] = left;
2076 			path->slots[level + 1] -= 1;
2077 			path->slots[level] = orig_slot;
2078 			if (mid) {
2079 				btrfs_tree_unlock(mid);
2080 				free_extent_buffer(mid);
2081 			}
2082 		} else {
2083 			orig_slot -= btrfs_header_nritems(left);
2084 			path->slots[level] = orig_slot;
2085 		}
2086 	}
2087 	/* double check we haven't messed things up */
2088 	if (orig_ptr !=
2089 	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
2090 		BUG();
2091 enospc:
2092 	if (right) {
2093 		btrfs_tree_unlock(right);
2094 		free_extent_buffer(right);
2095 	}
2096 	if (left) {
2097 		if (path->nodes[level] != left)
2098 			btrfs_tree_unlock(left);
2099 		free_extent_buffer(left);
2100 	}
2101 	return ret;
2102 }
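
/*
 * Example of the balancing above: a level 1 node that has dropped to
 * two pointers (below the 1/4-of-capacity threshold) first has its
 * contents pushed into the left sibling, then the right sibling is
 * drained into it; any node that ends up empty is deleted from the
 * parent and freed, and the path is re-pointed at whichever surviving
 * node now holds the original slot.  The root is a special case: a root
 * left with a single pointer is collapsed by promoting its only child
 * to be the new root.
 */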
2103 
2104 /* Node balancing for insertion.  Here we only split or push nodes around
2105  * when they are completely full.  This is also done top down, so we
2106  * have to be pessimistic.
2107  */
2108 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
2109 					  struct btrfs_root *root,
2110 					  struct btrfs_path *path, int level)
2111 {
2112 	struct extent_buffer *right = NULL;
2113 	struct extent_buffer *mid;
2114 	struct extent_buffer *left = NULL;
2115 	struct extent_buffer *parent = NULL;
2116 	int ret = 0;
2117 	int wret;
2118 	int pslot;
2119 	int orig_slot = path->slots[level];
2120 
2121 	if (level == 0)
2122 		return 1;
2123 
2124 	mid = path->nodes[level];
2125 	WARN_ON(btrfs_header_generation(mid) != trans->transid);
2126 
2127 	if (level < BTRFS_MAX_LEVEL - 1) {
2128 		parent = path->nodes[level + 1];
2129 		pslot = path->slots[level + 1];
2130 	}
2131 
2132 	if (!parent)
2133 		return 1;
2134 
2135 	left = read_node_slot(root, parent, pslot - 1);
2136 
2137 	/* first, try to make some room in the middle buffer */
2138 	if (left) {
2139 		u32 left_nr;
2140 
2141 		btrfs_tree_lock(left);
2142 		btrfs_set_lock_blocking(left);
2143 
2144 		left_nr = btrfs_header_nritems(left);
2145 		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2146 			wret = 1;
2147 		} else {
2148 			ret = btrfs_cow_block(trans, root, left, parent,
2149 					      pslot - 1, &left);
2150 			if (ret)
2151 				wret = 1;
2152 			else {
2153 				wret = push_node_left(trans, root,
2154 						      left, mid, 0);
2155 			}
2156 		}
2157 		if (wret < 0)
2158 			ret = wret;
2159 		if (wret == 0) {
2160 			struct btrfs_disk_key disk_key;
2161 			orig_slot += left_nr;
2162 			btrfs_node_key(mid, &disk_key, 0);
2163 			tree_mod_log_set_node_key(root->fs_info, parent,
2164 						  pslot, 0);
2165 			btrfs_set_node_key(parent, &disk_key, pslot);
2166 			btrfs_mark_buffer_dirty(parent);
2167 			if (btrfs_header_nritems(left) > orig_slot) {
2168 				path->nodes[level] = left;
2169 				path->slots[level + 1] -= 1;
2170 				path->slots[level] = orig_slot;
2171 				btrfs_tree_unlock(mid);
2172 				free_extent_buffer(mid);
2173 			} else {
2174 				orig_slot -=
2175 					btrfs_header_nritems(left);
2176 				path->slots[level] = orig_slot;
2177 				btrfs_tree_unlock(left);
2178 				free_extent_buffer(left);
2179 			}
2180 			return 0;
2181 		}
2182 		btrfs_tree_unlock(left);
2183 		free_extent_buffer(left);
2184 	}
2185 	right = read_node_slot(root, parent, pslot + 1);
2186 
2187 	/*
2188 	 * then try to empty the right most buffer into the middle
2189 	 */
2190 	if (right) {
2191 		u32 right_nr;
2192 
2193 		btrfs_tree_lock(right);
2194 		btrfs_set_lock_blocking(right);
2195 
2196 		right_nr = btrfs_header_nritems(right);
2197 		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2198 			wret = 1;
2199 		} else {
2200 			ret = btrfs_cow_block(trans, root, right,
2201 					      parent, pslot + 1,
2202 					      &right);
2203 			if (ret)
2204 				wret = 1;
2205 			else {
2206 				wret = balance_node_right(trans, root,
2207 							  right, mid);
2208 			}
2209 		}
2210 		if (wret < 0)
2211 			ret = wret;
2212 		if (wret == 0) {
2213 			struct btrfs_disk_key disk_key;
2214 
2215 			btrfs_node_key(right, &disk_key, 0);
2216 			tree_mod_log_set_node_key(root->fs_info, parent,
2217 						  pslot + 1, 0);
2218 			btrfs_set_node_key(parent, &disk_key, pslot + 1);
2219 			btrfs_mark_buffer_dirty(parent);
2220 
2221 			if (btrfs_header_nritems(mid) <= orig_slot) {
2222 				path->nodes[level] = right;
2223 				path->slots[level + 1] += 1;
2224 				path->slots[level] = orig_slot -
2225 					btrfs_header_nritems(mid);
2226 				btrfs_tree_unlock(mid);
2227 				free_extent_buffer(mid);
2228 			} else {
2229 				btrfs_tree_unlock(right);
2230 				free_extent_buffer(right);
2231 			}
2232 			return 0;
2233 		}
2234 		btrfs_tree_unlock(right);
2235 		free_extent_buffer(right);
2236 	}
2237 	return 1;
2238 }
2239 
2240 /*
2241  * readahead one full node of leaves, finding things that are close
2242  * to the block in 'slot', and triggering readahead on them.
2243  */
2244 static void reada_for_search(struct btrfs_root *root,
2245 			     struct btrfs_path *path,
2246 			     int level, int slot, u64 objectid)
2247 {
2248 	struct extent_buffer *node;
2249 	struct btrfs_disk_key disk_key;
2250 	u32 nritems;
2251 	u64 search;
2252 	u64 target;
2253 	u64 nread = 0;
2254 	u64 gen;
2255 	int direction = path->reada;
2256 	struct extent_buffer *eb;
2257 	u32 nr;
2258 	u32 blocksize;
2259 	u32 nscan = 0;
2260 
2261 	if (level != 1)
2262 		return;
2263 
2264 	if (!path->nodes[level])
2265 		return;
2266 
2267 	node = path->nodes[level];
2268 
2269 	search = btrfs_node_blockptr(node, slot);
2270 	blocksize = btrfs_level_size(root, level - 1);
2271 	eb = btrfs_find_tree_block(root, search, blocksize);
2272 	if (eb) {
2273 		free_extent_buffer(eb);
2274 		return;
2275 	}
2276 
2277 	target = search;
2278 
2279 	nritems = btrfs_header_nritems(node);
2280 	nr = slot;
2281 
2282 	while (1) {
2283 		if (direction < 0) {
2284 			if (nr == 0)
2285 				break;
2286 			nr--;
2287 		} else if (direction > 0) {
2288 			nr++;
2289 			if (nr >= nritems)
2290 				break;
2291 		}
2292 		if (path->reada < 0 && objectid) {
2293 			btrfs_node_key(node, &disk_key, nr);
2294 			if (btrfs_disk_key_objectid(&disk_key) != objectid)
2295 				break;
2296 		}
2297 		search = btrfs_node_blockptr(node, nr);
2298 		if ((search <= target && target - search <= 65536) ||
2299 		    (search > target && search - target <= 65536)) {
2300 			gen = btrfs_node_ptr_generation(node, nr);
2301 			readahead_tree_block(root, search, blocksize, gen);
2302 			nread += blocksize;
2303 		}
2304 		nscan++;
2305 		if ((nread > 65536 || nscan > 32))
2306 			break;
2307 	}
2308 }
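
/*
 * Worked example for the window above (the node size is an assumption
 * for the example; only the 65536 and 32 constants come from the code):
 * with 16K tree blocks, the 65536 byte distance check admits roughly
 * four blocks' worth of bytenr distance on either side of the target,
 * and the loop stops after 64K of issued readahead or 32 inspected
 * slots, whichever comes first.
 */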
2309 
2310 static noinline void reada_for_balance(struct btrfs_root *root,
2311 				       struct btrfs_path *path, int level)
2312 {
2313 	int slot;
2314 	int nritems;
2315 	struct extent_buffer *parent;
2316 	struct extent_buffer *eb;
2317 	u64 gen;
2318 	u64 block1 = 0;
2319 	u64 block2 = 0;
2320 	int blocksize;
2321 
2322 	parent = path->nodes[level + 1];
2323 	if (!parent)
2324 		return;
2325 
2326 	nritems = btrfs_header_nritems(parent);
2327 	slot = path->slots[level + 1];
2328 	blocksize = btrfs_level_size(root, level);
2329 
2330 	if (slot > 0) {
2331 		block1 = btrfs_node_blockptr(parent, slot - 1);
2332 		gen = btrfs_node_ptr_generation(parent, slot - 1);
2333 		eb = btrfs_find_tree_block(root, block1, blocksize);
2334 		/*
2335 		 * if we get -EAGAIN from btrfs_buffer_uptodate, we
2336 		 * don't want to return -EAGAIN here.  That would loop
2337 		 * forever.
2338 		 */
2339 		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2340 			block1 = 0;
2341 		free_extent_buffer(eb);
2342 	}
2343 	if (slot + 1 < nritems) {
2344 		block2 = btrfs_node_blockptr(parent, slot + 1);
2345 		gen = btrfs_node_ptr_generation(parent, slot + 1);
2346 		eb = btrfs_find_tree_block(root, block2, blocksize);
2347 		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2348 			block2 = 0;
2349 		free_extent_buffer(eb);
2350 	}
2351 
2352 	if (block1)
2353 		readahead_tree_block(root, block1, blocksize, 0);
2354 	if (block2)
2355 		readahead_tree_block(root, block2, blocksize, 0);
2356 }
2357 
2358 
2359 /*
2360  * when we walk down the tree, it is usually safe to unlock the higher layers
2361  * in the tree.  The exceptions are when our path goes through slot 0, because
2362  * operations on the tree might require changing key pointers higher up in the
2363  * tree.
2364  *
2365  * callers might also have set path->keep_locks, which tells this code to keep
2366  * the lock if the path points to the last slot in the block.  This is part of
2367  * walking through the tree, and selecting the next slot in the higher block.
2368  *
2369  * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  So
2370  * if lowest_unlock is 1, level 0 won't be unlocked.
2371  */
2372 static noinline void unlock_up(struct btrfs_path *path, int level,
2373 			       int lowest_unlock, int min_write_lock_level,
2374 			       int *write_lock_level)
2375 {
2376 	int i;
2377 	int skip_level = level;
2378 	int no_skips = 0;
2379 	struct extent_buffer *t;
2380 
2381 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2382 		if (!path->nodes[i])
2383 			break;
2384 		if (!path->locks[i])
2385 			break;
2386 		if (!no_skips && path->slots[i] == 0) {
2387 			skip_level = i + 1;
2388 			continue;
2389 		}
2390 		if (!no_skips && path->keep_locks) {
2391 			u32 nritems;
2392 			t = path->nodes[i];
2393 			nritems = btrfs_header_nritems(t);
2394 			if (nritems < 1 || path->slots[i] >= nritems - 1) {
2395 				skip_level = i + 1;
2396 				continue;
2397 			}
2398 		}
2399 		if (skip_level < i && i >= lowest_unlock)
2400 			no_skips = 1;
2401 
2402 		t = path->nodes[i];
2403 		if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
2404 			btrfs_tree_unlock_rw(t, path->locks[i]);
2405 			path->locks[i] = 0;
2406 			if (write_lock_level &&
2407 			    i > min_write_lock_level &&
2408 			    i <= *write_lock_level) {
2409 				*write_lock_level = i - 1;
2410 			}
2411 		}
2412 	}
2413 }
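
/*
 * Example of the rules above: suppose unlock_up runs with level == 1,
 * lowest_unlock == 1 and path slots {level 1: 7, level 2: 0, level 3: 5}.
 * slots[2] == 0 means the level 1 node is the first child of its level 2
 * parent, so a key change at level 1 could ripple into level 2's slot 0
 * and from there into level 3.  skip_level becomes 3, levels 1 through 3
 * all keep their locks, and only levels above 3 are unlocked on this
 * pass.
 */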
2414 
2415 /*
2416  * This releases any locks held in the path starting at level and
2417  * going all the way up to the root.
2418  *
2419  * btrfs_search_slot will keep the lock held on higher nodes in a few
2420  * corner cases, such as COW of the block at slot zero in the node.  This
2421  * ignores those rules, and it should only be called when there are no
2422  * more updates to be done higher up in the tree.
2423  */
2424 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2425 {
2426 	int i;
2427 
2428 	if (path->keep_locks)
2429 		return;
2430 
2431 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2432 		if (!path->nodes[i])
2433 			continue;
2434 		if (!path->locks[i])
2435 			continue;
2436 		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2437 		path->locks[i] = 0;
2438 	}
2439 }
2440 
2441 /*
2442  * helper function for btrfs_search_slot.  The goal is to find a block
2443  * in cache without setting the path to blocking.  If we find the block
2444  * we return zero and the path is unchanged.
2445  *
2446  * If we can't find the block, we set the path blocking and do some
2447  * readahead.  -EAGAIN is returned and the search must be repeated.
2448  */
2449 static int
2450 read_block_for_search(struct btrfs_trans_handle *trans,
2451 		       struct btrfs_root *root, struct btrfs_path *p,
2452 		       struct extent_buffer **eb_ret, int level, int slot,
2453 		       struct btrfs_key *key, u64 time_seq)
2454 {
2455 	u64 blocknr;
2456 	u64 gen;
2457 	u32 blocksize;
2458 	struct extent_buffer *b = *eb_ret;
2459 	struct extent_buffer *tmp;
2460 	int ret;
2461 
2462 	blocknr = btrfs_node_blockptr(b, slot);
2463 	gen = btrfs_node_ptr_generation(b, slot);
2464 	blocksize = btrfs_level_size(root, level - 1);
2465 
2466 	tmp = btrfs_find_tree_block(root, blocknr, blocksize);
2467 	if (tmp) {
2468 		/* first we do an atomic uptodate check */
2469 		if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2470 			*eb_ret = tmp;
2471 			return 0;
2472 		}
2473 
2474 		/* the pages were up to date, but we failed
2475 		 * the generation number check.  Do a full
2476 		 * read for the generation number that is correct.
2477 		 * We must do this without dropping locks so
2478 		 * we can trust our generation number
2479 		 */
2480 		btrfs_set_path_blocking(p);
2481 
2482 		/* now we're allowed to do a blocking uptodate check */
2483 		ret = btrfs_read_buffer(tmp, gen);
2484 		if (!ret) {
2485 			*eb_ret = tmp;
2486 			return 0;
2487 		}
2488 		free_extent_buffer(tmp);
2489 		btrfs_release_path(p);
2490 		return -EIO;
2491 	}
2492 
2493 	/*
2494 	 * reduce lock contention at high levels
2495 	 * of the btree by dropping locks before
2496 	 * we read.  Don't release the lock on the current
2497 	 * level because we need to walk this node to figure
2498 	 * out which blocks to read.
2499 	 */
2500 	btrfs_unlock_up_safe(p, level + 1);
2501 	btrfs_set_path_blocking(p);
2502 
2503 	free_extent_buffer(tmp);
2504 	if (p->reada)
2505 		reada_for_search(root, p, level, slot, key->objectid);
2506 
2507 	btrfs_release_path(p);
2508 
2509 	ret = -EAGAIN;
2510 	tmp = read_tree_block(root, blocknr, blocksize, 0);
2511 	if (tmp) {
2512 		/*
2513 		 * If the read above didn't mark this buffer up to date,
2514 		 * it will never end up being up to date.  Set ret to -EIO now
2515 		 * and give up so that our caller doesn't loop forever
2516 		 * on our EAGAINs.
2517 		 */
2518 		if (!btrfs_buffer_uptodate(tmp, 0, 0))
2519 			ret = -EIO;
2520 		free_extent_buffer(tmp);
2521 	}
2522 	return ret;
2523 }
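
/*
 * Callers never loop on this function directly; btrfs_search_slot
 * restarts the whole descent when it sees -EAGAIN.  Reduced to a
 * sketch, the calling pattern is:
 *
 *	again:
 *		b = btrfs_read_lock_root_node(root);
 *		while (b) {
 *			...
 *			err = read_block_for_search(...);
 *			if (err == -EAGAIN)
 *				goto again;
 *			if (err)
 *				return err;
 *			...
 *		}
 *
 * The -EIO conversion at the bottom of the function is what keeps that
 * retry loop from spinning forever on a block that will never become
 * up to date.
 */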
2524 
2525 /*
2526  * helper function for btrfs_search_slot.  This does all of the checks
2527  * for node-level blocks and does any balancing required based on
2528  * the ins_len.
2529  *
2530  * If no extra work was required, zero is returned.  If we had to
2531  * drop the path, -EAGAIN is returned and btrfs_search_slot must
2532  * start over
2533  */
2534 static int
2535 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2536 		       struct btrfs_root *root, struct btrfs_path *p,
2537 		       struct extent_buffer *b, int level, int ins_len,
2538 		       int *write_lock_level)
2539 {
2540 	int ret;
2541 	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2542 	    BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
2543 		int sret;
2544 
2545 		if (*write_lock_level < level + 1) {
2546 			*write_lock_level = level + 1;
2547 			btrfs_release_path(p);
2548 			goto again;
2549 		}
2550 
2551 		btrfs_set_path_blocking(p);
2552 		reada_for_balance(root, p, level);
2553 		sret = split_node(trans, root, p, level);
2554 		btrfs_clear_path_blocking(p, NULL, 0);
2555 
2556 		BUG_ON(sret > 0);
2557 		if (sret) {
2558 			ret = sret;
2559 			goto done;
2560 		}
2561 		b = p->nodes[level];
2562 	} else if (ins_len < 0 && btrfs_header_nritems(b) <
2563 		   BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
2564 		int sret;
2565 
2566 		if (*write_lock_level < level + 1) {
2567 			*write_lock_level = level + 1;
2568 			btrfs_release_path(p);
2569 			goto again;
2570 		}
2571 
2572 		btrfs_set_path_blocking(p);
2573 		reada_for_balance(root, p, level);
2574 		sret = balance_level(trans, root, p, level);
2575 		btrfs_clear_path_blocking(p, NULL, 0);
2576 
2577 		if (sret) {
2578 			ret = sret;
2579 			goto done;
2580 		}
2581 		b = p->nodes[level];
2582 		if (!b) {
2583 			btrfs_release_path(p);
2584 			goto again;
2585 		}
2586 		BUG_ON(btrfs_header_nritems(b) == 1);
2587 	}
2588 	return 0;
2589 
2590 again:
2591 	ret = -EAGAIN;
2592 done:
2593 	return ret;
2594 }
2595 
2596 static void key_search_validate(struct extent_buffer *b,
2597 				struct btrfs_key *key,
2598 				int level)
2599 {
2600 #ifdef CONFIG_BTRFS_ASSERT
2601 	struct btrfs_disk_key disk_key;
2602 
2603 	btrfs_cpu_key_to_disk(&disk_key, key);
2604 
2605 	if (level == 0)
2606 		ASSERT(!memcmp_extent_buffer(b, &disk_key,
2607 		    offsetof(struct btrfs_leaf, items[0].key),
2608 		    sizeof(disk_key)));
2609 	else
2610 		ASSERT(!memcmp_extent_buffer(b, &disk_key,
2611 		    offsetof(struct btrfs_node, ptrs[0].key),
2612 		    sizeof(disk_key)));
2613 #endif
2614 }
2615 
2616 static int key_search(struct extent_buffer *b, struct btrfs_key *key,
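/*
 * key_search exploits one property of the descent in btrfs_search_slot:
 * once a level has compared equal (*prev_cmp == 0), the key being
 * searched for is, by the tree invariants, the lowest key of every
 * block we descend into afterwards, so the answer at each further
 * level is always slot 0 and the binary search can be skipped.
 * key_search_validate asserts exactly that when CONFIG_BTRFS_ASSERT is
 * enabled.
 */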
2617 		      int level, int *prev_cmp, int *slot)
2618 {
2619 	if (*prev_cmp != 0) {
2620 		*prev_cmp = bin_search(b, key, level, slot);
2621 		return *prev_cmp;
2622 	}
2623 
2624 	key_search_validate(b, key, level);
2625 	*slot = 0;
2626 
2627 	return 0;
2628 }
2629 
2630 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *found_path,
2631 		u64 iobjectid, u64 ioff, u8 key_type,
2632 		struct btrfs_key *found_key)
2633 {
2634 	int ret;
2635 	struct btrfs_key key;
2636 	struct extent_buffer *eb;
2637 	struct btrfs_path *path;
2638 
2639 	key.type = key_type;
2640 	key.objectid = iobjectid;
2641 	key.offset = ioff;
2642 
2643 	if (found_path == NULL) {
2644 		path = btrfs_alloc_path();
2645 		if (!path)
2646 			return -ENOMEM;
2647 	} else
2648 		path = found_path;
2649 
2650 	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
2651 	if ((ret < 0) || (found_key == NULL)) {
2652 		if (path != found_path)
2653 			btrfs_free_path(path);
2654 		return ret;
2655 	}
2656 
2657 	eb = path->nodes[0];
2658 	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
2659 		ret = btrfs_next_leaf(fs_root, path);
2660 		if (ret)
2661 			goto out;
2662 		eb = path->nodes[0];
2663 	}
2664 	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
2665 	ret = (found_key->type != key.type ||
2666 	       found_key->objectid != key.objectid);
2667 out:
2668 	if (path != found_path)
2669 		btrfs_free_path(path);
2670 	return ret;
2671 }
2672 
2673 /*
2674  * look for key in the tree.  path is filled in with nodes along the way.
2675  * If key is found, we return zero and you can find the item in the leaf
2676  * level of the path (level 0).
2677  *
2678  * If the key isn't found, the path points to the slot where it should
2679  * be inserted, and 1 is returned.  If there are other errors during the
2680  * search a negative error number is returned.
2681  *
2682  * If ins_len > 0, nodes and leaves will be split as we walk down the
2683  * tree.  If ins_len < 0, nodes will be merged as we walk down the tree (if
2684  * possible).
2685  */
2686 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
2687 		      *root, struct btrfs_key *key, struct btrfs_path *p, int
2688 		      ins_len, int cow)
2689 {
2690 	struct extent_buffer *b;
2691 	int slot;
2692 	int ret;
2693 	int err;
2694 	int level;
2695 	int lowest_unlock = 1;
2696 	int root_lock;
2697 	/* everything at write_lock_level or lower must be write locked */
2698 	int write_lock_level = 0;
2699 	u8 lowest_level = 0;
2700 	int min_write_lock_level;
2701 	int prev_cmp;
2702 
2703 	lowest_level = p->lowest_level;
2704 	WARN_ON(lowest_level && ins_len > 0);
2705 	WARN_ON(p->nodes[0] != NULL);
2706 	BUG_ON(!cow && ins_len);
2707 
2708 	if (ins_len < 0) {
2709 		lowest_unlock = 2;
2710 
2711 		/* when we are removing items, we might have to go up to level
2712 		 * two as we update tree pointers.  Make sure we keep write
2713 		 * locks for those levels as well
2714 		 */
2715 		write_lock_level = 2;
2716 	} else if (ins_len > 0) {
2717 		/*
2718 		 * for inserting items, make sure we have a write lock on
2719 		 * level 1 so we can update keys
2720 		 */
2721 		write_lock_level = 1;
2722 	}
2723 
2724 	if (!cow)
2725 		write_lock_level = -1;
2726 
2727 	if (cow && (p->keep_locks || p->lowest_level))
2728 		write_lock_level = BTRFS_MAX_LEVEL;
2729 
2730 	min_write_lock_level = write_lock_level;
2731 
2732 again:
2733 	prev_cmp = -1;
2734 	/*
2735 	 * we try very hard to do read locks on the root
2736 	 */
2737 	root_lock = BTRFS_READ_LOCK;
2738 	level = 0;
2739 	if (p->search_commit_root) {
2740 		/*
2741 		 * the commit roots are read only
2742 		 * so we always do read locks
2743 		 */
2744 		if (p->need_commit_sem)
2745 			down_read(&root->fs_info->commit_root_sem);
2746 		b = root->commit_root;
2747 		extent_buffer_get(b);
2748 		level = btrfs_header_level(b);
2749 		if (p->need_commit_sem)
2750 			up_read(&root->fs_info->commit_root_sem);
2751 		if (!p->skip_locking)
2752 			btrfs_tree_read_lock(b);
2753 	} else {
2754 		if (p->skip_locking) {
2755 			b = btrfs_root_node(root);
2756 			level = btrfs_header_level(b);
2757 		} else {
2758 			/* we don't know the level of the root node
2759 			 * until we actually have it read locked
2760 			 */
2761 			b = btrfs_read_lock_root_node(root);
2762 			level = btrfs_header_level(b);
2763 			if (level <= write_lock_level) {
2764 				/* whoops, must trade for write lock */
2765 				btrfs_tree_read_unlock(b);
2766 				free_extent_buffer(b);
2767 				b = btrfs_lock_root_node(root);
2768 				root_lock = BTRFS_WRITE_LOCK;
2769 
2770 				/* the level might have changed, check again */
2771 				level = btrfs_header_level(b);
2772 			}
2773 		}
2774 	}
2775 	p->nodes[level] = b;
2776 	if (!p->skip_locking)
2777 		p->locks[level] = root_lock;
2778 
2779 	while (b) {
2780 		level = btrfs_header_level(b);
2781 
2782 		/*
2783 		 * set up the path here so we can release it under lock
2784 		 * contention with the cow code
2785 		 */
2786 		if (cow) {
2787 			/*
2788 			 * if we don't really need to cow this block
2789 			 * then we don't want to set the path blocking,
2790 			 * so we test it here
2791 			 */
2792 			if (!should_cow_block(trans, root, b))
2793 				goto cow_done;
2794 
2795 			btrfs_set_path_blocking(p);
2796 
2797 			/*
2798 			 * must have write locks on this node and the
2799 			 * parent
2800 			 */
2801 			if (level > write_lock_level ||
2802 			    (level + 1 > write_lock_level &&
2803 			    level + 1 < BTRFS_MAX_LEVEL &&
2804 			    p->nodes[level + 1])) {
2805 				write_lock_level = level + 1;
2806 				btrfs_release_path(p);
2807 				goto again;
2808 			}
2809 
2810 			err = btrfs_cow_block(trans, root, b,
2811 					      p->nodes[level + 1],
2812 					      p->slots[level + 1], &b);
2813 			if (err) {
2814 				ret = err;
2815 				goto done;
2816 			}
2817 		}
2818 cow_done:
2819 		p->nodes[level] = b;
2820 		btrfs_clear_path_blocking(p, NULL, 0);
2821 
2822 		/*
2823 		 * we have a lock on b and as long as we aren't changing
2824 		 * the tree, there is no way for the items in b to change.
2825 		 * It is safe to drop the lock on our parent before we
2826 		 * go through the expensive btree search on b.
2827 		 *
2828 		 * If we're inserting or deleting (ins_len != 0), then we might
2829 		 * be changing slot zero, which may require changing the parent.
2830 		 * So, we can't drop the lock until after we know which slot
2831 		 * we're operating on.
2832 		 */
2833 		if (!ins_len && !p->keep_locks) {
2834 			int u = level + 1;
2835 
2836 			if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2837 				btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2838 				p->locks[u] = 0;
2839 			}
2840 		}
2841 
2842 		ret = key_search(b, key, level, &prev_cmp, &slot);
2843 
2844 		if (level != 0) {
2845 			int dec = 0;
2846 			if (ret && slot > 0) {
2847 				dec = 1;
2848 				slot -= 1;
2849 			}
2850 			p->slots[level] = slot;
2851 			err = setup_nodes_for_search(trans, root, p, b, level,
2852 					     ins_len, &write_lock_level);
2853 			if (err == -EAGAIN)
2854 				goto again;
2855 			if (err) {
2856 				ret = err;
2857 				goto done;
2858 			}
2859 			b = p->nodes[level];
2860 			slot = p->slots[level];
2861 
2862 			/*
2863 			 * slot 0 is special: if we change the key
2864 			 * we have to update the parent pointer
2865 			 * which means we must have a write lock
2866 			 * on the parent
2867 			 */
2868 			if (slot == 0 && ins_len &&
2869 			    write_lock_level < level + 1) {
2870 				write_lock_level = level + 1;
2871 				btrfs_release_path(p);
2872 				goto again;
2873 			}
2874 
2875 			unlock_up(p, level, lowest_unlock,
2876 				  min_write_lock_level, &write_lock_level);
2877 
2878 			if (level == lowest_level) {
2879 				if (dec)
2880 					p->slots[level]++;
2881 				goto done;
2882 			}
2883 
2884 			err = read_block_for_search(trans, root, p,
2885 						    &b, level, slot, key, 0);
2886 			if (err == -EAGAIN)
2887 				goto again;
2888 			if (err) {
2889 				ret = err;
2890 				goto done;
2891 			}
2892 
2893 			if (!p->skip_locking) {
2894 				level = btrfs_header_level(b);
2895 				if (level <= write_lock_level) {
2896 					err = btrfs_try_tree_write_lock(b);
2897 					if (!err) {
2898 						btrfs_set_path_blocking(p);
2899 						btrfs_tree_lock(b);
2900 						btrfs_clear_path_blocking(p, b,
2901 								  BTRFS_WRITE_LOCK);
2902 					}
2903 					p->locks[level] = BTRFS_WRITE_LOCK;
2904 				} else {
2905 					err = btrfs_try_tree_read_lock(b);
2906 					if (!err) {
2907 						btrfs_set_path_blocking(p);
2908 						btrfs_tree_read_lock(b);
2909 						btrfs_clear_path_blocking(p, b,
2910 								  BTRFS_READ_LOCK);
2911 					}
2912 					p->locks[level] = BTRFS_READ_LOCK;
2913 				}
2914 				p->nodes[level] = b;
2915 			}
2916 		} else {
2917 			p->slots[level] = slot;
2918 			if (ins_len > 0 &&
2919 			    btrfs_leaf_free_space(root, b) < ins_len) {
2920 				if (write_lock_level < 1) {
2921 					write_lock_level = 1;
2922 					btrfs_release_path(p);
2923 					goto again;
2924 				}
2925 
2926 				btrfs_set_path_blocking(p);
2927 				err = split_leaf(trans, root, key,
2928 						 p, ins_len, ret == 0);
2929 				btrfs_clear_path_blocking(p, NULL, 0);
2930 
2931 				BUG_ON(err > 0);
2932 				if (err) {
2933 					ret = err;
2934 					goto done;
2935 				}
2936 			}
2937 			if (!p->search_for_split)
2938 				unlock_up(p, level, lowest_unlock,
2939 					  min_write_lock_level, &write_lock_level);
2940 			goto done;
2941 		}
2942 	}
2943 	ret = 1;
2944 done:
2945 	/*
2946 	 * we don't really know what they plan on doing with the path
2947 	 * from here on, so for now just mark it as blocking
2948 	 */
2949 	if (!p->leave_spinning)
2950 		btrfs_set_path_blocking(p);
2951 	if (ret < 0)
2952 		btrfs_release_path(p);
2953 	return ret;
2954 }
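
/*
 * A minimal read-only caller of btrfs_search_slot, sketched with a
 * made-up objectid variable and the error handling trimmed to the
 * essentials:
 *
 *	struct btrfs_path *path;
 *	struct btrfs_key key;
 *	int ret;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	key.objectid = objectid;
 *	key.type = BTRFS_INODE_ITEM_KEY;
 *	key.offset = 0;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0) {
 *		... item is at path->nodes[0], path->slots[0] ...
 *	} else if (ret == 1) {
 *		... not found, path points at the insertion slot ...
 *	}
 *	btrfs_free_path(path);
 *
 * A NULL trans with ins_len == 0 and cow == 0 is the read-only form;
 * modifying searches must pass a transaction handle and set cow so the
 * walked blocks are COWed on the way down.
 */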
2955 
2956 /*
2957  * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2958  * current state of the tree together with the operations recorded in the tree
2959  * modification log to search for the key in a previous version of this tree, as
2960  * denoted by the time_seq parameter.
2961  *
2962  * Naturally, there is no support for insert, delete or cow operations.
2963  *
2964  * The resulting path and return value will be set up as if we called
2965  * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2966  */
2967 int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
2968 			  struct btrfs_path *p, u64 time_seq)
2969 {
2970 	struct extent_buffer *b;
2971 	int slot;
2972 	int ret;
2973 	int err;
2974 	int level;
2975 	int lowest_unlock = 1;
2976 	u8 lowest_level = 0;
2977 	int prev_cmp = -1;
2978 
2979 	lowest_level = p->lowest_level;
2980 	WARN_ON(p->nodes[0] != NULL);
2981 
2982 	if (p->search_commit_root) {
2983 		BUG_ON(time_seq);
2984 		return btrfs_search_slot(NULL, root, key, p, 0, 0);
2985 	}
2986 
2987 again:
2988 	b = get_old_root(root, time_seq);
2989 	level = btrfs_header_level(b);
2990 	p->locks[level] = BTRFS_READ_LOCK;
2991 
2992 	while (b) {
2993 		level = btrfs_header_level(b);
2994 		p->nodes[level] = b;
2995 		btrfs_clear_path_blocking(p, NULL, 0);
2996 
2997 		/*
2998 		 * we have a lock on b and as long as we aren't changing
2999 		 * the tree, there is no way for the items in b to change.
3000 		 * It is safe to drop the lock on our parent before we
3001 		 * go through the expensive btree search on b.
3002 		 */
3003 		btrfs_unlock_up_safe(p, level + 1);
3004 
3005 		/*
3006 		 * Since we can unwind ebs, we want to do a real search every
3007 		 * time.
3008 		 */
3009 		prev_cmp = -1;
3010 		ret = key_search(b, key, level, &prev_cmp, &slot);
3011 
3012 		if (level != 0) {
3013 			int dec = 0;
3014 			if (ret && slot > 0) {
3015 				dec = 1;
3016 				slot -= 1;
3017 			}
3018 			p->slots[level] = slot;
3019 			unlock_up(p, level, lowest_unlock, 0, NULL);
3020 
3021 			if (level == lowest_level) {
3022 				if (dec)
3023 					p->slots[level]++;
3024 				goto done;
3025 			}
3026 
3027 			err = read_block_for_search(NULL, root, p, &b, level,
3028 						    slot, key, time_seq);
3029 			if (err == -EAGAIN)
3030 				goto again;
3031 			if (err) {
3032 				ret = err;
3033 				goto done;
3034 			}
3035 
3036 			level = btrfs_header_level(b);
3037 			err = btrfs_try_tree_read_lock(b);
3038 			if (!err) {
3039 				btrfs_set_path_blocking(p);
3040 				btrfs_tree_read_lock(b);
3041 				btrfs_clear_path_blocking(p, b,
3042 							  BTRFS_READ_LOCK);
3043 			}
3044 			b = tree_mod_log_rewind(root->fs_info, p, b, time_seq);
3045 			if (!b) {
3046 				ret = -ENOMEM;
3047 				goto done;
3048 			}
3049 			p->locks[level] = BTRFS_READ_LOCK;
3050 			p->nodes[level] = b;
3051 		} else {
3052 			p->slots[level] = slot;
3053 			unlock_up(p, level, lowest_unlock, 0, NULL);
3054 			goto done;
3055 		}
3056 	}
3057 	ret = 1;
3058 done:
3059 	if (!p->leave_spinning)
3060 		btrfs_set_path_blocking(p);
3061 	if (ret < 0)
3062 		btrfs_release_path(p);
3063 
3064 	return ret;
3065 }
3066 
3067 /*
3068  * helper to use instead of btrfs_search_slot if no exact match is needed but
3069  * instead the next or previous item should be returned.
3070  * When find_higher is true, the next higher item is returned, the next lower
3071  * otherwise.
3072  * When return_any and find_higher are both true, and no higher item is found,
3073  * return the next lower instead.
3074  * When return_any is true and find_higher is false, and no lower item is found,
3075  * return the next higher instead.
3076  * It returns 0 if any item is found, 1 if none is found (tree empty), and
3077  * < 0 on error
3078  */
3079 int btrfs_search_slot_for_read(struct btrfs_root *root,
3080 			       struct btrfs_key *key, struct btrfs_path *p,
3081 			       int find_higher, int return_any)
3082 {
3083 	int ret;
3084 	struct extent_buffer *leaf;
3085 
3086 again:
3087 	ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
3088 	if (ret <= 0)
3089 		return ret;
3090 	/*
3091 	 * a return value of 1 means the path is at the position where the
3092 	 * item should be inserted. Normally this is the next bigger item,
3093 	 * but in case the previous item is the last in a leaf, path points
3094 	 * to the first free slot in the previous leaf, i.e. at an invalid
3095 	 * item.
3096 	 */
3097 	leaf = p->nodes[0];
3098 
3099 	if (find_higher) {
3100 		if (p->slots[0] >= btrfs_header_nritems(leaf)) {
3101 			ret = btrfs_next_leaf(root, p);
3102 			if (ret <= 0)
3103 				return ret;
3104 			if (!return_any)
3105 				return 1;
3106 			/*
3107 			 * no higher item found, return the next
3108 			 * lower instead
3109 			 */
3110 			return_any = 0;
3111 			find_higher = 0;
3112 			btrfs_release_path(p);
3113 			goto again;
3114 		}
3115 	} else {
3116 		if (p->slots[0] == 0) {
3117 			ret = btrfs_prev_leaf(root, p);
3118 			if (ret < 0)
3119 				return ret;
3120 			if (!ret) {
3121 				leaf = p->nodes[0];
3122 				if (p->slots[0] == btrfs_header_nritems(leaf))
3123 					p->slots[0]--;
3124 				return 0;
3125 			}
3126 			if (!return_any)
3127 				return 1;
3128 			/*
3129 			 * no lower item found, return the next
3130 			 * higher instead
3131 			 */
3132 			return_any = 0;
3133 			find_higher = 1;
3134 			btrfs_release_path(p);
3135 			goto again;
3136 		} else {
3137 			--p->slots[0];
3138 		}
3139 	}
3140 	return 0;
3141 }
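
/*
 * Example use of the helper above, as a sketch: position the path at
 * the first item at or after 'key', falling back to the closest lower
 * item when nothing higher exists:
 *
 *	ret = btrfs_search_slot_for_read(root, &key, path, 1, 1);
 *	if (ret < 0)
 *		return ret;
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
 *				      path->slots[0]);
 *
 * With find_higher == 1 and return_any == 1, a return of 1 can only
 * mean the tree is empty.
 */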
3142 
3143 /*
3144  * adjust the pointers going up the tree, starting at level
3145  * making sure the right key of each node points to 'key'.
3146  * This is used after shifting pointers to the left, so it stops
3147  * fixing up pointers when a given leaf/node is not in slot 0 of the
3148  * higher levels
3149  *
3150  */
3151 static void fixup_low_keys(struct btrfs_root *root, struct btrfs_path *path,
3152 			   struct btrfs_disk_key *key, int level)
3153 {
3154 	int i;
3155 	struct extent_buffer *t;
3156 
3157 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
3158 		int tslot = path->slots[i];
3159 		if (!path->nodes[i])
3160 			break;
3161 		t = path->nodes[i];
3162 		tree_mod_log_set_node_key(root->fs_info, t, tslot, 1);
3163 		btrfs_set_node_key(t, key, tslot);
3164 		btrfs_mark_buffer_dirty(path->nodes[i]);
3165 		if (tslot != 0)
3166 			break;
3167 	}
3168 }
3169 
3170 /*
3171  * update item key.
3172  *
3173  * This function isn't completely safe. It's the caller's responsibility
3174  * to ensure that the new key won't break the sort order.
3175  */
3176 void btrfs_set_item_key_safe(struct btrfs_root *root, struct btrfs_path *path,
3177 			     struct btrfs_key *new_key)
3178 {
3179 	struct btrfs_disk_key disk_key;
3180 	struct extent_buffer *eb;
3181 	int slot;
3182 
3183 	eb = path->nodes[0];
3184 	slot = path->slots[0];
3185 	if (slot > 0) {
3186 		btrfs_item_key(eb, &disk_key, slot - 1);
3187 		BUG_ON(comp_keys(&disk_key, new_key) >= 0);
3188 	}
3189 	if (slot < btrfs_header_nritems(eb) - 1) {
3190 		btrfs_item_key(eb, &disk_key, slot + 1);
3191 		BUG_ON(comp_keys(&disk_key, new_key) <= 0);
3192 	}
3193 
3194 	btrfs_cpu_key_to_disk(&disk_key, new_key);
3195 	btrfs_set_item_key(eb, &disk_key, slot);
3196 	btrfs_mark_buffer_dirty(eb);
3197 	if (slot == 0)
3198 		fixup_low_keys(root, path, &disk_key, 1);
3199 }
3200 
3201 /*
3202  * try to push data from one node into the next node left in the
3203  * tree.
3204  *
3205  * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
3206  * error, and > 0 if there was no room in the left hand block.
3207  */
3208 static int push_node_left(struct btrfs_trans_handle *trans,
3209 			  struct btrfs_root *root, struct extent_buffer *dst,
3210 			  struct extent_buffer *src, int empty)
3211 {
3212 	int push_items = 0;
3213 	int src_nritems;
3214 	int dst_nritems;
3215 	int ret = 0;
3216 
3217 	src_nritems = btrfs_header_nritems(src);
3218 	dst_nritems = btrfs_header_nritems(dst);
3219 	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3220 	WARN_ON(btrfs_header_generation(src) != trans->transid);
3221 	WARN_ON(btrfs_header_generation(dst) != trans->transid);
3222 
3223 	if (!empty && src_nritems <= 8)
3224 		return 1;
3225 
3226 	if (push_items <= 0)
3227 		return 1;
3228 
3229 	if (empty) {
3230 		push_items = min(src_nritems, push_items);
3231 		if (push_items < src_nritems) {
3232 			/* leave at least 8 pointers in the node if
3233 			 * we aren't going to empty it
3234 			 */
3235 			if (src_nritems - push_items < 8) {
3236 				if (push_items <= 8)
3237 					return 1;
3238 				push_items -= 8;
3239 			}
3240 		}
3241 	} else
3242 		push_items = min(src_nritems - 8, push_items);
3243 
3244 	ret = tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
3245 				   push_items);
3246 	if (ret) {
3247 		btrfs_abort_transaction(trans, root, ret);
3248 		return ret;
3249 	}
3250 	copy_extent_buffer(dst, src,
3251 			   btrfs_node_key_ptr_offset(dst_nritems),
3252 			   btrfs_node_key_ptr_offset(0),
3253 			   push_items * sizeof(struct btrfs_key_ptr));
3254 
3255 	if (push_items < src_nritems) {
3256 		/*
3257 		 * don't call tree_mod_log_eb_move here, key removal was already
3258 		 * fully logged by tree_mod_log_eb_copy above.
3259 		 */
3260 		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3261 				      btrfs_node_key_ptr_offset(push_items),
3262 				      (src_nritems - push_items) *
3263 				      sizeof(struct btrfs_key_ptr));
3264 	}
3265 	btrfs_set_header_nritems(src, src_nritems - push_items);
3266 	btrfs_set_header_nritems(dst, dst_nritems + push_items);
3267 	btrfs_mark_buffer_dirty(src);
3268 	btrfs_mark_buffer_dirty(dst);
3269 
3270 	return ret;
3271 }
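
/*
 * Example of the limits above: with empty == 0, a src holding 15
 * pointers and a dst with room for 10 more pushes only
 * min(15 - 8, 10) == 7 pointers, deliberately leaving 8 behind so a
 * non-emptying push never drains the node.  Only with empty == 1, and
 * only when dst has room for everything, may src go all the way down
 * to zero items.
 */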
3272 
3273 /*
3274  * try to push data from one node into the next node right in the
3275  * tree.
3276  *
3277  * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3278  * error, and > 0 if there was no room in the right hand block.
3279  *
3280  * this will only push up to 1/2 the contents of the left node over
3281  */
3282 static int balance_node_right(struct btrfs_trans_handle *trans,
3283 			      struct btrfs_root *root,
3284 			      struct extent_buffer *dst,
3285 			      struct extent_buffer *src)
3286 {
3287 	int push_items = 0;
3288 	int max_push;
3289 	int src_nritems;
3290 	int dst_nritems;
3291 	int ret = 0;
3292 
3293 	WARN_ON(btrfs_header_generation(src) != trans->transid);
3294 	WARN_ON(btrfs_header_generation(dst) != trans->transid);
3295 
3296 	src_nritems = btrfs_header_nritems(src);
3297 	dst_nritems = btrfs_header_nritems(dst);
3298 	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3299 	if (push_items <= 0)
3300 		return 1;
3301 
3302 	if (src_nritems < 4)
3303 		return 1;
3304 
3305 	max_push = src_nritems / 2 + 1;
3306 	/* don't try to empty the node */
3307 	if (max_push >= src_nritems)
3308 		return 1;
3309 
3310 	if (max_push < push_items)
3311 		push_items = max_push;
3312 
3313 	tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
3314 	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3315 				      btrfs_node_key_ptr_offset(0),
3316 				      (dst_nritems) *
3317 				      sizeof(struct btrfs_key_ptr));
3318 
3319 	ret = tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
3320 				   src_nritems - push_items, push_items);
3321 	if (ret) {
3322 		btrfs_abort_transaction(trans, root, ret);
3323 		return ret;
3324 	}
3325 	copy_extent_buffer(dst, src,
3326 			   btrfs_node_key_ptr_offset(0),
3327 			   btrfs_node_key_ptr_offset(src_nritems - push_items),
3328 			   push_items * sizeof(struct btrfs_key_ptr));
3329 
3330 	btrfs_set_header_nritems(src, src_nritems - push_items);
3331 	btrfs_set_header_nritems(dst, dst_nritems + push_items);
3332 
3333 	btrfs_mark_buffer_dirty(src);
3334 	btrfs_mark_buffer_dirty(dst);
3335 
3336 	return ret;
3337 }
3338 
3339 /*
3340  * helper function to insert a new root level in the tree.
3341  * A new node is allocated, and a single item is inserted to
3342  * point to the existing root
3343  *
3344  * returns zero on success or < 0 on failure.
3345  */
3346 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3347 			   struct btrfs_root *root,
3348 			   struct btrfs_path *path, int level)
3349 {
3350 	u64 lower_gen;
3351 	struct extent_buffer *lower;
3352 	struct extent_buffer *c;
3353 	struct extent_buffer *old;
3354 	struct btrfs_disk_key lower_key;
3355 
3356 	BUG_ON(path->nodes[level]);
3357 	BUG_ON(path->nodes[level-1] != root->node);
3358 
3359 	lower = path->nodes[level-1];
3360 	if (level == 1)
3361 		btrfs_item_key(lower, &lower_key, 0);
3362 	else
3363 		btrfs_node_key(lower, &lower_key, 0);
3364 
3365 	c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
3366 				   root->root_key.objectid, &lower_key,
3367 				   level, root->node->start, 0);
3368 	if (IS_ERR(c))
3369 		return PTR_ERR(c);
3370 
3371 	root_add_used(root, root->nodesize);
3372 
3373 	memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
3374 	btrfs_set_header_nritems(c, 1);
3375 	btrfs_set_header_level(c, level);
3376 	btrfs_set_header_bytenr(c, c->start);
3377 	btrfs_set_header_generation(c, trans->transid);
3378 	btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
3379 	btrfs_set_header_owner(c, root->root_key.objectid);
3380 
3381 	write_extent_buffer(c, root->fs_info->fsid, btrfs_header_fsid(),
3382 			    BTRFS_FSID_SIZE);
3383 
3384 	write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
3385 			    btrfs_header_chunk_tree_uuid(c), BTRFS_UUID_SIZE);
3386 
3387 	btrfs_set_node_key(c, &lower_key, 0);
3388 	btrfs_set_node_blockptr(c, 0, lower->start);
3389 	lower_gen = btrfs_header_generation(lower);
3390 	WARN_ON(lower_gen != trans->transid);
3391 
3392 	btrfs_set_node_ptr_generation(c, 0, lower_gen);
3393 
3394 	btrfs_mark_buffer_dirty(c);
3395 
3396 	old = root->node;
3397 	tree_mod_log_set_root_pointer(root, c, 0);
3398 	rcu_assign_pointer(root->node, c);
3399 
3400 	/* the super has an extra ref to root->node */
3401 	free_extent_buffer(old);
3402 
3403 	add_root_to_dirty_list(root);
3404 	extent_buffer_get(c);
3405 	path->nodes[level] = c;
3406 	path->locks[level] = BTRFS_WRITE_LOCK;
3407 	path->slots[level] = 0;
3408 	return 0;
3409 }
3410 
3411 /*
3412  * worker function to insert a single pointer in a node.
3413  * the node should have enough room for the pointer already
3414  *
3415  * slot and level indicate where you want the key to go, and
3416  * bytenr is the block the key points to.
3417  */
3418 static void insert_ptr(struct btrfs_trans_handle *trans,
3419 		       struct btrfs_root *root, struct btrfs_path *path,
3420 		       struct btrfs_disk_key *key, u64 bytenr,
3421 		       int slot, int level)
3422 {
3423 	struct extent_buffer *lower;
3424 	int nritems;
3425 	int ret;
3426 
3427 	BUG_ON(!path->nodes[level]);
3428 	btrfs_assert_tree_locked(path->nodes[level]);
3429 	lower = path->nodes[level];
3430 	nritems = btrfs_header_nritems(lower);
3431 	BUG_ON(slot > nritems);
3432 	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
3433 	if (slot != nritems) {
3434 		if (level)
3435 			tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
3436 					     slot, nritems - slot);
3437 		memmove_extent_buffer(lower,
3438 			      btrfs_node_key_ptr_offset(slot + 1),
3439 			      btrfs_node_key_ptr_offset(slot),
3440 			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
3441 	}
3442 	if (level) {
3443 		ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
3444 					      MOD_LOG_KEY_ADD, GFP_NOFS);
3445 		BUG_ON(ret < 0);
3446 	}
3447 	btrfs_set_node_key(lower, key, slot);
3448 	btrfs_set_node_blockptr(lower, slot, bytenr);
3449 	WARN_ON(trans->transid == 0);
3450 	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3451 	btrfs_set_header_nritems(lower, nritems + 1);
3452 	btrfs_mark_buffer_dirty(lower);
3453 }
3454 
3455 /*
3456  * split the node at the specified level in path in two.
3457  * The path is corrected to point to the appropriate node after the split
3458  *
3459  * Before splitting this tries to make some room in the node by pushing
3460  * left and right, if either one works, it returns right away.
3461  *
3462  * returns 0 on success and < 0 on failure
3463  */
3464 static noinline int split_node(struct btrfs_trans_handle *trans,
3465 			       struct btrfs_root *root,
3466 			       struct btrfs_path *path, int level)
3467 {
3468 	struct extent_buffer *c;
3469 	struct extent_buffer *split;
3470 	struct btrfs_disk_key disk_key;
3471 	int mid;
3472 	int ret;
3473 	u32 c_nritems;
3474 
3475 	c = path->nodes[level];
3476 	WARN_ON(btrfs_header_generation(c) != trans->transid);
3477 	if (c == root->node) {
3478 		/*
3479 		 * trying to split the root, let's make a new one
3480 		 *
3481 		 * tree mod log: We don't log removal of the old root in
3482 		 * insert_new_root, because that root buffer will be kept as a
3483 		 * normal node. We are going to log removal of half of the
3484 		 * elements below with tree_mod_log_eb_copy. We're holding a
3485 		 * tree lock on the buffer, which is why we cannot race with
3486 		 * other tree_mod_log users.
3487 		 */
3488 		ret = insert_new_root(trans, root, path, level + 1);
3489 		if (ret)
3490 			return ret;
3491 	} else {
3492 		ret = push_nodes_for_insert(trans, root, path, level);
3493 		c = path->nodes[level];
3494 		if (!ret && btrfs_header_nritems(c) <
3495 		    BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
3496 			return 0;
3497 		if (ret < 0)
3498 			return ret;
3499 	}
3500 
3501 	c_nritems = btrfs_header_nritems(c);
3502 	mid = (c_nritems + 1) / 2;
3503 	btrfs_node_key(c, &disk_key, mid);
3504 
3505 	split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
3506 					root->root_key.objectid,
3507 					&disk_key, level, c->start, 0);
3508 	if (IS_ERR(split))
3509 		return PTR_ERR(split);
3510 
3511 	root_add_used(root, root->nodesize);
3512 
3513 	memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
3514 	btrfs_set_header_level(split, btrfs_header_level(c));
3515 	btrfs_set_header_bytenr(split, split->start);
3516 	btrfs_set_header_generation(split, trans->transid);
3517 	btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
3518 	btrfs_set_header_owner(split, root->root_key.objectid);
3519 	write_extent_buffer(split, root->fs_info->fsid,
3520 			    btrfs_header_fsid(), BTRFS_FSID_SIZE);
3521 	write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
3522 			    btrfs_header_chunk_tree_uuid(split),
3523 			    BTRFS_UUID_SIZE);
3524 
3525 	ret = tree_mod_log_eb_copy(root->fs_info, split, c, 0,
3526 				   mid, c_nritems - mid);
3527 	if (ret) {
3528 		btrfs_abort_transaction(trans, root, ret);
3529 		return ret;
3530 	}
3531 	copy_extent_buffer(split, c,
3532 			   btrfs_node_key_ptr_offset(0),
3533 			   btrfs_node_key_ptr_offset(mid),
3534 			   (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3535 	btrfs_set_header_nritems(split, c_nritems - mid);
3536 	btrfs_set_header_nritems(c, mid);
3537 	ret = 0;
3538 
3539 	btrfs_mark_buffer_dirty(c);
3540 	btrfs_mark_buffer_dirty(split);
3541 
3542 	insert_ptr(trans, root, path, &disk_key, split->start,
3543 		   path->slots[level + 1] + 1, level + 1);
3544 
3545 	if (path->slots[level] >= mid) {
3546 		path->slots[level] -= mid;
3547 		btrfs_tree_unlock(c);
3548 		free_extent_buffer(c);
3549 		path->nodes[level] = split;
3550 		path->slots[level + 1] += 1;
3551 	} else {
3552 		btrfs_tree_unlock(split);
3553 		free_extent_buffer(split);
3554 	}
3555 	return ret;
3556 }
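
/*
 * Worked example of the split arithmetic above: a node with 121
 * pointers computes mid = (121 + 1) / 2 = 61, keeps slots [0, 60] in
 * the original and moves slots [61, 120] into the new right sibling.
 * A path slot of 80 becomes slot 19 in the new node and the parent
 * slot advances by one; a path slot of 40 stays where it is.
 */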
3557 
3558 /*
3559  * how many bytes are required to store the items in a leaf.  start
3560  * and nr indicate which items in the leaf to check.  This totals up the
3561  * space used both by the item structs and the item data
3562  */
3563 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3564 {
3565 	struct btrfs_item *start_item;
3566 	struct btrfs_item *end_item;
3567 	struct btrfs_map_token token;
3568 	int data_len;
3569 	int nritems = btrfs_header_nritems(l);
3570 	int end = min(nritems, start + nr) - 1;
3571 
3572 	if (!nr)
3573 		return 0;
3574 	btrfs_init_map_token(&token);
3575 	start_item = btrfs_item_nr(start);
3576 	end_item = btrfs_item_nr(end);
3577 	data_len = btrfs_token_item_offset(l, start_item, &token) +
3578 		btrfs_token_item_size(l, start_item, &token);
3579 	data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3580 	data_len += sizeof(struct btrfs_item) * nr;
3581 	WARN_ON(data_len < 0);
3582 	return data_len;
3583 }
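
/*
 * Worked example (illustrative sizes): for three items whose payloads
 * are 100, 60 and 40 bytes, packed contiguously at the end of the leaf,
 *
 *	data_len = 100 + 60 + 40 + 3 * sizeof(struct btrfs_item)
 *
 * because offset(start) + size(start) is the high end of the first
 * payload, offset(end) is the low end of the last one, and their
 * difference covers every payload in between; the item headers are
 * added separately.
 */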
3584 
3585 /*
3586  * The space between the end of the leaf items and
3587  * the start of the leaf data.  IOW, how much room
3588  * the leaf has left for both items and data
3589  */
3590 noinline int btrfs_leaf_free_space(struct btrfs_root *root,
3591 				   struct extent_buffer *leaf)
3592 {
3593 	int nritems = btrfs_header_nritems(leaf);
3594 	int ret;
3595 	ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
3596 	if (ret < 0) {
3597 		btrfs_crit(root->fs_info,
3598 			"leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3599 		       ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
3600 		       leaf_space_used(leaf, 0, nritems), nritems);
3601 	}
3602 	return ret;
3603 }
3604 
3605 /*
3606  * min_slot controls the lowest index we're willing to push to the
3607  * right.  We'll push up to and including min_slot, but no lower
3608  */
3609 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
3610 				      struct btrfs_root *root,
3611 				      struct btrfs_path *path,
3612 				      int data_size, int empty,
3613 				      struct extent_buffer *right,
3614 				      int free_space, u32 left_nritems,
3615 				      u32 min_slot)
3616 {
3617 	struct extent_buffer *left = path->nodes[0];
3618 	struct extent_buffer *upper = path->nodes[1];
3619 	struct btrfs_map_token token;
3620 	struct btrfs_disk_key disk_key;
3621 	int slot;
3622 	u32 i;
3623 	int push_space = 0;
3624 	int push_items = 0;
3625 	struct btrfs_item *item;
3626 	u32 nr;
3627 	u32 right_nritems;
3628 	u32 data_end;
3629 	u32 this_item_size;
3630 
3631 	btrfs_init_map_token(&token);
3632 
3633 	if (empty)
3634 		nr = 0;
3635 	else
3636 		nr = max_t(u32, 1, min_slot);
3637 
3638 	if (path->slots[0] >= left_nritems)
3639 		push_space += data_size;
3640 
3641 	slot = path->slots[1];
3642 	i = left_nritems - 1;
3643 	while (i >= nr) {
3644 		item = btrfs_item_nr(i);
3645 
3646 		if (!empty && push_items > 0) {
3647 			if (path->slots[0] > i)
3648 				break;
3649 			if (path->slots[0] == i) {
3650 				int space = btrfs_leaf_free_space(root, left);
3651 				if (space + push_space * 2 > free_space)
3652 					break;
3653 			}
3654 		}
3655 
3656 		if (path->slots[0] == i)
3657 			push_space += data_size;
3658 
3659 		this_item_size = btrfs_item_size(left, item);
3660 		if (this_item_size + sizeof(*item) + push_space > free_space)
3661 			break;
3662 
3663 		push_items++;
3664 		push_space += this_item_size + sizeof(*item);
3665 		if (i == 0)
3666 			break;
3667 		i--;
3668 	}
3669 
3670 	if (push_items == 0)
3671 		goto out_unlock;
3672 
3673 	WARN_ON(!empty && push_items == left_nritems);
3674 
3675 	/* push left to right */
3676 	right_nritems = btrfs_header_nritems(right);
3677 
3678 	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3679 	push_space -= leaf_data_end(root, left);
3680 
3681 	/* make room in the right data area */
3682 	data_end = leaf_data_end(root, right);
3683 	memmove_extent_buffer(right,
3684 			      btrfs_leaf_data(right) + data_end - push_space,
3685 			      btrfs_leaf_data(right) + data_end,
3686 			      BTRFS_LEAF_DATA_SIZE(root) - data_end);
3687 
3688 	/* copy from the left data area */
3689 	copy_extent_buffer(right, left, btrfs_leaf_data(right) +
3690 		     BTRFS_LEAF_DATA_SIZE(root) - push_space,
3691 		     btrfs_leaf_data(left) + leaf_data_end(root, left),
3692 		     push_space);
3693 
3694 	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3695 			      btrfs_item_nr_offset(0),
3696 			      right_nritems * sizeof(struct btrfs_item));
3697 
3698 	/* copy the items from left to right */
3699 	copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3700 		   btrfs_item_nr_offset(left_nritems - push_items),
3701 		   push_items * sizeof(struct btrfs_item));
3702 
3703 	/* update the item pointers */
3704 	right_nritems += push_items;
3705 	btrfs_set_header_nritems(right, right_nritems);
3706 	push_space = BTRFS_LEAF_DATA_SIZE(root);
3707 	for (i = 0; i < right_nritems; i++) {
3708 		item = btrfs_item_nr(i);
3709 		push_space -= btrfs_token_item_size(right, item, &token);
3710 		btrfs_set_token_item_offset(right, item, push_space, &token);
3711 	}
3712 
3713 	left_nritems -= push_items;
3714 	btrfs_set_header_nritems(left, left_nritems);
3715 
3716 	if (left_nritems)
3717 		btrfs_mark_buffer_dirty(left);
3718 	else
3719 		clean_tree_block(trans, root, left);
3720 
3721 	btrfs_mark_buffer_dirty(right);
3722 
3723 	btrfs_item_key(right, &disk_key, 0);
3724 	btrfs_set_node_key(upper, &disk_key, slot + 1);
3725 	btrfs_mark_buffer_dirty(upper);
3726 
3727 	/* then fixup the leaf pointer in the path */
3728 	if (path->slots[0] >= left_nritems) {
3729 		path->slots[0] -= left_nritems;
3730 		if (btrfs_header_nritems(path->nodes[0]) == 0)
3731 			clean_tree_block(trans, root, path->nodes[0]);
3732 		btrfs_tree_unlock(path->nodes[0]);
3733 		free_extent_buffer(path->nodes[0]);
3734 		path->nodes[0] = right;
3735 		path->slots[1] += 1;
3736 	} else {
3737 		btrfs_tree_unlock(right);
3738 		free_extent_buffer(right);
3739 	}
3740 	return 0;
3741 
3742 out_unlock:
3743 	btrfs_tree_unlock(right);
3744 	free_extent_buffer(right);
3745 	return 1;
3746 }
3747 
3748 /*
3749  * push some data in the path leaf to the right, trying to free up at
3750  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3751  *
3752  * returns 1 if the push failed because the other node didn't have enough
3753  * room, 0 if everything worked out and < 0 if there were major errors.
3754  *
3755  * this will push starting from min_slot to the end of the leaf.  It won't
3756  * push any slot lower than min_slot
3757  */
3758 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3759 			   *root, struct btrfs_path *path,
3760 			   int min_data_size, int data_size,
3761 			   int empty, u32 min_slot)
3762 {
3763 	struct extent_buffer *left = path->nodes[0];
3764 	struct extent_buffer *right;
3765 	struct extent_buffer *upper;
3766 	int slot;
3767 	int free_space;
3768 	u32 left_nritems;
3769 	int ret;
3770 
3771 	if (!path->nodes[1])
3772 		return 1;
3773 
3774 	slot = path->slots[1];
3775 	upper = path->nodes[1];
3776 	if (slot >= btrfs_header_nritems(upper) - 1)
3777 		return 1;
3778 
3779 	btrfs_assert_tree_locked(path->nodes[1]);
3780 
3781 	right = read_node_slot(root, upper, slot + 1);
3782 	if (right == NULL)
3783 		return 1;
3784 
3785 	btrfs_tree_lock(right);
3786 	btrfs_set_lock_blocking(right);
3787 
3788 	free_space = btrfs_leaf_free_space(root, right);
3789 	if (free_space < data_size)
3790 		goto out_unlock;
3791 
3792 	/* cow and double check */
3793 	ret = btrfs_cow_block(trans, root, right, upper,
3794 			      slot + 1, &right);
3795 	if (ret)
3796 		goto out_unlock;
3797 
3798 	free_space = btrfs_leaf_free_space(root, right);
3799 	if (free_space < data_size)
3800 		goto out_unlock;
3801 
3802 	left_nritems = btrfs_header_nritems(left);
3803 	if (left_nritems == 0)
3804 		goto out_unlock;
3805 
3806 	if (path->slots[0] == left_nritems && !empty) {
3807 		/* Key greater than all keys in the leaf, right neighbor has
3808 		 * enough room for it and we're not emptying our leaf to delete
3809 		 * it, therefore use right neighbor to insert the new item and
3810 		 * no need to touch/dirty our left leaf. */
3811 		btrfs_tree_unlock(left);
3812 		free_extent_buffer(left);
3813 		path->nodes[0] = right;
3814 		path->slots[0] = 0;
3815 		path->slots[1]++;
3816 		return 0;
3817 	}
3818 
3819 	return __push_leaf_right(trans, root, path, min_data_size, empty,
3820 				right, free_space, left_nritems, min_slot);
3821 out_unlock:
3822 	btrfs_tree_unlock(right);
3823 	free_extent_buffer(right);
3824 	return 1;
3825 }
3826 
3827 /*
3828  * push some data in the path leaf to the left, trying to free up at
3829  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3830  *
3831  * max_slot can put a limit on how far into the leaf we'll push items.  The
3832  * item at 'max_slot' won't be touched.  Use (u32)-1 to make us do all the
3833  * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
3834  */
3835 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
3836 				     struct btrfs_root *root,
3837 				     struct btrfs_path *path, int data_size,
3838 				     int empty, struct extent_buffer *left,
3839 				     int free_space, u32 right_nritems,
3840 				     u32 max_slot)
3841 {
3842 	struct btrfs_disk_key disk_key;
3843 	struct extent_buffer *right = path->nodes[0];
3844 	int i;
3845 	int push_space = 0;
3846 	int push_items = 0;
3847 	struct btrfs_item *item;
3848 	u32 old_left_nritems;
3849 	u32 nr;
3850 	int ret = 0;
3851 	u32 this_item_size;
3852 	u32 old_left_item_size;
3853 	struct btrfs_map_token token;
3854 
3855 	btrfs_init_map_token(&token);
3856 
3857 	if (empty)
3858 		nr = min(right_nritems, max_slot);
3859 	else
3860 		nr = min(right_nritems - 1, max_slot);
3861 
3862 	for (i = 0; i < nr; i++) {
3863 		item = btrfs_item_nr(i);
3864 
3865 		if (!empty && push_items > 0) {
3866 			if (path->slots[0] < i)
3867 				break;
3868 			if (path->slots[0] == i) {
3869 				int space = btrfs_leaf_free_space(root, right);
3870 				if (space + push_space * 2 > free_space)
3871 					break;
3872 			}
3873 		}
3874 
3875 		if (path->slots[0] == i)
3876 			push_space += data_size;
3877 
3878 		this_item_size = btrfs_item_size(right, item);
3879 		if (this_item_size + sizeof(*item) + push_space > free_space)
3880 			break;
3881 
3882 		push_items++;
3883 		push_space += this_item_size + sizeof(*item);
3884 	}
3885 
3886 	if (push_items == 0) {
3887 		ret = 1;
3888 		goto out;
3889 	}
3890 	WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3891 
3892 	/* push data from right to left */
3893 	copy_extent_buffer(left, right,
3894 			   btrfs_item_nr_offset(btrfs_header_nritems(left)),
3895 			   btrfs_item_nr_offset(0),
3896 			   push_items * sizeof(struct btrfs_item));
3897 
3898 	push_space = BTRFS_LEAF_DATA_SIZE(root) -
3899 		     btrfs_item_offset_nr(right, push_items - 1);
3900 
3901 	copy_extent_buffer(left, right, btrfs_leaf_data(left) +
3902 		     leaf_data_end(root, left) - push_space,
3903 		     btrfs_leaf_data(right) +
3904 		     btrfs_item_offset_nr(right, push_items - 1),
3905 		     push_space);
3906 	old_left_nritems = btrfs_header_nritems(left);
3907 	BUG_ON(old_left_nritems <= 0);
3908 
3909 	old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3910 	for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3911 		u32 ioff;
3912 
3913 		item = btrfs_item_nr(i);
3914 
3915 		ioff = btrfs_token_item_offset(left, item, &token);
3916 		btrfs_set_token_item_offset(left, item,
3917 		      ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
3918 		      &token);
3919 	}
3920 	btrfs_set_header_nritems(left, old_left_nritems + push_items);
3921 
3922 	/* fixup right node */
3923 	if (push_items > right_nritems)
3924 		WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3925 		       right_nritems);
3926 
3927 	if (push_items < right_nritems) {
3928 		push_space = btrfs_item_offset_nr(right, push_items - 1) -
3929 						  leaf_data_end(root, right);
3930 		memmove_extent_buffer(right, btrfs_leaf_data(right) +
3931 				      BTRFS_LEAF_DATA_SIZE(root) - push_space,
3932 				      btrfs_leaf_data(right) +
3933 				      leaf_data_end(root, right), push_space);
3934 
3935 		memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3936 			      btrfs_item_nr_offset(push_items),
3937 			     (btrfs_header_nritems(right) - push_items) *
3938 			     sizeof(struct btrfs_item));
3939 	}
3940 	right_nritems -= push_items;
3941 	btrfs_set_header_nritems(right, right_nritems);
3942 	push_space = BTRFS_LEAF_DATA_SIZE(root);
3943 	for (i = 0; i < right_nritems; i++) {
3944 		item = btrfs_item_nr(i);
3945 
3946 		push_space = push_space - btrfs_token_item_size(right,
3947 								item, &token);
3948 		btrfs_set_token_item_offset(right, item, push_space, &token);
3949 	}
3950 
3951 	btrfs_mark_buffer_dirty(left);
3952 	if (right_nritems)
3953 		btrfs_mark_buffer_dirty(right);
3954 	else
3955 		clean_tree_block(trans, root, right);
3956 
3957 	btrfs_item_key(right, &disk_key, 0);
3958 	fixup_low_keys(root, path, &disk_key, 1);
3959 
3960 	/* then fixup the leaf pointer in the path */
3961 	if (path->slots[0] < push_items) {
3962 		path->slots[0] += old_left_nritems;
3963 		btrfs_tree_unlock(path->nodes[0]);
3964 		free_extent_buffer(path->nodes[0]);
3965 		path->nodes[0] = left;
3966 		path->slots[1] -= 1;
3967 	} else {
3968 		btrfs_tree_unlock(left);
3969 		free_extent_buffer(left);
3970 		path->slots[0] -= push_items;
3971 	}
3972 	BUG_ON(path->slots[0] < 0);
3973 	return ret;
3974 out:
3975 	btrfs_tree_unlock(left);
3976 	free_extent_buffer(left);
3977 	return ret;
3978 }
3979 
3980 /*
3981  * push some data in the path leaf to the left, trying to free up at
3982  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3983  *
3984  * max_slot can put a limit on how far into the leaf we'll push items.  The
3985  * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
3986  * items
3987  */
3988 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3989 			  *root, struct btrfs_path *path, int min_data_size,
3990 			  int data_size, int empty, u32 max_slot)
3991 {
3992 	struct extent_buffer *right = path->nodes[0];
3993 	struct extent_buffer *left;
3994 	int slot;
3995 	int free_space;
3996 	u32 right_nritems;
3997 	int ret = 0;
3998 
3999 	slot = path->slots[1];
4000 	if (slot == 0)
4001 		return 1;
4002 	if (!path->nodes[1])
4003 		return 1;
4004 
4005 	right_nritems = btrfs_header_nritems(right);
4006 	if (right_nritems == 0)
4007 		return 1;
4008 
4009 	btrfs_assert_tree_locked(path->nodes[1]);
4010 
4011 	left = read_node_slot(root, path->nodes[1], slot - 1);
4012 	if (left == NULL)
4013 		return 1;
4014 
4015 	btrfs_tree_lock(left);
4016 	btrfs_set_lock_blocking(left);
4017 
4018 	free_space = btrfs_leaf_free_space(root, left);
4019 	if (free_space < data_size) {
4020 		ret = 1;
4021 		goto out;
4022 	}
4023 
4024 	/* cow and double check */
4025 	ret = btrfs_cow_block(trans, root, left,
4026 			      path->nodes[1], slot - 1, &left);
4027 	if (ret) {
4028 		/* we hit -ENOSPC, but it isn't fatal here */
4029 		if (ret == -ENOSPC)
4030 			ret = 1;
4031 		goto out;
4032 	}
4033 
4034 	free_space = btrfs_leaf_free_space(root, left);
4035 	if (free_space < data_size) {
4036 		ret = 1;
4037 		goto out;
4038 	}
4039 
4040 	return __push_leaf_left(trans, root, path, min_data_size,
4041 			       empty, left, free_space, right_nritems,
4042 			       max_slot);
4043 out:
4044 	btrfs_tree_unlock(left);
4045 	free_extent_buffer(left);
4046 	return ret;
4047 }
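
/*
 * Illustrative caller pattern, not part of the original file: a return of
 * 1 from either push means "no room in that neighbor", not an error, so a
 * hypothetical caller tries the other direction before falling back to a
 * split, mirroring the logic in split_leaf() below.
 */
static int try_both_pushes_example(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path, int data_size)
{
	int ret;

	ret = push_leaf_right(trans, root, path, data_size, data_size, 0, 0);
	if (ret <= 0)
		return ret;	/* 0: enough room was freed, < 0: error */

	/* the right neighbor was full, try pushing left instead */
	return push_leaf_left(trans, root, path, data_size, data_size,
			      0, (u32)-1);
}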
4048 
4049 /*
4050  * helper for split_leaf(): move the items from 'mid' onwards out of
4051  * leaf 'l' into 'right', link 'right' into the parent and fix the path.
4052  */
4053 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
4054 				    struct btrfs_root *root,
4055 				    struct btrfs_path *path,
4056 				    struct extent_buffer *l,
4057 				    struct extent_buffer *right,
4058 				    int slot, int mid, int nritems)
4059 {
4060 	int data_copy_size;
4061 	int rt_data_off;
4062 	int i;
4063 	struct btrfs_disk_key disk_key;
4064 	struct btrfs_map_token token;
4065 
4066 	btrfs_init_map_token(&token);
4067 
4068 	nritems = nritems - mid;
4069 	btrfs_set_header_nritems(right, nritems);
4070 	data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
4071 
4072 	copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
4073 			   btrfs_item_nr_offset(mid),
4074 			   nritems * sizeof(struct btrfs_item));
4075 
4076 	copy_extent_buffer(right, l,
4077 		     btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
4078 		     data_copy_size, btrfs_leaf_data(l) +
4079 		     leaf_data_end(root, l), data_copy_size);
4080 
4081 	rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
4082 		      btrfs_item_end_nr(l, mid);
4083 
4084 	for (i = 0; i < nritems; i++) {
4085 		struct btrfs_item *item = btrfs_item_nr(i);
4086 		u32 ioff;
4087 
4088 		ioff = btrfs_token_item_offset(right, item, &token);
4089 		btrfs_set_token_item_offset(right, item,
4090 					    ioff + rt_data_off, &token);
4091 	}
4092 
4093 	btrfs_set_header_nritems(l, mid);
4094 	btrfs_item_key(right, &disk_key, 0);
4095 	insert_ptr(trans, root, path, &disk_key, right->start,
4096 		   path->slots[1] + 1, 1);
4097 
4098 	btrfs_mark_buffer_dirty(right);
4099 	btrfs_mark_buffer_dirty(l);
4100 	BUG_ON(path->slots[0] != slot);
4101 
4102 	if (mid <= slot) {
4103 		btrfs_tree_unlock(path->nodes[0]);
4104 		free_extent_buffer(path->nodes[0]);
4105 		path->nodes[0] = right;
4106 		path->slots[0] -= mid;
4107 		path->slots[1] += 1;
4108 	} else {
4109 		btrfs_tree_unlock(right);
4110 		free_extent_buffer(right);
4111 	}
4112 
4113 	BUG_ON(path->slots[0] < 0);
4114 }
4115 
4116 /*
4117  * double splits happen when we need to insert a big item in the middle
4118  * of a leaf.  A double split can leave us with 3 mostly empty leaves:
4119  * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
4120  *          A                 B                 C
4121  *
4122  * We avoid this by trying to push the items on either side of our target
4123  * into the adjacent leaves.  If all goes well we can avoid the double split
4124  * completely.
4125  */
4126 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
4127 					  struct btrfs_root *root,
4128 					  struct btrfs_path *path,
4129 					  int data_size)
4130 {
4131 	int ret;
4132 	int progress = 0;
4133 	int slot;
4134 	u32 nritems;
4135 	int space_needed = data_size;
4136 
4137 	slot = path->slots[0];
4138 	if (slot < btrfs_header_nritems(path->nodes[0]))
4139 		space_needed -= btrfs_leaf_free_space(root, path->nodes[0]);
4140 
4141 	/*
4142 	 * try to push all the items after our slot into the
4143 	 * right leaf
4144 	 */
4145 	ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
4146 	if (ret < 0)
4147 		return ret;
4148 
4149 	if (ret == 0)
4150 		progress++;
4151 
4152 	nritems = btrfs_header_nritems(path->nodes[0]);
4153 	/*
4154 	 * our goal is to get our slot at the start or end of a leaf.  If
4155 	 * we've done so we're done
4156 	 */
4157 	if (path->slots[0] == 0 || path->slots[0] == nritems)
4158 		return 0;
4159 
4160 	if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4161 		return 0;
4162 
4163 	/* try to push all the items before our slot into the left leaf */
4164 	slot = path->slots[0];
4165 	ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
4166 	if (ret < 0)
4167 		return ret;
4168 
4169 	if (ret == 0)
4170 		progress++;
4171 
4172 	if (progress)
4173 		return 0;
4174 	return 1;
4175 }
4176 
4177 /*
4178  * split the path's leaf in two, making sure there is at least data_size
4179  * available for the resulting leaf level of the path.
4180  *
4181  * returns 0 if all went well and < 0 on failure.
4182  */
4183 static noinline int split_leaf(struct btrfs_trans_handle *trans,
4184 			       struct btrfs_root *root,
4185 			       struct btrfs_key *ins_key,
4186 			       struct btrfs_path *path, int data_size,
4187 			       int extend)
4188 {
4189 	struct btrfs_disk_key disk_key;
4190 	struct extent_buffer *l;
4191 	u32 nritems;
4192 	int mid;
4193 	int slot;
4194 	struct extent_buffer *right;
4195 	int ret = 0;
4196 	int wret;
4197 	int split;
4198 	int num_doubles = 0;
4199 	int tried_avoid_double = 0;
4200 
4201 	l = path->nodes[0];
4202 	slot = path->slots[0];
4203 	if (extend && data_size + btrfs_item_size_nr(l, slot) +
4204 	    sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
4205 		return -EOVERFLOW;
4206 
4207 	/* first try to make some room by pushing left and right */
4208 	if (data_size && path->nodes[1]) {
4209 		int space_needed = data_size;
4210 
4211 		if (slot < btrfs_header_nritems(l))
4212 			space_needed -= btrfs_leaf_free_space(root, l);
4213 
4214 		wret = push_leaf_right(trans, root, path, space_needed,
4215 				       space_needed, 0, 0);
4216 		if (wret < 0)
4217 			return wret;
4218 		if (wret) {
4219 			wret = push_leaf_left(trans, root, path, space_needed,
4220 					      space_needed, 0, (u32)-1);
4221 			if (wret < 0)
4222 				return wret;
4223 		}
4224 		l = path->nodes[0];
4225 
4226 		/* did the pushes work? */
4227 		if (btrfs_leaf_free_space(root, l) >= data_size)
4228 			return 0;
4229 	}
4230 
4231 	if (!path->nodes[1]) {
4232 		ret = insert_new_root(trans, root, path, 1);
4233 		if (ret)
4234 			return ret;
4235 	}
4236 again:
4237 	split = 1;
4238 	l = path->nodes[0];
4239 	slot = path->slots[0];
4240 	nritems = btrfs_header_nritems(l);
4241 	mid = (nritems + 1) / 2;
4242 
4243 	if (mid <= slot) {
4244 		if (nritems == 1 ||
4245 		    leaf_space_used(l, mid, nritems - mid) + data_size >
4246 			BTRFS_LEAF_DATA_SIZE(root)) {
4247 			if (slot >= nritems) {
4248 				split = 0;
4249 			} else {
4250 				mid = slot;
4251 				if (mid != nritems &&
4252 				    leaf_space_used(l, mid, nritems - mid) +
4253 				    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4254 					if (data_size && !tried_avoid_double)
4255 						goto push_for_double;
4256 					split = 2;
4257 				}
4258 			}
4259 		}
4260 	} else {
4261 		if (leaf_space_used(l, 0, mid) + data_size >
4262 			BTRFS_LEAF_DATA_SIZE(root)) {
4263 			if (!extend && data_size && slot == 0) {
4264 				split = 0;
4265 			} else if ((extend || !data_size) && slot == 0) {
4266 				mid = 1;
4267 			} else {
4268 				mid = slot;
4269 				if (mid != nritems &&
4270 				    leaf_space_used(l, mid, nritems - mid) +
4271 				    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4272 					if (data_size && !tried_avoid_double)
4273 						goto push_for_double;
4274 					split = 2;
4275 				}
4276 			}
4277 		}
4278 	}
4279 
4280 	if (split == 0)
4281 		btrfs_cpu_key_to_disk(&disk_key, ins_key);
4282 	else
4283 		btrfs_item_key(l, &disk_key, mid);
4284 
4285 	right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
4286 					root->root_key.objectid,
4287 					&disk_key, 0, l->start, 0);
4288 	if (IS_ERR(right))
4289 		return PTR_ERR(right);
4290 
4291 	root_add_used(root, root->leafsize);
4292 
4293 	memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
4294 	btrfs_set_header_bytenr(right, right->start);
4295 	btrfs_set_header_generation(right, trans->transid);
4296 	btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
4297 	btrfs_set_header_owner(right, root->root_key.objectid);
4298 	btrfs_set_header_level(right, 0);
4299 	write_extent_buffer(right, root->fs_info->fsid,
4300 			    btrfs_header_fsid(), BTRFS_FSID_SIZE);
4301 
4302 	write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
4303 			    btrfs_header_chunk_tree_uuid(right),
4304 			    BTRFS_UUID_SIZE);
4305 
4306 	if (split == 0) {
4307 		if (mid <= slot) {
4308 			btrfs_set_header_nritems(right, 0);
4309 			insert_ptr(trans, root, path, &disk_key, right->start,
4310 				   path->slots[1] + 1, 1);
4311 			btrfs_tree_unlock(path->nodes[0]);
4312 			free_extent_buffer(path->nodes[0]);
4313 			path->nodes[0] = right;
4314 			path->slots[0] = 0;
4315 			path->slots[1] += 1;
4316 		} else {
4317 			btrfs_set_header_nritems(right, 0);
4318 			insert_ptr(trans, root, path, &disk_key, right->start,
4319 					  path->slots[1], 1);
4320 			btrfs_tree_unlock(path->nodes[0]);
4321 			free_extent_buffer(path->nodes[0]);
4322 			path->nodes[0] = right;
4323 			path->slots[0] = 0;
4324 			if (path->slots[1] == 0)
4325 				fixup_low_keys(root, path, &disk_key, 1);
4326 		}
4327 		btrfs_mark_buffer_dirty(right);
4328 		return ret;
4329 	}
4330 
4331 	copy_for_split(trans, root, path, l, right, slot, mid, nritems);
4332 
4333 	if (split == 2) {
4334 		BUG_ON(num_doubles != 0);
4335 		num_doubles++;
4336 		goto again;
4337 	}
4338 
4339 	return 0;
4340 
4341 push_for_double:
4342 	push_for_double_split(trans, root, path, data_size);
4343 	tried_avoid_double = 1;
4344 	if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4345 		return 0;
4346 	goto again;
4347 }
4348 
4349 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4350 					 struct btrfs_root *root,
4351 					 struct btrfs_path *path, int ins_len)
4352 {
4353 	struct btrfs_key key;
4354 	struct extent_buffer *leaf;
4355 	struct btrfs_file_extent_item *fi;
4356 	u64 extent_len = 0;
4357 	u32 item_size;
4358 	int ret;
4359 
4360 	leaf = path->nodes[0];
4361 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4362 
4363 	BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4364 	       key.type != BTRFS_EXTENT_CSUM_KEY);
4365 
4366 	if (btrfs_leaf_free_space(root, leaf) >= ins_len)
4367 		return 0;
4368 
4369 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4370 	if (key.type == BTRFS_EXTENT_DATA_KEY) {
4371 		fi = btrfs_item_ptr(leaf, path->slots[0],
4372 				    struct btrfs_file_extent_item);
4373 		extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4374 	}
4375 	btrfs_release_path(path);
4376 
4377 	path->keep_locks = 1;
4378 	path->search_for_split = 1;
4379 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4380 	path->search_for_split = 0;
4381 	if (ret > 0)	/* the key is no longer exactly there */
4382 		ret = -EAGAIN;
4383 	if (ret < 0)
4384 		goto err;
4385 	ret = -EAGAIN;
4386 	leaf = path->nodes[0];
4387 	/* if our item isn't there or got smaller, return now */
4388 	if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4389 		goto err;
4390 	/* the leaf has changed, it now has room.  return now */
4391 	if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
4392 		goto err;
4393 
4394 	if (key.type == BTRFS_EXTENT_DATA_KEY) {
4395 		fi = btrfs_item_ptr(leaf, path->slots[0],
4396 				    struct btrfs_file_extent_item);
4397 		if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4398 			goto err;
4399 	}
4400 
4401 	btrfs_set_path_blocking(path);
4402 	ret = split_leaf(trans, root, &key, path, ins_len, 1);
4403 	if (ret)
4404 		goto err;
4405 
4406 	path->keep_locks = 0;
4407 	btrfs_unlock_up_safe(path, 1);
4408 	return 0;
4409 err:
4410 	path->keep_locks = 0;
4411 	return ret;
4412 }
4413 
4414 static noinline int split_item(struct btrfs_trans_handle *trans,
4415 			       struct btrfs_root *root,
4416 			       struct btrfs_path *path,
4417 			       struct btrfs_key *new_key,
4418 			       unsigned long split_offset)
4419 {
4420 	struct extent_buffer *leaf;
4421 	struct btrfs_item *item;
4422 	struct btrfs_item *new_item;
4423 	int slot;
4424 	char *buf;
4425 	u32 nritems;
4426 	u32 item_size;
4427 	u32 orig_offset;
4428 	struct btrfs_disk_key disk_key;
4429 
4430 	leaf = path->nodes[0];
4431 	BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
4432 
4433 	btrfs_set_path_blocking(path);
4434 
4435 	item = btrfs_item_nr(path->slots[0]);
4436 	orig_offset = btrfs_item_offset(leaf, item);
4437 	item_size = btrfs_item_size(leaf, item);
4438 
4439 	buf = kmalloc(item_size, GFP_NOFS);
4440 	if (!buf)
4441 		return -ENOMEM;
4442 
4443 	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4444 			    path->slots[0]), item_size);
4445 
4446 	slot = path->slots[0] + 1;
4447 	nritems = btrfs_header_nritems(leaf);
4448 	if (slot != nritems) {
4449 		/* shift the items */
4450 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4451 				btrfs_item_nr_offset(slot),
4452 				(nritems - slot) * sizeof(struct btrfs_item));
4453 	}
4454 
4455 	btrfs_cpu_key_to_disk(&disk_key, new_key);
4456 	btrfs_set_item_key(leaf, &disk_key, slot);
4457 
4458 	new_item = btrfs_item_nr(slot);
4459 
4460 	btrfs_set_item_offset(leaf, new_item, orig_offset);
4461 	btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4462 
4463 	btrfs_set_item_offset(leaf, item,
4464 			      orig_offset + item_size - split_offset);
4465 	btrfs_set_item_size(leaf, item, split_offset);
4466 
4467 	btrfs_set_header_nritems(leaf, nritems + 1);
4468 
4469 	/* write the data for the start of the original item */
4470 	write_extent_buffer(leaf, buf,
4471 			    btrfs_item_ptr_offset(leaf, path->slots[0]),
4472 			    split_offset);
4473 
4474 	/* write the data for the new item */
4475 	write_extent_buffer(leaf, buf + split_offset,
4476 			    btrfs_item_ptr_offset(leaf, slot),
4477 			    item_size - split_offset);
4478 	btrfs_mark_buffer_dirty(leaf);
4479 
4480 	BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
4481 	kfree(buf);
4482 	return 0;
4483 }
4484 
4485 /*
4486  * This function splits a single item into two items,
4487  * giving 'new_key' to the new item and splitting the
4488  * old one at split_offset (from the start of the item).
4489  *
4490  * The path may be released by this operation.  After
4491  * the split, the path is pointing to the old item.  The
4492  * new item is going to be in the same node as the old one.
4493  *
4494  * Note, the item being split must be small enough to live alone on
4495  * a tree block with room for one extra struct btrfs_item
4496  *
4497  * This allows us to split the item in place, keeping a lock on the
4498  * leaf the entire time.
4499  */
4500 int btrfs_split_item(struct btrfs_trans_handle *trans,
4501 		     struct btrfs_root *root,
4502 		     struct btrfs_path *path,
4503 		     struct btrfs_key *new_key,
4504 		     unsigned long split_offset)
4505 {
4506 	int ret;
4507 	ret = setup_leaf_for_split(trans, root, path,
4508 				   sizeof(struct btrfs_item));
4509 	if (ret)
4510 		return ret;
4511 
4512 	ret = split_item(trans, root, path, new_key, split_offset);
4513 	return ret;
4514 }
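
/*
 * Hypothetical usage sketch, not part of the original file: split the item
 * the path points at into two halves.  Deriving the new key from the old
 * offset is purely illustrative; a real caller computes it from the item's
 * semantics (e.g. the byte range covered by a csum item).
 */
static int split_in_half_example(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path)
{
	struct btrfs_key new_key;
	u32 size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);

	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset += size / 2;	/* illustrative key for the tail */

	/* the old item keeps bytes [0, size/2), the new item gets the rest */
	return btrfs_split_item(trans, root, path, &new_key, size / 2);
}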
4515 
4516 /*
4517  * This function duplicates an item, giving 'new_key' to the new item.
4518  * It guarantees both items live in the same tree leaf and the new item
4519  * is contiguous with the original item.
4520  *
4521  * This allows us to split file extent in place, keeping a lock on the
4522  * This allows us to split a file extent in place, keeping a lock on the
4523  */
4524 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4525 			 struct btrfs_root *root,
4526 			 struct btrfs_path *path,
4527 			 struct btrfs_key *new_key)
4528 {
4529 	struct extent_buffer *leaf;
4530 	int ret;
4531 	u32 item_size;
4532 
4533 	leaf = path->nodes[0];
4534 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4535 	ret = setup_leaf_for_split(trans, root, path,
4536 				   item_size + sizeof(struct btrfs_item));
4537 	if (ret)
4538 		return ret;
4539 
4540 	path->slots[0]++;
4541 	setup_items_for_insert(root, path, new_key, &item_size,
4542 			       item_size, item_size +
4543 			       sizeof(struct btrfs_item), 1);
4544 	leaf = path->nodes[0];
4545 	memcpy_extent_buffer(leaf,
4546 			     btrfs_item_ptr_offset(leaf, path->slots[0]),
4547 			     btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4548 			     item_size);
4549 	return 0;
4550 }
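
/*
 * Hypothetical usage sketch, not part of the original file: make a
 * byte-for-byte copy of the current item under a caller-chosen key and
 * then edit the copy, the pattern used when splitting file extents.
 */
static int duplicate_example(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 new_offset)
{
	struct btrfs_key key;
	int ret;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	key.offset = new_offset;	/* illustrative new key offset */

	ret = btrfs_duplicate_item(trans, root, path, &key);
	if (ret)	/* may be -EAGAIN if the path had to be dropped */
		return ret;

	/* path->slots[0] now points at the copy; adjust it here */
	btrfs_mark_buffer_dirty(path->nodes[0]);
	return 0;
}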
4551 
4552 /*
4553  * make the item pointed to by the path smaller.  new_size indicates
4554  * how small to make it, and from_end tells us if we just chop bytes
4555  * off the end of the item or if we shift the item to chop bytes off
4556  * the front.
4557  */
4558 void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
4559 			 u32 new_size, int from_end)
4560 {
4561 	int slot;
4562 	struct extent_buffer *leaf;
4563 	struct btrfs_item *item;
4564 	u32 nritems;
4565 	unsigned int data_end;
4566 	unsigned int old_data_start;
4567 	unsigned int old_size;
4568 	unsigned int size_diff;
4569 	int i;
4570 	struct btrfs_map_token token;
4571 
4572 	btrfs_init_map_token(&token);
4573 
4574 	leaf = path->nodes[0];
4575 	slot = path->slots[0];
4576 
4577 	old_size = btrfs_item_size_nr(leaf, slot);
4578 	if (old_size == new_size)
4579 		return;
4580 
4581 	nritems = btrfs_header_nritems(leaf);
4582 	data_end = leaf_data_end(root, leaf);
4583 
4584 	old_data_start = btrfs_item_offset_nr(leaf, slot);
4585 
4586 	size_diff = old_size - new_size;
4587 
4588 	BUG_ON(slot < 0);
4589 	BUG_ON(slot >= nritems);
4590 
4591 	/*
4592 	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4593 	 */
4594 	/* first correct the data pointers */
4595 	for (i = slot; i < nritems; i++) {
4596 		u32 ioff;
4597 		item = btrfs_item_nr(i);
4598 
4599 		ioff = btrfs_token_item_offset(leaf, item, &token);
4600 		btrfs_set_token_item_offset(leaf, item,
4601 					    ioff + size_diff, &token);
4602 	}
4603 
4604 	/* shift the data */
4605 	if (from_end) {
4606 		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4607 			      data_end + size_diff, btrfs_leaf_data(leaf) +
4608 			      data_end, old_data_start + new_size - data_end);
4609 	} else {
4610 		struct btrfs_disk_key disk_key;
4611 		u64 offset;
4612 
4613 		btrfs_item_key(leaf, &disk_key, slot);
4614 
4615 		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4616 			unsigned long ptr;
4617 			struct btrfs_file_extent_item *fi;
4618 
4619 			fi = btrfs_item_ptr(leaf, slot,
4620 					    struct btrfs_file_extent_item);
4621 			fi = (struct btrfs_file_extent_item *)(
4622 			     (unsigned long)fi - size_diff);
4623 
4624 			if (btrfs_file_extent_type(leaf, fi) ==
4625 			    BTRFS_FILE_EXTENT_INLINE) {
4626 				ptr = btrfs_item_ptr_offset(leaf, slot);
4627 				memmove_extent_buffer(leaf, ptr,
4628 				      (unsigned long)fi,
4629 				      offsetof(struct btrfs_file_extent_item,
4630 						 disk_bytenr));
4631 			}
4632 		}
4633 
4634 		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4635 			      data_end + size_diff, btrfs_leaf_data(leaf) +
4636 			      data_end, old_data_start - data_end);
4637 
4638 		offset = btrfs_disk_key_offset(&disk_key);
4639 		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4640 		btrfs_set_item_key(leaf, &disk_key, slot);
4641 		if (slot == 0)
4642 			fixup_low_keys(root, path, &disk_key, 1);
4643 	}
4644 
4645 	item = btrfs_item_nr(slot);
4646 	btrfs_set_item_size(leaf, item, new_size);
4647 	btrfs_mark_buffer_dirty(leaf);
4648 
4649 	if (btrfs_leaf_free_space(root, leaf) < 0) {
4650 		btrfs_print_leaf(root, leaf);
4651 		BUG();
4652 	}
4653 }
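
/*
 * Illustrative sketch, not part of the original file: chop tail_bytes off
 * the end of the current item (from_end = 1).  The caller must ensure
 * tail_bytes is smaller than the item; shrinking from the front instead
 * (from_end = 0) also shifts the key offset, as shown above.
 */
static void drop_tail_example(struct btrfs_root *root,
			      struct btrfs_path *path, u32 tail_bytes)
{
	u32 old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);

	btrfs_truncate_item(root, path, old_size - tail_bytes, 1);
}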
4654 
4655 /*
4656  * make the item pointed to by the path bigger, data_size is the added size.
4657  */
4658 void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
4659 		       u32 data_size)
4660 {
4661 	int slot;
4662 	struct extent_buffer *leaf;
4663 	struct btrfs_item *item;
4664 	u32 nritems;
4665 	unsigned int data_end;
4666 	unsigned int old_data;
4667 	unsigned int old_size;
4668 	int i;
4669 	struct btrfs_map_token token;
4670 
4671 	btrfs_init_map_token(&token);
4672 
4673 	leaf = path->nodes[0];
4674 
4675 	nritems = btrfs_header_nritems(leaf);
4676 	data_end = leaf_data_end(root, leaf);
4677 
4678 	if (btrfs_leaf_free_space(root, leaf) < data_size) {
4679 		btrfs_print_leaf(root, leaf);
4680 		BUG();
4681 	}
4682 	slot = path->slots[0];
4683 	old_data = btrfs_item_end_nr(leaf, slot);
4684 
4685 	BUG_ON(slot < 0);
4686 	if (slot >= nritems) {
4687 		btrfs_print_leaf(root, leaf);
4688 		btrfs_crit(root->fs_info, "slot %d too large, nritems %d",
4689 		       slot, nritems);
4690 		BUG_ON(1);
4691 	}
4692 
4693 	/*
4694 	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4695 	 */
4696 	/* first correct the data pointers */
4697 	for (i = slot; i < nritems; i++) {
4698 		u32 ioff;
4699 		item = btrfs_item_nr(i);
4700 
4701 		ioff = btrfs_token_item_offset(leaf, item, &token);
4702 		btrfs_set_token_item_offset(leaf, item,
4703 					    ioff - data_size, &token);
4704 	}
4705 
4706 	/* shift the data */
4707 	memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4708 		      data_end - data_size, btrfs_leaf_data(leaf) +
4709 		      data_end, old_data - data_end);
4710 
4711 	data_end = old_data;
4712 	old_size = btrfs_item_size_nr(leaf, slot);
4713 	item = btrfs_item_nr(slot);
4714 	btrfs_set_item_size(leaf, item, old_size + data_size);
4715 	btrfs_mark_buffer_dirty(leaf);
4716 
4717 	if (btrfs_leaf_free_space(root, leaf) < 0) {
4718 		btrfs_print_leaf(root, leaf);
4719 		BUG();
4720 	}
4721 }
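
/*
 * Illustrative counterpart sketch, not part of the original file: grow the
 * current item by len bytes and write new data into the added tail.  The
 * caller must have reserved leaf space beforehand (e.g. by searching with
 * a positive ins_len), since btrfs_extend_item() BUGs on a full leaf.
 */
static void append_bytes_example(struct btrfs_root *root,
				 struct btrfs_path *path,
				 void *data, u32 len)
{
	struct extent_buffer *leaf = path->nodes[0];
	u32 old_size = btrfs_item_size_nr(leaf, path->slots[0]);
	unsigned long ptr;

	btrfs_extend_item(root, path, len);

	/* the item grew in place; append the new bytes after the old ones */
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, data, ptr + old_size, len);
	btrfs_mark_buffer_dirty(leaf);
}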
4722 
4723 /*
4724  * this is a helper for btrfs_insert_empty_items, the main goal here is
4725  * to save stack depth by doing the bulk of the work in a function
4726  * that doesn't call btrfs_search_slot
4727  */
4728 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4729 			    struct btrfs_key *cpu_key, u32 *data_size,
4730 			    u32 total_data, u32 total_size, int nr)
4731 {
4732 	struct btrfs_item *item;
4733 	int i;
4734 	u32 nritems;
4735 	unsigned int data_end;
4736 	struct btrfs_disk_key disk_key;
4737 	struct extent_buffer *leaf;
4738 	int slot;
4739 	struct btrfs_map_token token;
4740 
4741 	btrfs_init_map_token(&token);
4742 
4743 	leaf = path->nodes[0];
4744 	slot = path->slots[0];
4745 
4746 	nritems = btrfs_header_nritems(leaf);
4747 	data_end = leaf_data_end(root, leaf);
4748 
4749 	if (btrfs_leaf_free_space(root, leaf) < total_size) {
4750 		btrfs_print_leaf(root, leaf);
4751 		btrfs_crit(root->fs_info, "not enough freespace need %u have %d",
4752 		       total_size, btrfs_leaf_free_space(root, leaf));
4753 		BUG();
4754 	}
4755 
4756 	if (slot != nritems) {
4757 		unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4758 
4759 		if (old_data < data_end) {
4760 			btrfs_print_leaf(root, leaf);
4761 			btrfs_crit(root->fs_info, "slot %d old_data %d data_end %d",
4762 			       slot, old_data, data_end);
4763 			BUG_ON(1);
4764 		}
4765 		/*
4766 		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4767 		 */
4768 		/* first correct the data pointers */
4769 		for (i = slot; i < nritems; i++) {
4770 			u32 ioff;
4771 
4772 			item = btrfs_item_nr(i);
4773 			ioff = btrfs_token_item_offset(leaf, item, &token);
4774 			btrfs_set_token_item_offset(leaf, item,
4775 						    ioff - total_data, &token);
4776 		}
4777 		/* shift the items */
4778 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4779 			      btrfs_item_nr_offset(slot),
4780 			      (nritems - slot) * sizeof(struct btrfs_item));
4781 
4782 		/* shift the data */
4783 		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4784 			      data_end - total_data, btrfs_leaf_data(leaf) +
4785 			      data_end, old_data - data_end);
4786 		data_end = old_data;
4787 	}
4788 
4789 	/* setup the item for the new data */
4790 	for (i = 0; i < nr; i++) {
4791 		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4792 		btrfs_set_item_key(leaf, &disk_key, slot + i);
4793 		item = btrfs_item_nr(slot + i);
4794 		btrfs_set_token_item_offset(leaf, item,
4795 					    data_end - data_size[i], &token);
4796 		data_end -= data_size[i];
4797 		btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4798 	}
4799 
4800 	btrfs_set_header_nritems(leaf, nritems + nr);
4801 
4802 	if (slot == 0) {
4803 		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4804 		fixup_low_keys(root, path, &disk_key, 1);
4805 	}
4806 	btrfs_unlock_up_safe(path, 1);
4807 	btrfs_mark_buffer_dirty(leaf);
4808 
4809 	if (btrfs_leaf_free_space(root, leaf) < 0) {
4810 		btrfs_print_leaf(root, leaf);
4811 		BUG();
4812 	}
4813 }
4814 
4815 /*
4816  * Given a key and some data, insert items into the tree.
4817  * This does all the path init required, making room in the tree if needed.
4818  */
4819 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4820 			    struct btrfs_root *root,
4821 			    struct btrfs_path *path,
4822 			    struct btrfs_key *cpu_key, u32 *data_size,
4823 			    int nr)
4824 {
4825 	int ret = 0;
4826 	int slot;
4827 	int i;
4828 	u32 total_size = 0;
4829 	u32 total_data = 0;
4830 
4831 	for (i = 0; i < nr; i++)
4832 		total_data += data_size[i];
4833 
4834 	total_size = total_data + (nr * sizeof(struct btrfs_item));
4835 	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4836 	if (ret == 0)
4837 		return -EEXIST;
4838 	if (ret < 0)
4839 		return ret;
4840 
4841 	slot = path->slots[0];
4842 	BUG_ON(slot < 0);
4843 
4844 	setup_items_for_insert(root, path, cpu_key, data_size,
4845 			       total_data, total_size, nr);
4846 	return 0;
4847 }
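
/*
 * Hypothetical batched-insert sketch, not part of the original file:
 * reserve two adjacent items with a single tree search.  The keys must be
 * sorted and the payload sizes here are illustrative; the reserved space
 * is not zeroed, so both items should be written before the path is
 * released.
 */
static int insert_pair_example(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_path *path,
			       struct btrfs_key *keys /* two sorted keys */)
{
	u32 sizes[2] = { 8, 16 };	/* illustrative payload sizes */
	int ret;

	ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
	if (ret)
		return ret;

	/* the items now live at path->slots[0] and path->slots[0] + 1 */
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}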
4848 
4849 /*
4850  * Given a key and some data, insert an item into the tree.
4851  * This does all the path init required, making room in the tree if needed.
4852  */
4853 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
4854 		      *root, struct btrfs_key *cpu_key, void *data, u32
4855 		      data_size)
4856 {
4857 	int ret = 0;
4858 	struct btrfs_path *path;
4859 	struct extent_buffer *leaf;
4860 	unsigned long ptr;
4861 
4862 	path = btrfs_alloc_path();
4863 	if (!path)
4864 		return -ENOMEM;
4865 	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4866 	if (!ret) {
4867 		leaf = path->nodes[0];
4868 		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4869 		write_extent_buffer(leaf, data, ptr, data_size);
4870 		btrfs_mark_buffer_dirty(leaf);
4871 	}
4872 	btrfs_free_path(path);
4873 	return ret;
4874 }
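
/*
 * Hypothetical usage sketch, not part of the original file: insert a small
 * fixed-size payload under a caller-supplied key.  The payload value is
 * illustrative only.
 */
static int insert_u64_example(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct btrfs_key *key)
{
	u64 payload = 42;	/* arbitrary example data */

	return btrfs_insert_item(trans, root, key, &payload,
				 sizeof(payload));
}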
4875 
4876 /*
4877  * delete the pointer from a given node.
4878  *
4879  * the tree should have been previously balanced so the deletion does not
4880  * empty a node.
4881  */
4882 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4883 		    int level, int slot)
4884 {
4885 	struct extent_buffer *parent = path->nodes[level];
4886 	u32 nritems;
4887 	int ret;
4888 
4889 	nritems = btrfs_header_nritems(parent);
4890 	if (slot != nritems - 1) {
4891 		if (level)
4892 			tree_mod_log_eb_move(root->fs_info, parent, slot,
4893 					     slot + 1, nritems - slot - 1);
4894 		memmove_extent_buffer(parent,
4895 			      btrfs_node_key_ptr_offset(slot),
4896 			      btrfs_node_key_ptr_offset(slot + 1),
4897 			      sizeof(struct btrfs_key_ptr) *
4898 			      (nritems - slot - 1));
4899 	} else if (level) {
4900 		ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
4901 					      MOD_LOG_KEY_REMOVE, GFP_NOFS);
4902 		BUG_ON(ret < 0);
4903 	}
4904 
4905 	nritems--;
4906 	btrfs_set_header_nritems(parent, nritems);
4907 	if (nritems == 0 && parent == root->node) {
4908 		BUG_ON(btrfs_header_level(root->node) != 1);
4909 		/* just turn the root into a leaf and break */
4910 		btrfs_set_header_level(root->node, 0);
4911 	} else if (slot == 0) {
4912 		struct btrfs_disk_key disk_key;
4913 
4914 		btrfs_node_key(parent, &disk_key, 0);
4915 		fixup_low_keys(root, path, &disk_key, level + 1);
4916 	}
4917 	btrfs_mark_buffer_dirty(parent);
4918 }
4919 
4920 /*
4921  * a helper function to delete the leaf pointed to by path->slots[1] and
4922  * path->nodes[1].
4923  *
4924  * This deletes the pointer in path->nodes[1] and frees the leaf
4925  * block extent.  zero is returned if it all worked out, < 0 otherwise.
4926  *
4927  * The path must have already been setup for deleting the leaf, including
4928  * all the proper balancing.  path->nodes[1] must be locked.
4929  */
4930 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4931 				    struct btrfs_root *root,
4932 				    struct btrfs_path *path,
4933 				    struct extent_buffer *leaf)
4934 {
4935 	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4936 	del_ptr(root, path, 1, path->slots[1]);
4937 
4938 	/*
4939 	 * btrfs_free_extent is expensive, we want to make sure we
4940 	 * aren't holding any locks when we call it
4941 	 */
4942 	btrfs_unlock_up_safe(path, 0);
4943 
4944 	root_sub_used(root, leaf->len);
4945 
4946 	extent_buffer_get(leaf);
4947 	btrfs_free_tree_block(trans, root, leaf, 0, 1);
4948 	free_extent_buffer_stale(leaf);
4949 }
4950 /*
4951  * delete the item at the leaf level in path.  If that empties
4952  * the leaf, remove it from the tree
4953  */
4954 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4955 		    struct btrfs_path *path, int slot, int nr)
4956 {
4957 	struct extent_buffer *leaf;
4958 	struct btrfs_item *item;
4959 	int last_off;
4960 	int dsize = 0;
4961 	int ret = 0;
4962 	int wret;
4963 	int i;
4964 	u32 nritems;
4965 	struct btrfs_map_token token;
4966 
4967 	btrfs_init_map_token(&token);
4968 
4969 	leaf = path->nodes[0];
4970 	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4971 
4972 	for (i = 0; i < nr; i++)
4973 		dsize += btrfs_item_size_nr(leaf, slot + i);
4974 
4975 	nritems = btrfs_header_nritems(leaf);
4976 
4977 	if (slot + nr != nritems) {
4978 		int data_end = leaf_data_end(root, leaf);
4979 
4980 		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4981 			      data_end + dsize,
4982 			      btrfs_leaf_data(leaf) + data_end,
4983 			      last_off - data_end);
4984 
4985 		for (i = slot + nr; i < nritems; i++) {
4986 			u32 ioff;
4987 
4988 			item = btrfs_item_nr(i);
4989 			ioff = btrfs_token_item_offset(leaf, item, &token);
4990 			btrfs_set_token_item_offset(leaf, item,
4991 						    ioff + dsize, &token);
4992 		}
4993 
4994 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4995 			      btrfs_item_nr_offset(slot + nr),
4996 			      sizeof(struct btrfs_item) *
4997 			      (nritems - slot - nr));
4998 	}
4999 	btrfs_set_header_nritems(leaf, nritems - nr);
5000 	nritems -= nr;
5001 
5002 	/* delete the leaf if we've emptied it */
5003 	if (nritems == 0) {
5004 		if (leaf == root->node) {
5005 			btrfs_set_header_level(leaf, 0);
5006 		} else {
5007 			btrfs_set_path_blocking(path);
5008 			clean_tree_block(trans, root, leaf);
5009 			btrfs_del_leaf(trans, root, path, leaf);
5010 		}
5011 	} else {
5012 		int used = leaf_space_used(leaf, 0, nritems);
5013 		if (slot == 0) {
5014 			struct btrfs_disk_key disk_key;
5015 
5016 			btrfs_item_key(leaf, &disk_key, 0);
5017 			fixup_low_keys(root, path, &disk_key, 1);
5018 		}
5019 
5020 		/* delete the leaf if it is mostly empty */
5021 		if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
5022 			/* push_leaf_left fixes the path.
5023 			 * make sure the path still points to our leaf
5024 			 * for a possible call to del_ptr below
5025 			 */
5026 			slot = path->slots[1];
5027 			extent_buffer_get(leaf);
5028 
5029 			btrfs_set_path_blocking(path);
5030 			wret = push_leaf_left(trans, root, path, 1, 1,
5031 					      1, (u32)-1);
5032 			if (wret < 0 && wret != -ENOSPC)
5033 				ret = wret;
5034 
5035 			if (path->nodes[0] == leaf &&
5036 			    btrfs_header_nritems(leaf)) {
5037 				wret = push_leaf_right(trans, root, path, 1,
5038 						       1, 1, 0);
5039 				if (wret < 0 && wret != -ENOSPC)
5040 					ret = wret;
5041 			}
5042 
5043 			if (btrfs_header_nritems(leaf) == 0) {
5044 				path->slots[1] = slot;
5045 				btrfs_del_leaf(trans, root, path, leaf);
5046 				free_extent_buffer(leaf);
5047 				ret = 0;
5048 			} else {
5049 				/* if we're still in the path, make sure
5050 				 * we're dirty.  Otherwise, one of the
5051 				 * push_leaf functions must have already
5052 				 * dirtied this buffer
5053 				 */
5054 				if (path->nodes[0] == leaf)
5055 					btrfs_mark_buffer_dirty(leaf);
5056 				free_extent_buffer(leaf);
5057 			}
5058 		} else {
5059 			btrfs_mark_buffer_dirty(leaf);
5060 		}
5061 	}
5062 	return ret;
5063 }
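
/*
 * Hypothetical deletion sketch, not part of the original file: look up a
 * key and delete exactly that slot.  Passing ins_len = -1 tells
 * btrfs_search_slot() the search is for a deletion so it can balance
 * nodes on the way down.
 */
static int delete_one_example(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret == 0)
		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
	else if (ret > 0)	/* the key was not present */
		ret = -ENOENT;

	btrfs_free_path(path);
	return ret;
}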
5064 
5065 /*
5066  * search the tree again to find a leaf with lesser keys
5067  * returns 0 if it found something or 1 if there are no lesser leaves.
5068  * returns < 0 on io errors.
5069  *
5070  * This may release the path, and so you may lose any locks held at the
5071  * time you call it.
5072  */
5073 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
5074 {
5075 	struct btrfs_key key;
5076 	struct btrfs_disk_key found_key;
5077 	int ret;
5078 
5079 	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
5080 
5081 	if (key.offset > 0) {
5082 		key.offset--;
5083 	} else if (key.type > 0) {
5084 		key.type--;
5085 		key.offset = (u64)-1;
5086 	} else if (key.objectid > 0) {
5087 		key.objectid--;
5088 		key.type = (u8)-1;
5089 		key.offset = (u64)-1;
5090 	} else {
5091 		return 1;
5092 	}
5093 
5094 	btrfs_release_path(path);
5095 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5096 	if (ret < 0)
5097 		return ret;
5098 	btrfs_item_key(path->nodes[0], &found_key, 0);
5099 	ret = comp_keys(&found_key, &key);
5100 	/*
5101 	 * We might have had an item with the previous key in the tree right
5102 	 * before we released our path. And after we released our path, that
5103 	 * item might have been pushed to the first slot (0) of the leaf we
5104 	 * were holding due to a tree balance. Alternatively, an item with the
5105 	 * previous key can exist as the only element of a leaf (big fat item).
5106 	 * Therefore account for these 2 cases, so that our callers (like
5107 	 * btrfs_previous_item) don't miss an existing item with a key matching
5108 	 * the previous key we computed above.
5109 	 */
5110 	if (ret <= 0)
5111 		return 0;
5112 	return 1;
5113 }
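
/*
 * Illustrative sketch, not part of the original file: step the path back
 * one item, crossing into the previous leaf via btrfs_prev_leaf() when
 * slot 0 is reached.  This is the shape the btrfs_previous_item() helper
 * builds on.
 */
static int step_back_example(struct btrfs_root *root,
			     struct btrfs_path *path)
{
	int ret;

	if (path->slots[0] > 0) {
		path->slots[0]--;
		return 0;
	}
	ret = btrfs_prev_leaf(root, path);
	if (ret)	/* 1: no lesser leaf exists, < 0: I/O error */
		return ret;
	path->slots[0] = btrfs_header_nritems(path->nodes[0]) - 1;
	return 0;
}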
5114 
5115 /*
5116  * A helper function to walk down the tree starting at min_key, and looking
5117  * for nodes or leaves that have a minimum transaction id.
5118  * This is used by the btree defrag code, and tree logging
5119  *
5120  * This does not cow, but it does stuff the starting key it finds back
5121  * into min_key, so you can call btrfs_search_slot with cow=1 on the
5122  * key and get a writable path.
5123  *
5124  * This does lock as it descends, and path->keep_locks should be set
5125  * to 1 by the caller.
5126  *
5127  * This honors path->lowest_level to prevent descent past a given level
5128  * of the tree.
5129  *
5130  * min_trans indicates the oldest transaction that you are interested
5131  * in walking through.  Any nodes or leaves older than min_trans are
5132  * skipped over (without reading them).
5133  *
5134  * returns zero if something useful was found, < 0 on error and 1 if there
5135  * was nothing in the tree that matched the search criteria.
5136  */
5137 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
5138 			 struct btrfs_path *path,
5139 			 u64 min_trans)
5140 {
5141 	struct extent_buffer *cur;
5142 	struct btrfs_key found_key;
5143 	int slot;
5144 	int sret;
5145 	u32 nritems;
5146 	int level;
5147 	int ret = 1;
5148 
5149 	WARN_ON(!path->keep_locks);
5150 again:
5151 	cur = btrfs_read_lock_root_node(root);
5152 	level = btrfs_header_level(cur);
5153 	WARN_ON(path->nodes[level]);
5154 	path->nodes[level] = cur;
5155 	path->locks[level] = BTRFS_READ_LOCK;
5156 
5157 	if (btrfs_header_generation(cur) < min_trans) {
5158 		ret = 1;
5159 		goto out;
5160 	}
5161 	while (1) {
5162 		nritems = btrfs_header_nritems(cur);
5163 		level = btrfs_header_level(cur);
5164 		sret = bin_search(cur, min_key, level, &slot);
5165 
5166 		/* at the lowest level, we're done, setup the path and exit */
5167 		if (level == path->lowest_level) {
5168 			if (slot >= nritems)
5169 				goto find_next_key;
5170 			ret = 0;
5171 			path->slots[level] = slot;
5172 			btrfs_item_key_to_cpu(cur, &found_key, slot);
5173 			goto out;
5174 		}
5175 		if (sret && slot > 0)
5176 			slot--;
5177 		/*
5178 		 * check this node pointer against the min_trans parameter.
5179 		 * If it is too old, skip to the next one.
5180 		 */
5181 		while (slot < nritems) {
5182 			u64 gen;
5183 
5184 			gen = btrfs_node_ptr_generation(cur, slot);
5185 			if (gen < min_trans) {
5186 				slot++;
5187 				continue;
5188 			}
5189 			break;
5190 		}
5191 find_next_key:
5192 		/*
5193 		 * we didn't find a candidate key in this node, walk forward
5194 		 * and find another one
5195 		 */
5196 		if (slot >= nritems) {
5197 			path->slots[level] = slot;
5198 			btrfs_set_path_blocking(path);
5199 			sret = btrfs_find_next_key(root, path, min_key, level,
5200 						  min_trans);
5201 			if (sret == 0) {
5202 				btrfs_release_path(path);
5203 				goto again;
5204 			} else {
5205 				goto out;
5206 			}
5207 		}
5208 		/* save our key for returning back */
5209 		btrfs_node_key_to_cpu(cur, &found_key, slot);
5210 		path->slots[level] = slot;
5211 		if (level == path->lowest_level) {
5212 			ret = 0;
5213 			unlock_up(path, level, 1, 0, NULL);
5214 			goto out;
5215 		}
5216 		btrfs_set_path_blocking(path);
5217 		cur = read_node_slot(root, cur, slot);
5218 		BUG_ON(!cur); /* -ENOMEM */
5219 
5220 		btrfs_tree_read_lock(cur);
5221 
5222 		path->locks[level - 1] = BTRFS_READ_LOCK;
5223 		path->nodes[level - 1] = cur;
5224 		unlock_up(path, level, 1, 0, NULL);
5225 		btrfs_clear_path_blocking(path, NULL, 0);
5226 	}
5227 out:
5228 	if (ret == 0)
5229 		memcpy(min_key, &found_key, sizeof(found_key));
5230 	btrfs_set_path_blocking(path);
5231 	return ret;
5232 }
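
/*
 * Hypothetical iteration sketch, not part of the original file: visit
 * every leaf item sitting in a block newer than min_trans, the pattern
 * used by btree defrag.  keep_locks must be set, as the comment above
 * requires, and min_key is advanced past each result so nothing is
 * visited twice.
 */
static int walk_newer_than_example(struct btrfs_root *root, u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key min_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->keep_locks = 1;

	min_key.objectid = 0;
	min_key.type = 0;
	min_key.offset = 0;

	while (1) {
		ret = btrfs_search_forward(root, &min_key, path, min_trans);
		if (ret)	/* 1: nothing left, < 0: error */
			break;

		/* ... process the item at path->slots[0] here ... */

		btrfs_release_path(path);
		if (min_key.offset == (u64)-1)
			break;	/* illustrative: stop instead of carrying */
		min_key.offset++;
	}
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}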
5233 
5234 static void tree_move_down(struct btrfs_root *root,
5235 			   struct btrfs_path *path,
5236 			   int *level, int root_level)
5237 {
5238 	BUG_ON(*level == 0);
5239 	path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level],
5240 					path->slots[*level]);
5241 	path->slots[*level - 1] = 0;
5242 	(*level)--;
5243 }
5244 
5245 static int tree_move_next_or_upnext(struct btrfs_root *root,
5246 				    struct btrfs_path *path,
5247 				    int *level, int root_level)
5248 {
5249 	int ret = 0;
5250 	int nritems;
5251 	nritems = btrfs_header_nritems(path->nodes[*level]);
5252 
5253 	path->slots[*level]++;
5254 
5255 	while (path->slots[*level] >= nritems) {
5256 		if (*level == root_level)
5257 			return -1;
5258 
5259 		/* move upnext */
5260 		path->slots[*level] = 0;
5261 		free_extent_buffer(path->nodes[*level]);
5262 		path->nodes[*level] = NULL;
5263 		(*level)++;
5264 		path->slots[*level]++;
5265 
5266 		nritems = btrfs_header_nritems(path->nodes[*level]);
5267 		ret = 1;
5268 	}
5269 	return ret;
5270 }
5271 
5272 /*
5273  * Returns 1 if it had to move up and next. 0 is returned if it moved only next
5274  * or down.
5275  */
5276 static int tree_advance(struct btrfs_root *root,
5277 			struct btrfs_path *path,
5278 			int *level, int root_level,
5279 			int allow_down,
5280 			struct btrfs_key *key)
5281 {
5282 	int ret;
5283 
5284 	if (*level == 0 || !allow_down) {
5285 		ret = tree_move_next_or_upnext(root, path, level, root_level);
5286 	} else {
5287 		tree_move_down(root, path, level, root_level);
5288 		ret = 0;
5289 	}
5290 	if (ret >= 0) {
5291 		if (*level == 0)
5292 			btrfs_item_key_to_cpu(path->nodes[*level], key,
5293 					path->slots[*level]);
5294 		else
5295 			btrfs_node_key_to_cpu(path->nodes[*level], key,
5296 					path->slots[*level]);
5297 	}
5298 	return ret;
5299 }
5300 
5301 static int tree_compare_item(struct btrfs_root *left_root,
5302 			     struct btrfs_path *left_path,
5303 			     struct btrfs_path *right_path,
5304 			     char *tmp_buf)
5305 {
5306 	int cmp;
5307 	int len1, len2;
5308 	unsigned long off1, off2;
5309 
5310 	len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
5311 	len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
5312 	if (len1 != len2)
5313 		return 1;
5314 
5315 	off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
5316 	off2 = btrfs_item_ptr_offset(right_path->nodes[0],
5317 				right_path->slots[0]);
5318 
5319 	read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
5320 
5321 	cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
5322 	if (cmp)
5323 		return 1;
5324 	return 0;
5325 }
5326 
5327 #define ADVANCE 1
5328 #define ADVANCE_ONLY_NEXT -1
5329 
5330 /*
5331  * This function compares two trees and calls the provided callback for
5332  * every changed/new/deleted item it finds.
5333  * If shared tree blocks are encountered, whole subtrees are skipped, making
5334  * the compare pretty fast on snapshotted subvolumes.
5335  *
5336  * This currently works on commit roots only. As commit roots are read only,
5337  * we don't do any locking. The commit roots are protected with transactions.
5338  * Transactions are ended and rejoined when a commit is tried in between.
5339  *
5340  * This function checks for modifications done to the trees while comparing.
5341  * If it detects a change, it aborts immediately.
5342  */
5343 int btrfs_compare_trees(struct btrfs_root *left_root,
5344 			struct btrfs_root *right_root,
5345 			btrfs_changed_cb_t changed_cb, void *ctx)
5346 {
5347 	int ret;
5348 	int cmp;
5349 	struct btrfs_path *left_path = NULL;
5350 	struct btrfs_path *right_path = NULL;
5351 	struct btrfs_key left_key;
5352 	struct btrfs_key right_key;
5353 	char *tmp_buf = NULL;
5354 	int left_root_level;
5355 	int right_root_level;
5356 	int left_level;
5357 	int right_level;
5358 	int left_end_reached;
5359 	int right_end_reached;
5360 	int advance_left;
5361 	int advance_right;
5362 	u64 left_blockptr;
5363 	u64 right_blockptr;
5364 	u64 left_gen;
5365 	u64 right_gen;
5366 
5367 	left_path = btrfs_alloc_path();
5368 	if (!left_path) {
5369 		ret = -ENOMEM;
5370 		goto out;
5371 	}
5372 	right_path = btrfs_alloc_path();
5373 	if (!right_path) {
5374 		ret = -ENOMEM;
5375 		goto out;
5376 	}
5377 
5378 	tmp_buf = kmalloc(left_root->leafsize, GFP_NOFS);
5379 	if (!tmp_buf) {
5380 		ret = -ENOMEM;
5381 		goto out;
5382 	}
5383 
5384 	left_path->search_commit_root = 1;
5385 	left_path->skip_locking = 1;
5386 	right_path->search_commit_root = 1;
5387 	right_path->skip_locking = 1;
5388 
5389 	/*
5390 	 * Strategy: Go to the first items of both trees. Then do
5391 	 *
5392 	 * If both trees are at level 0
5393 	 *   Compare keys of current items
5394 	 *     If left < right treat left item as new, advance left tree
5395 	 *       and repeat
5396 	 *     If left > right treat right item as deleted, advance right tree
5397 	 *       and repeat
5398 	 *     If left == right do deep compare of items, treat as changed if
5399 	 *       needed, advance both trees and repeat
5400 	 * If both trees are at the same level but not at level 0
5401 	 *   Compare keys of current nodes/leaves
5402 	 *     If left < right advance left tree and repeat
5403 	 *     If left > right advance right tree and repeat
5404 	 *     If left == right compare blockptrs of the next nodes/leaves
5405 	 *       If they match advance both trees but stay at the same level
5406 	 *         and repeat
5407 	 *       If they don't match advance both trees, allowing them to go
5408 	 *         deeper and repeat
5409 	 * If tree levels are different
5410 	 *   Advance the tree that needs it and repeat
5411 	 *
5412 	 * Advancing a tree means:
5413 	 *   If we are at level 0, try to go to the next slot. If that's not
5414 	 *   possible, go one level up and repeat. Stop when we find a level
5415 	 *   where we can go to the next slot. We may at this point be on a
5416 	 *   node or a leaf.
5417 	 *
5418 	 *   If we are not at level 0 and not on shared tree blocks, go one
5419 	 *   level deeper.
5420 	 *
5421 	 *   If we are not at level 0 and on shared tree blocks, go one slot to
5422 	 *   the right if possible or go up and right.
5423 	 */
5424 
5425 	down_read(&left_root->fs_info->commit_root_sem);
5426 	left_level = btrfs_header_level(left_root->commit_root);
5427 	left_root_level = left_level;
5428 	left_path->nodes[left_level] = left_root->commit_root;
5429 	extent_buffer_get(left_path->nodes[left_level]);
5430 
5431 	right_level = btrfs_header_level(right_root->commit_root);
5432 	right_root_level = right_level;
5433 	right_path->nodes[right_level] = right_root->commit_root;
5434 	extent_buffer_get(right_path->nodes[right_level]);
5435 	up_read(&left_root->fs_info->commit_root_sem);
5436 
5437 	if (left_level == 0)
5438 		btrfs_item_key_to_cpu(left_path->nodes[left_level],
5439 				&left_key, left_path->slots[left_level]);
5440 	else
5441 		btrfs_node_key_to_cpu(left_path->nodes[left_level],
5442 				&left_key, left_path->slots[left_level]);
5443 	if (right_level == 0)
5444 		btrfs_item_key_to_cpu(right_path->nodes[right_level],
5445 				&right_key, right_path->slots[right_level]);
5446 	else
5447 		btrfs_node_key_to_cpu(right_path->nodes[right_level],
5448 				&right_key, right_path->slots[right_level]);
5449 
5450 	left_end_reached = right_end_reached = 0;
5451 	advance_left = advance_right = 0;
5452 
5453 	while (1) {
5454 		if (advance_left && !left_end_reached) {
5455 			ret = tree_advance(left_root, left_path, &left_level,
5456 					left_root_level,
5457 					advance_left != ADVANCE_ONLY_NEXT,
5458 					&left_key);
5459 			if (ret < 0)
5460 				left_end_reached = ADVANCE;
5461 			advance_left = 0;
5462 		}
5463 		if (advance_right && !right_end_reached) {
5464 			ret = tree_advance(right_root, right_path, &right_level,
5465 					right_root_level,
5466 					advance_right != ADVANCE_ONLY_NEXT,
5467 					&right_key);
5468 			if (ret < 0)
5469 				right_end_reached = ADVANCE;
5470 			advance_right = 0;
5471 		}
5472 
5473 		if (left_end_reached && right_end_reached) {
5474 			ret = 0;
5475 			goto out;
5476 		} else if (left_end_reached) {
5477 			if (right_level == 0) {
5478 				ret = changed_cb(left_root, right_root,
5479 						left_path, right_path,
5480 						&right_key,
5481 						BTRFS_COMPARE_TREE_DELETED,
5482 						ctx);
5483 				if (ret < 0)
5484 					goto out;
5485 			}
5486 			advance_right = ADVANCE;
5487 			continue;
5488 		} else if (right_end_reached) {
5489 			if (left_level == 0) {
5490 				ret = changed_cb(left_root, right_root,
5491 						left_path, right_path,
5492 						&left_key,
5493 						BTRFS_COMPARE_TREE_NEW,
5494 						ctx);
5495 				if (ret < 0)
5496 					goto out;
5497 			}
5498 			advance_left = ADVANCE;
5499 			continue;
5500 		}
5501 
5502 		if (left_level == 0 && right_level == 0) {
5503 			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5504 			if (cmp < 0) {
5505 				ret = changed_cb(left_root, right_root,
5506 						left_path, right_path,
5507 						&left_key,
5508 						BTRFS_COMPARE_TREE_NEW,
5509 						ctx);
5510 				if (ret < 0)
5511 					goto out;
5512 				advance_left = ADVANCE;
5513 			} else if (cmp > 0) {
5514 				ret = changed_cb(left_root, right_root,
5515 						left_path, right_path,
5516 						&right_key,
5517 						BTRFS_COMPARE_TREE_DELETED,
5518 						ctx);
5519 				if (ret < 0)
5520 					goto out;
5521 				advance_right = ADVANCE;
5522 			} else {
5523 				enum btrfs_compare_tree_result cmp;
5524 
5525 				WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5526 				ret = tree_compare_item(left_root, left_path,
5527 						right_path, tmp_buf);
5528 				if (ret)
5529 					cmp = BTRFS_COMPARE_TREE_CHANGED;
5530 				else
5531 					cmp = BTRFS_COMPARE_TREE_SAME;
5532 				ret = changed_cb(left_root, right_root,
5533 						 left_path, right_path,
5534 						 &left_key, cmp, ctx);
5535 				if (ret < 0)
5536 					goto out;
5537 				advance_left = ADVANCE;
5538 				advance_right = ADVANCE;
5539 			}
5540 		} else if (left_level == right_level) {
5541 			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5542 			if (cmp < 0) {
5543 				advance_left = ADVANCE;
5544 			} else if (cmp > 0) {
5545 				advance_right = ADVANCE;
5546 			} else {
5547 				left_blockptr = btrfs_node_blockptr(
5548 						left_path->nodes[left_level],
5549 						left_path->slots[left_level]);
5550 				right_blockptr = btrfs_node_blockptr(
5551 						right_path->nodes[right_level],
5552 						right_path->slots[right_level]);
5553 				left_gen = btrfs_node_ptr_generation(
5554 						left_path->nodes[left_level],
5555 						left_path->slots[left_level]);
5556 				right_gen = btrfs_node_ptr_generation(
5557 						right_path->nodes[right_level],
5558 						right_path->slots[right_level]);
5559 				if (left_blockptr == right_blockptr &&
5560 				    left_gen == right_gen) {
5561 					/*
5562 					 * As we're on a shared block, don't
5563 					 * allow descending any deeper.
5564 					 */
5565 					advance_left = ADVANCE_ONLY_NEXT;
5566 					advance_right = ADVANCE_ONLY_NEXT;
5567 				} else {
5568 					advance_left = ADVANCE;
5569 					advance_right = ADVANCE;
5570 				}
5571 			}
5572 		} else if (left_level < right_level) {
5573 			advance_right = ADVANCE;
5574 		} else {
5575 			advance_left = ADVANCE;
5576 		}
5577 	}
5578 
5579 out:
5580 	btrfs_free_path(left_path);
5581 	btrfs_free_path(right_path);
5582 	kfree(tmp_buf);
5583 	return ret;
5584 }
5585 
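/*
 * Illustrative sketch, not part of the original file: a minimal
 * changed_cb() implementation showing how a caller of
 * btrfs_compare_trees() might dispatch on the result codes generated
 * above.  The function name is hypothetical; the real consumer of
 * this callback is the send code.
 */
#if 0	/* example only, never compiled */
static int example_changed_cb(struct btrfs_root *left_root,
			      struct btrfs_root *right_root,
			      struct btrfs_path *left_path,
			      struct btrfs_path *right_path,
			      struct btrfs_key *key,
			      enum btrfs_compare_tree_result result,
			      void *ctx)
{
	switch (result) {
	case BTRFS_COMPARE_TREE_NEW:
		/* key exists only in the left tree */
		break;
	case BTRFS_COMPARE_TREE_DELETED:
		/* key exists only in the right tree */
		break;
	case BTRFS_COMPARE_TREE_CHANGED:
		/* key exists in both trees, the items differ */
		break;
	case BTRFS_COMPARE_TREE_SAME:
		/* the deep compare found identical items */
		break;
	}
	return 0;	/* returning < 0 aborts the comparison */
}
#endif
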
5586 /*
5587  * This is similar to btrfs_next_leaf, but does not try to preserve
5588  * and fix up the path.  It looks for and returns the next key in the
5589  * tree based on the current path and the min_trans parameter.
5590  *
5591  * 0 is returned if another key is found, < 0 if there are any errors,
5592  * and 1 is returned if there are no higher keys in the tree.
5593  *
5594  * path->keep_locks should be set to 1 on the search made before
5595  * calling this function.
5596  */
5597 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5598 			struct btrfs_key *key, int level, u64 min_trans)
5599 {
5600 	int slot;
5601 	struct extent_buffer *c;
5602 
5603 	WARN_ON(!path->keep_locks);
5604 	while (level < BTRFS_MAX_LEVEL) {
5605 		if (!path->nodes[level])
5606 			return 1;
5607 
5608 		slot = path->slots[level] + 1;
5609 		c = path->nodes[level];
5610 next:
5611 		if (slot >= btrfs_header_nritems(c)) {
5612 			int ret;
5613 			int orig_lowest;
5614 			struct btrfs_key cur_key;
5615 			if (level + 1 >= BTRFS_MAX_LEVEL ||
5616 			    !path->nodes[level + 1])
5617 				return 1;
5618 
5619 			if (path->locks[level + 1]) {
5620 				level++;
5621 				continue;
5622 			}
5623 
5624 			slot = btrfs_header_nritems(c) - 1;
5625 			if (level == 0)
5626 				btrfs_item_key_to_cpu(c, &cur_key, slot);
5627 			else
5628 				btrfs_node_key_to_cpu(c, &cur_key, slot);
5629 
5630 			orig_lowest = path->lowest_level;
5631 			btrfs_release_path(path);
5632 			path->lowest_level = level;
5633 			ret = btrfs_search_slot(NULL, root, &cur_key, path,
5634 						0, 0);
5635 			path->lowest_level = orig_lowest;
5636 			if (ret < 0)
5637 				return ret;
5638 
5639 			c = path->nodes[level];
5640 			slot = path->slots[level];
5641 			if (ret == 0)
5642 				slot++;
5643 			goto next;
5644 		}
5645 
5646 		if (level == 0)
5647 			btrfs_item_key_to_cpu(c, key, slot);
5648 		else {
5649 			u64 gen = btrfs_node_ptr_generation(c, slot);
5650 
5651 			if (gen < min_trans) {
5652 				slot++;
5653 				goto next;
5654 			}
5655 			btrfs_node_key_to_cpu(c, key, slot);
5656 		}
5657 		return 0;
5658 	}
5659 	return 1;
5660 }
5661 
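/*
 * Illustrative sketch, not part of the original file: typical use of
 * btrfs_find_next_key() after a search that kept its locks, similar
 * to what the tree defrag code does.  'start_key', 'min_trans' and
 * the surrounding variables are assumptions for illustration.
 */
#if 0	/* example only, never compiled */
	struct btrfs_key next_key;

	path->keep_locks = 1;
	ret = btrfs_search_slot(NULL, root, &start_key, path, 0, 0);
	if (ret < 0)
		goto out;
	/* 0: next key stored in next_key; 1: no higher key; < 0: error */
	ret = btrfs_find_next_key(root, path, &next_key, 0, min_trans);
	btrfs_release_path(path);
#endif
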
5662 /*
5663  * Search the tree again to find a leaf with greater keys.
5664  * Returns 0 if it found something, 1 if there are no greater leaves,
5665  * or < 0 on I/O errors.
5666  */
5667 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5668 {
5669 	return btrfs_next_old_leaf(root, path, 0);
5670 }
5671 
5672 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5673 			u64 time_seq)
5674 {
5675 	int slot;
5676 	int level;
5677 	struct extent_buffer *c;
5678 	struct extent_buffer *next;
5679 	struct btrfs_key key;
5680 	u32 nritems;
5681 	int ret;
5682 	int old_spinning = path->leave_spinning;
5683 	int next_rw_lock = 0;
5684 
5685 	nritems = btrfs_header_nritems(path->nodes[0]);
5686 	if (nritems == 0)
5687 		return 1;
5688 
5689 	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5690 again:
5691 	level = 1;
5692 	next = NULL;
5693 	next_rw_lock = 0;
5694 	btrfs_release_path(path);
5695 
5696 	path->keep_locks = 1;
5697 	path->leave_spinning = 1;
5698 
5699 	if (time_seq)
5700 		ret = btrfs_search_old_slot(root, &key, path, time_seq);
5701 	else
5702 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5703 	path->keep_locks = 0;
5704 
5705 	if (ret < 0)
5706 		return ret;
5707 
5708 	nritems = btrfs_header_nritems(path->nodes[0]);
5709 	/*
5710 	 * By releasing the path above we dropped all our locks.  A balance
5711 	 * could have added more items next to the key that used to be
5712 	 * at the very end of the block.  So, check again here and
5713 	 * advance the path if there are now more items available.
5714 	 */
5715 	if (nritems > 0 && path->slots[0] < nritems - 1) {
5716 		if (ret == 0)
5717 			path->slots[0]++;
5718 		ret = 0;
5719 		goto done;
5720 	}
5721 	/*
5722 	 * So the above check misses one case:
5723 	 * - after releasing the path above, someone has removed the item that
5724 	 *   used to be at the very end of the block, and balance between
5725 	 *   leaves moved in another item with a bigger key.offset to replace it.
5726 	 *
5727 	 * This one should be returned as well, or we can get leaf corruption
5728 	 * later (esp. in __btrfs_drop_extents()).
5729 	 *
5730 	 * To explain this check a bit more: with ret > 0, the key wasn't
5731 	 * found, and the path points to the slot where it would be
5732 	 * inserted, so the item at path->slots[0] must be the bigger
5733 	 * one.
5734 	 */
5735 	if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
5736 		ret = 0;
5737 		goto done;
5738 	}
5739 
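	/*
	 * Walk up the tree until we find a node that has a slot to the
	 * right of our current position; the block it points to contains
	 * the next leaf.
	 */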
5740 	while (level < BTRFS_MAX_LEVEL) {
5741 		if (!path->nodes[level]) {
5742 			ret = 1;
5743 			goto done;
5744 		}
5745 
5746 		slot = path->slots[level] + 1;
5747 		c = path->nodes[level];
5748 		if (slot >= btrfs_header_nritems(c)) {
5749 			level++;
5750 			if (level == BTRFS_MAX_LEVEL) {
5751 				ret = 1;
5752 				goto done;
5753 			}
5754 			continue;
5755 		}
5756 
5757 		if (next) {
5758 			btrfs_tree_unlock_rw(next, next_rw_lock);
5759 			free_extent_buffer(next);
5760 		}
5761 
5762 		next = c;
5763 		next_rw_lock = path->locks[level];
5764 		ret = read_block_for_search(NULL, root, path, &next, level,
5765 					    slot, &key, 0);
5766 		if (ret == -EAGAIN)
5767 			goto again;
5768 
5769 		if (ret < 0) {
5770 			btrfs_release_path(path);
5771 			goto done;
5772 		}
5773 
5774 		if (!path->skip_locking) {
5775 			ret = btrfs_try_tree_read_lock(next);
5776 			if (!ret && time_seq) {
5777 				/*
5778 				 * If we don't get the lock, we may be racing
5779 				 * with push_leaf_left, which holds that lock
5780 				 * while itself waiting for the leaf we have
5781 				 * currently locked. To resolve this, we give
5782 				 * up our lock and cycle.
5783 				 */
5784 				free_extent_buffer(next);
5785 				btrfs_release_path(path);
5786 				cond_resched();
5787 				goto again;
5788 			}
5789 			if (!ret) {
5790 				btrfs_set_path_blocking(path);
5791 				btrfs_tree_read_lock(next);
5792 				btrfs_clear_path_blocking(path, next,
5793 							  BTRFS_READ_LOCK);
5794 			}
5795 			next_rw_lock = BTRFS_READ_LOCK;
5796 		}
5797 		break;
5798 	}
5799 	path->slots[level] = slot;
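	/*
	 * Walk back down to level 0, installing the newly read block at
	 * each level of the path and read-locking it as we go (unless
	 * locking is skipped).
	 */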
5800 	while (1) {
5801 		level--;
5802 		c = path->nodes[level];
5803 		if (path->locks[level])
5804 			btrfs_tree_unlock_rw(c, path->locks[level]);
5805 
5806 		free_extent_buffer(c);
5807 		path->nodes[level] = next;
5808 		path->slots[level] = 0;
5809 		if (!path->skip_locking)
5810 			path->locks[level] = next_rw_lock;
5811 		if (!level)
5812 			break;
5813 
5814 		ret = read_block_for_search(NULL, root, path, &next, level,
5815 					    0, &key, 0);
5816 		if (ret == -EAGAIN)
5817 			goto again;
5818 
5819 		if (ret < 0) {
5820 			btrfs_release_path(path);
5821 			goto done;
5822 		}
5823 
5824 		if (!path->skip_locking) {
5825 			ret = btrfs_try_tree_read_lock(next);
5826 			if (!ret) {
5827 				btrfs_set_path_blocking(path);
5828 				btrfs_tree_read_lock(next);
5829 				btrfs_clear_path_blocking(path, next,
5830 							  BTRFS_READ_LOCK);
5831 			}
5832 			next_rw_lock = BTRFS_READ_LOCK;
5833 		}
5834 	}
5835 	ret = 0;
5836 done:
5837 	unlock_up(path, 0, 1, 0, NULL);
5838 	path->leave_spinning = old_spinning;
5839 	if (!old_spinning)
5840 		btrfs_set_path_blocking(path);
5841 
5842 	return ret;
5843 }
5844 
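/*
 * Illustrative sketch, not part of the original file: the common
 * pattern for iterating over every item of a tree with
 * btrfs_next_leaf().  The item-processing step is left as a
 * placeholder; 'key', 'path', 'root' and 'ret' are assumed to be set
 * up by the caller.
 */
#if 0	/* example only, never compiled */
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	while (1) {
		struct extent_buffer *leaf = path->nodes[0];

		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;	/* I/O error */
			if (ret > 0)
				break;		/* no more leaves */
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		/* ... process the item at path->slots[0] here ... */
		path->slots[0]++;
	}
#endif
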
5845 /*
5846  * This uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5847  * searching until it gets past min_objectid or finds an item of 'type'.
5848  *
5849  * Returns 0 if something was found, 1 if nothing was found and < 0 on error.
5850  */
5851 int btrfs_previous_item(struct btrfs_root *root,
5852 			struct btrfs_path *path, u64 min_objectid,
5853 			int type)
5854 {
5855 	struct btrfs_key found_key;
5856 	struct extent_buffer *leaf;
5857 	u32 nritems;
5858 	int ret;
5859 
5860 	while (1) {
5861 		if (path->slots[0] == 0) {
5862 			btrfs_set_path_blocking(path);
5863 			ret = btrfs_prev_leaf(root, path);
5864 			if (ret != 0)
5865 				return ret;
5866 		} else {
5867 			path->slots[0]--;
5868 		}
5869 		leaf = path->nodes[0];
5870 		nritems = btrfs_header_nritems(leaf);
5871 		if (nritems == 0)
5872 			return 1;
5873 		if (path->slots[0] == nritems)
5874 			path->slots[0]--;
5875 
5876 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5877 		if (found_key.objectid < min_objectid)
5878 			break;
5879 		if (found_key.type == type)
5880 			return 0;
5881 		if (found_key.objectid == min_objectid &&
5882 		    found_key.type < type)
5883 			break;
5884 	}
5885 	return 1;
5886 }
5887 
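/*
 * Illustrative sketch, not part of the original file: stepping back
 * from a search position to the previous item of a wanted type by
 * searching past the largest possible key first.  The key values and
 * 'objectid' are assumptions for illustration.
 */
#if 0	/* example only, never compiled */
	key.objectid = objectid;
	key.type = (u8)-1;
	key.offset = (u64)-1;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	ret = btrfs_previous_item(root, path, objectid, BTRFS_INODE_ITEM_KEY);
	if (ret == 0)
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
#endif
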
5888 /*
5889  * Search the extent tree to find a previous metadata/data extent item
5890  * with a minimum objectid.
5891  *
5892  * Returns 0 if something was found, 1 if nothing was found and < 0 on error.
5893  */
5894 int btrfs_previous_extent_item(struct btrfs_root *root,
5895 			struct btrfs_path *path, u64 min_objectid)
5896 {
5897 	struct btrfs_key found_key;
5898 	struct extent_buffer *leaf;
5899 	u32 nritems;
5900 	int ret;
5901 
5902 	while (1) {
5903 		if (path->slots[0] == 0) {
5904 			btrfs_set_path_blocking(path);
5905 			ret = btrfs_prev_leaf(root, path);
5906 			if (ret != 0)
5907 				return ret;
5908 		} else {
5909 			path->slots[0]--;
5910 		}
5911 		leaf = path->nodes[0];
5912 		nritems = btrfs_header_nritems(leaf);
5913 		if (nritems == 0)
5914 			return 1;
5915 		if (path->slots[0] == nritems)
5916 			path->slots[0]--;
5917 
5918 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5919 		if (found_key.objectid < min_objectid)
5920 			break;
5921 		if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
5922 		    found_key.type == BTRFS_METADATA_ITEM_KEY)
5923 			return 0;
5924 		if (found_key.objectid == min_objectid &&
5925 		    found_key.type < BTRFS_EXTENT_ITEM_KEY)
5926 			break;
5927 	}
5928 	return 1;
5929 }
5930
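/*
 * Illustrative sketch, not part of the original file: finding the
 * extent item that covers a given logical address by searching past
 * it and stepping back, similar to how the scrub code uses this
 * helper.  'logical', 'extent_root' and 'path' are assumptions.
 */
#if 0	/* example only, never compiled */
	key.objectid = logical;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = (u64)-1;
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret > 0)
		ret = btrfs_previous_extent_item(extent_root, path, 0);
#endif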