xref: /openbmc/linux/fs/btrfs/ctree.c (revision ee8a99bd)
1 /*
2  * Copyright (C) 2007,2008 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18 
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/rbtree.h>
22 #include "ctree.h"
23 #include "disk-io.h"
24 #include "transaction.h"
25 #include "print-tree.h"
26 #include "locking.h"
27 
28 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
29 		      *root, struct btrfs_path *path, int level);
30 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
31 		      *root, struct btrfs_key *ins_key,
32 		      struct btrfs_path *path, int data_size, int extend);
33 static int push_node_left(struct btrfs_trans_handle *trans,
34 			  struct btrfs_root *root, struct extent_buffer *dst,
35 			  struct extent_buffer *src, int empty);
36 static int balance_node_right(struct btrfs_trans_handle *trans,
37 			      struct btrfs_root *root,
38 			      struct extent_buffer *dst_buf,
39 			      struct extent_buffer *src_buf);
40 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
41 		    int level, int slot);
42 static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
43 				 struct extent_buffer *eb);
44 static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
45 
46 struct btrfs_path *btrfs_alloc_path(void)
47 {
48 	struct btrfs_path *path;
49 	path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
50 	return path;
51 }
52 
53 /*
54  * set all locked nodes in the path to blocking locks.  This should
55  * be done before scheduling
56  */
57 noinline void btrfs_set_path_blocking(struct btrfs_path *p)
58 {
59 	int i;
60 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
61 		if (!p->nodes[i] || !p->locks[i])
62 			continue;
63 		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
64 		if (p->locks[i] == BTRFS_READ_LOCK)
65 			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
66 		else if (p->locks[i] == BTRFS_WRITE_LOCK)
67 			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
68 	}
69 }
70 
71 /*
72  * reset all the locked nodes in the path to spinning locks.
73  *
74  * held is used to keep lockdep happy, when lockdep is enabled
75  * we set held to a blocking lock before we go around and
76  * retake all the spinlocks in the path.  You can safely use NULL
77  * for held
78  */
79 noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
80 					struct extent_buffer *held, int held_rw)
81 {
82 	int i;
83 
84 #ifdef CONFIG_DEBUG_LOCK_ALLOC
85 	/* lockdep really cares that we take all of these spinlocks
86 	 * in the right order.  If any of the locks in the path are not
87 	 * currently blocking, it is going to complain.  So, make really
88 	 * really sure by forcing the path to blocking before we clear
89 	 * the path blocking.
90 	 */
91 	if (held) {
92 		btrfs_set_lock_blocking_rw(held, held_rw);
93 		if (held_rw == BTRFS_WRITE_LOCK)
94 			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
95 		else if (held_rw == BTRFS_READ_LOCK)
96 			held_rw = BTRFS_READ_LOCK_BLOCKING;
97 	}
98 	btrfs_set_path_blocking(p);
99 #endif
100 
101 	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
102 		if (p->nodes[i] && p->locks[i]) {
103 			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
104 			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
105 				p->locks[i] = BTRFS_WRITE_LOCK;
106 			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
107 				p->locks[i] = BTRFS_READ_LOCK;
108 		}
109 	}
110 
111 #ifdef CONFIG_DEBUG_LOCK_ALLOC
112 	if (held)
113 		btrfs_clear_lock_blocking_rw(held, held_rw);
114 #endif
115 }
116 
117 /* this also releases the path */
118 void btrfs_free_path(struct btrfs_path *p)
119 {
120 	if (!p)
121 		return;
122 	btrfs_release_path(p);
123 	kmem_cache_free(btrfs_path_cachep, p);
124 }
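
/*
 * A minimal usage sketch of the two path helpers above (an illustration,
 * not code taken from this file); it assumes the usual btrfs_search_slot()
 * caller pattern:
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
 *	if (ret == 0)
 *		item is at path->nodes[0], path->slots[0]
 *	btrfs_free_path(path);	releases locks and refs, then frees the path
 */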
125 
126 /*
127  * path release drops references on the extent buffers in the path
128  * and it drops any locks held by this path
129  *
130  * It is safe to call this on paths that have no locks or extent buffers held.
131  */
132 noinline void btrfs_release_path(struct btrfs_path *p)
133 {
134 	int i;
135 
136 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
137 		p->slots[i] = 0;
138 		if (!p->nodes[i])
139 			continue;
140 		if (p->locks[i]) {
141 			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
142 			p->locks[i] = 0;
143 		}
144 		free_extent_buffer(p->nodes[i]);
145 		p->nodes[i] = NULL;
146 	}
147 }
148 
149 /*
150  * safely gets a reference on the root node of a tree.  A lock
151  * is not taken, so a concurrent writer may put a different node
152  * at the root of the tree.  See btrfs_lock_root_node for the
153  * looping required.
154  *
155  * The extent buffer returned by this has a reference taken, so
156  * it won't disappear.  It may stop being the root of the tree
157  * at any time because there are no locks held.
158  */
159 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
160 {
161 	struct extent_buffer *eb;
162 
163 	while (1) {
164 		rcu_read_lock();
165 		eb = rcu_dereference(root->node);
166 
167 		/*
168 		 * RCU really hurts here: we could free up the root node because
169 		 * it was cow'ed but we may not get the new root node yet, so do
170 		 * the inc_not_zero dance and, if it doesn't work, call
171 		 * synchronize_rcu and try again.
172 		 */
173 		if (atomic_inc_not_zero(&eb->refs)) {
174 			rcu_read_unlock();
175 			break;
176 		}
177 		rcu_read_unlock();
178 		synchronize_rcu();
179 	}
180 	return eb;
181 }
182 
183 /* loop around taking references on and locking the root node of the
184  * tree until you end up with a lock on the root.  A locked buffer
185  * is returned, with a reference held.
186  */
187 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
188 {
189 	struct extent_buffer *eb;
190 
191 	while (1) {
192 		eb = btrfs_root_node(root);
193 		btrfs_tree_lock(eb);
194 		if (eb == root->node)
195 			break;
196 		btrfs_tree_unlock(eb);
197 		free_extent_buffer(eb);
198 	}
199 	return eb;
200 }
201 
202 /* loop around taking references on and read locking the root node of the
203  * tree until you end up with a read lock on the root.  A read locked buffer
204  * is returned, with a reference held.
205  */
206 static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
207 {
208 	struct extent_buffer *eb;
209 
210 	while (1) {
211 		eb = btrfs_root_node(root);
212 		btrfs_tree_read_lock(eb);
213 		if (eb == root->node)
214 			break;
215 		btrfs_tree_read_unlock(eb);
216 		free_extent_buffer(eb);
217 	}
218 	return eb;
219 }
220 
221 /* cowonly roots (everything not a reference counted cow subvolume) just get
222  * put onto a simple dirty list.  transaction.c walks this to make sure they
223  * get properly updated on disk.
224  */
225 static void add_root_to_dirty_list(struct btrfs_root *root)
226 {
227 	spin_lock(&root->fs_info->trans_lock);
228 	if (root->track_dirty && list_empty(&root->dirty_list)) {
229 		list_add(&root->dirty_list,
230 			 &root->fs_info->dirty_cowonly_roots);
231 	}
232 	spin_unlock(&root->fs_info->trans_lock);
233 }
234 
235 /*
236  * used by snapshot creation to make a copy of a root for a tree with
237  * a given objectid.  The buffer with the new root node is returned in
238  * cow_ret, and this func returns zero on success or a negative error code.
239  */
240 int btrfs_copy_root(struct btrfs_trans_handle *trans,
241 		      struct btrfs_root *root,
242 		      struct extent_buffer *buf,
243 		      struct extent_buffer **cow_ret, u64 new_root_objectid)
244 {
245 	struct extent_buffer *cow;
246 	int ret = 0;
247 	int level;
248 	struct btrfs_disk_key disk_key;
249 
250 	WARN_ON(root->ref_cows && trans->transid !=
251 		root->fs_info->running_transaction->transid);
252 	WARN_ON(root->ref_cows && trans->transid != root->last_trans);
253 
254 	level = btrfs_header_level(buf);
255 	if (level == 0)
256 		btrfs_item_key(buf, &disk_key, 0);
257 	else
258 		btrfs_node_key(buf, &disk_key, 0);
259 
260 	cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
261 				     new_root_objectid, &disk_key, level,
262 				     buf->start, 0);
263 	if (IS_ERR(cow))
264 		return PTR_ERR(cow);
265 
266 	copy_extent_buffer(cow, buf, 0, 0, cow->len);
267 	btrfs_set_header_bytenr(cow, cow->start);
268 	btrfs_set_header_generation(cow, trans->transid);
269 	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
270 	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
271 				     BTRFS_HEADER_FLAG_RELOC);
272 	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
273 		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
274 	else
275 		btrfs_set_header_owner(cow, new_root_objectid);
276 
277 	write_extent_buffer(cow, root->fs_info->fsid,
278 			    (unsigned long)btrfs_header_fsid(cow),
279 			    BTRFS_FSID_SIZE);
280 
281 	WARN_ON(btrfs_header_generation(buf) > trans->transid);
282 	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
283 		ret = btrfs_inc_ref(trans, root, cow, 1, 1);
284 	else
285 		ret = btrfs_inc_ref(trans, root, cow, 0, 1);
286 
287 	if (ret)
288 		return ret;
289 
290 	btrfs_mark_buffer_dirty(cow);
291 	*cow_ret = cow;
292 	return 0;
293 }
294 
295 enum mod_log_op {
296 	MOD_LOG_KEY_REPLACE,
297 	MOD_LOG_KEY_ADD,
298 	MOD_LOG_KEY_REMOVE,
299 	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
300 	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
301 	MOD_LOG_MOVE_KEYS,
302 	MOD_LOG_ROOT_REPLACE,
303 };
304 
305 struct tree_mod_move {
306 	int dst_slot;
307 	int nr_items;
308 };
309 
310 struct tree_mod_root {
311 	u64 logical;
312 	u8 level;
313 };
314 
315 struct tree_mod_elem {
316 	struct rb_node node;
317 	u64 index;		/* shifted logical */
318 	u64 seq;
319 	enum mod_log_op op;
320 
321 	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
322 	int slot;
323 
324 	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
325 	u64 generation;
326 
327 	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
328 	struct btrfs_disk_key key;
329 	u64 blockptr;
330 
331 	/* this is used for op == MOD_LOG_MOVE_KEYS */
332 	struct tree_mod_move move;
333 
334 	/* this is used for op == MOD_LOG_ROOT_REPLACE */
335 	struct tree_mod_root old_root;
336 };
337 
338 static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
339 {
340 	read_lock(&fs_info->tree_mod_log_lock);
341 }
342 
343 static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
344 {
345 	read_unlock(&fs_info->tree_mod_log_lock);
346 }
347 
348 static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
349 {
350 	write_lock(&fs_info->tree_mod_log_lock);
351 }
352 
353 static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
354 {
355 	write_unlock(&fs_info->tree_mod_log_lock);
356 }
357 
358 /*
359  * Increment the upper half of tree_mod_seq, set lower half zero.
360  *
361  * Must be called with fs_info->tree_mod_seq_lock held.
362  */
363 static inline u64 btrfs_inc_tree_mod_seq_major(struct btrfs_fs_info *fs_info)
364 {
365 	u64 seq = atomic64_read(&fs_info->tree_mod_seq);
366 	seq &= 0xffffffff00000000ull;
367 	seq += 1ull << 32;
368 	atomic64_set(&fs_info->tree_mod_seq, seq);
369 	return seq;
370 }
371 
372 /*
373  * Increment the lower half of tree_mod_seq.
374  *
375  * Must be called with fs_info->tree_mod_seq_lock held. The way major numbers
376  * are generated should not technically require a spin lock here. (Rationale:
377  * incrementing the minor while an increment of the major seq number is in
378  * progress, i.e. between its atomic64_read and atomic64_set calls, doesn't
379  * duplicate sequence numbers; it just returns a unique sequence number as
380  * usual.) We have decided to leave that requirement in here and rethink it
381  * once we notice it really imposes a problem on some workload.
382  */
383 static inline u64 btrfs_inc_tree_mod_seq_minor(struct btrfs_fs_info *fs_info)
384 {
385 	return atomic64_inc_return(&fs_info->tree_mod_seq);
386 }
387 
388 /*
389  * return the last minor in the previous major tree_mod_seq number
390  */
391 u64 btrfs_tree_mod_seq_prev(u64 seq)
392 {
393 	return (seq & 0xffffffff00000000ull) - 1ull;
394 }
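
/*
 * Layout sketch of tree_mod_seq, inferred from the helpers above and given
 * for illustration only: the upper 32 bits are the "major" part, the lower
 * 32 bits the "minor" part.  E.g. with seq == 0x0000000300000005 (major 3,
 * minor 5), btrfs_tree_mod_seq_prev() returns 0x00000002ffffffff, the last
 * minor of major 2, while btrfs_inc_tree_mod_seq_major() would advance the
 * counter to 0x0000000400000000.
 */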
395 
396 /*
397  * This adds a new blocker to the tree mod log's blocker list if the @elem
398  * passed does not already have a sequence number set. So when a caller expects
399  * to record tree modifications, it should ensure that elem->seq is zero
400  * before calling btrfs_get_tree_mod_seq.
401  * Returns a fresh, unused tree log modification sequence number, even if no new
402  * blocker was added.
403  */
404 u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
405 			   struct seq_list *elem)
406 {
407 	u64 seq;
408 
409 	tree_mod_log_write_lock(fs_info);
410 	spin_lock(&fs_info->tree_mod_seq_lock);
411 	if (!elem->seq) {
412 		elem->seq = btrfs_inc_tree_mod_seq_major(fs_info);
413 		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
414 	}
415 	seq = btrfs_inc_tree_mod_seq_minor(fs_info);
416 	spin_unlock(&fs_info->tree_mod_seq_lock);
417 	tree_mod_log_write_unlock(fs_info);
418 
419 	return seq;
420 }
421 
422 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
423 			    struct seq_list *elem)
424 {
425 	struct rb_root *tm_root;
426 	struct rb_node *node;
427 	struct rb_node *next;
428 	struct seq_list *cur_elem;
429 	struct tree_mod_elem *tm;
430 	u64 min_seq = (u64)-1;
431 	u64 seq_putting = elem->seq;
432 
433 	if (!seq_putting)
434 		return;
435 
436 	spin_lock(&fs_info->tree_mod_seq_lock);
437 	list_del(&elem->list);
438 	elem->seq = 0;
439 
440 	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
441 		if (cur_elem->seq < min_seq) {
442 			if (seq_putting > cur_elem->seq) {
443 				/*
444 				 * blocker with lower sequence number exists, we
445 				 * cannot remove anything from the log
446 				 */
447 				spin_unlock(&fs_info->tree_mod_seq_lock);
448 				return;
449 			}
450 			min_seq = cur_elem->seq;
451 		}
452 	}
453 	spin_unlock(&fs_info->tree_mod_seq_lock);
454 
455 	/*
456 	 * anything that's lower than the lowest existing (read: blocked)
457 	 * sequence number can be removed from the tree.
458 	 */
459 	tree_mod_log_write_lock(fs_info);
460 	tm_root = &fs_info->tree_mod_log;
461 	for (node = rb_first(tm_root); node; node = next) {
462 		next = rb_next(node);
463 		tm = container_of(node, struct tree_mod_elem, node);
464 		if (tm->seq > min_seq)
465 			continue;
466 		rb_erase(node, tm_root);
467 		kfree(tm);
468 	}
469 	tree_mod_log_write_unlock(fs_info);
470 }
471 
472 /*
473  * key order of the log:
474  *       index -> sequence
475  *
476  * the index is the shifted logical of the *new* root node for root replace
477  * operations, or the shifted logical of the affected block for all other
478  * operations.
479  */
480 static noinline int
481 __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
482 {
483 	struct rb_root *tm_root;
484 	struct rb_node **new;
485 	struct rb_node *parent = NULL;
486 	struct tree_mod_elem *cur;
487 
488 	BUG_ON(!tm || !tm->seq);
489 
490 	tm_root = &fs_info->tree_mod_log;
491 	new = &tm_root->rb_node;
492 	while (*new) {
493 		cur = container_of(*new, struct tree_mod_elem, node);
494 		parent = *new;
495 		if (cur->index < tm->index)
496 			new = &((*new)->rb_left);
497 		else if (cur->index > tm->index)
498 			new = &((*new)->rb_right);
499 		else if (cur->seq < tm->seq)
500 			new = &((*new)->rb_left);
501 		else if (cur->seq > tm->seq)
502 			new = &((*new)->rb_right);
503 		else {
504 			kfree(tm);
505 			return -EEXIST;
506 		}
507 	}
508 
509 	rb_link_node(&tm->node, parent, new);
510 	rb_insert_color(&tm->node, tm_root);
511 	return 0;
512 }
513 
514 /*
515  * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
516  * returns zero with the tree_mod_log_lock acquired. The caller must hold
517  * that lock until all tree mod log insertions are recorded in the rb tree
518  * and then call tree_mod_log_write_unlock() to release it.
519  */
520 static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
521 				    struct extent_buffer *eb) {
522 	smp_mb();
523 	if (list_empty(&(fs_info)->tree_mod_seq_list))
524 		return 1;
525 	if (eb && btrfs_header_level(eb) == 0)
526 		return 1;
527 
528 	tree_mod_log_write_lock(fs_info);
529 	if (list_empty(&fs_info->tree_mod_seq_list)) {
530 		/*
531 		 * someone emptied the list while we were waiting for the lock.
532 		 * we must not add to the list when no blocker exists.
533 		 */
534 		tree_mod_log_write_unlock(fs_info);
535 		return 1;
536 	}
537 
538 	return 0;
539 }
540 
541 /*
542  * This allocates memory and gets a tree modification sequence number.
543  *
544  * Returns <0 on error.
545  * Returns >0 (the added sequence number) on success.
546  */
547 static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags,
548 				 struct tree_mod_elem **tm_ret)
549 {
550 	struct tree_mod_elem *tm;
551 
552 	/*
553 	 * once we switch from spin locks to something different, we should
554 	 * honor the flags parameter here.
555 	 */
556 	tm = *tm_ret = kzalloc(sizeof(*tm), GFP_ATOMIC);
557 	if (!tm)
558 		return -ENOMEM;
559 
560 	spin_lock(&fs_info->tree_mod_seq_lock);
561 	tm->seq = btrfs_inc_tree_mod_seq_minor(fs_info);
562 	spin_unlock(&fs_info->tree_mod_seq_lock);
563 
564 	return tm->seq;
565 }
566 
567 static inline int
568 __tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
569 			  struct extent_buffer *eb, int slot,
570 			  enum mod_log_op op, gfp_t flags)
571 {
572 	int ret;
573 	struct tree_mod_elem *tm;
574 
575 	ret = tree_mod_alloc(fs_info, flags, &tm);
576 	if (ret < 0)
577 		return ret;
578 
579 	tm->index = eb->start >> PAGE_CACHE_SHIFT;
580 	if (op != MOD_LOG_KEY_ADD) {
581 		btrfs_node_key(eb, &tm->key, slot);
582 		tm->blockptr = btrfs_node_blockptr(eb, slot);
583 	}
584 	tm->op = op;
585 	tm->slot = slot;
586 	tm->generation = btrfs_node_ptr_generation(eb, slot);
587 
588 	return __tree_mod_log_insert(fs_info, tm);
589 }
590 
591 static noinline int
592 tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info,
593 			     struct extent_buffer *eb, int slot,
594 			     enum mod_log_op op, gfp_t flags)
595 {
596 	int ret;
597 
598 	if (tree_mod_dont_log(fs_info, eb))
599 		return 0;
600 
601 	ret = __tree_mod_log_insert_key(fs_info, eb, slot, op, flags);
602 
603 	tree_mod_log_write_unlock(fs_info);
604 	return ret;
605 }
606 
607 static noinline int
608 tree_mod_log_insert_key(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
609 			int slot, enum mod_log_op op)
610 {
611 	return tree_mod_log_insert_key_mask(fs_info, eb, slot, op, GFP_NOFS);
612 }
613 
614 static noinline int
615 tree_mod_log_insert_key_locked(struct btrfs_fs_info *fs_info,
616 			     struct extent_buffer *eb, int slot,
617 			     enum mod_log_op op)
618 {
619 	return __tree_mod_log_insert_key(fs_info, eb, slot, op, GFP_NOFS);
620 }
621 
622 static noinline int
623 tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
624 			 struct extent_buffer *eb, int dst_slot, int src_slot,
625 			 int nr_items, gfp_t flags)
626 {
627 	struct tree_mod_elem *tm;
628 	int ret;
629 	int i;
630 
631 	if (tree_mod_dont_log(fs_info, eb))
632 		return 0;
633 
634 	/*
635 	 * When we overwrite something during the move, we log these removals.
636 	 * This can only happen when we move towards the beginning of the
637 	 * buffer, i.e. dst_slot < src_slot.
638 	 */
639 	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
640 		ret = tree_mod_log_insert_key_locked(fs_info, eb, i + dst_slot,
641 					      MOD_LOG_KEY_REMOVE_WHILE_MOVING);
642 		BUG_ON(ret < 0);
643 	}
644 
645 	ret = tree_mod_alloc(fs_info, flags, &tm);
646 	if (ret < 0)
647 		goto out;
648 
649 	tm->index = eb->start >> PAGE_CACHE_SHIFT;
650 	tm->slot = src_slot;
651 	tm->move.dst_slot = dst_slot;
652 	tm->move.nr_items = nr_items;
653 	tm->op = MOD_LOG_MOVE_KEYS;
654 
655 	ret = __tree_mod_log_insert(fs_info, tm);
656 out:
657 	tree_mod_log_write_unlock(fs_info);
658 	return ret;
659 }
660 
661 static inline void
662 __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
663 {
664 	int i;
665 	u32 nritems;
666 	int ret;
667 
668 	if (btrfs_header_level(eb) == 0)
669 		return;
670 
671 	nritems = btrfs_header_nritems(eb);
672 	for (i = nritems - 1; i >= 0; i--) {
673 		ret = tree_mod_log_insert_key_locked(fs_info, eb, i,
674 					      MOD_LOG_KEY_REMOVE_WHILE_FREEING);
675 		BUG_ON(ret < 0);
676 	}
677 }
678 
679 static noinline int
680 tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
681 			 struct extent_buffer *old_root,
682 			 struct extent_buffer *new_root, gfp_t flags,
683 			 int log_removal)
684 {
685 	struct tree_mod_elem *tm;
686 	int ret;
687 
688 	if (tree_mod_dont_log(fs_info, NULL))
689 		return 0;
690 
691 	if (log_removal)
692 		__tree_mod_log_free_eb(fs_info, old_root);
693 
694 	ret = tree_mod_alloc(fs_info, flags, &tm);
695 	if (ret < 0)
696 		goto out;
697 
698 	tm->index = new_root->start >> PAGE_CACHE_SHIFT;
699 	tm->old_root.logical = old_root->start;
700 	tm->old_root.level = btrfs_header_level(old_root);
701 	tm->generation = btrfs_header_generation(old_root);
702 	tm->op = MOD_LOG_ROOT_REPLACE;
703 
704 	ret = __tree_mod_log_insert(fs_info, tm);
705 out:
706 	tree_mod_log_write_unlock(fs_info);
707 	return ret;
708 }
709 
710 static struct tree_mod_elem *
711 __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
712 		      int smallest)
713 {
714 	struct rb_root *tm_root;
715 	struct rb_node *node;
716 	struct tree_mod_elem *cur = NULL;
717 	struct tree_mod_elem *found = NULL;
718 	u64 index = start >> PAGE_CACHE_SHIFT;
719 
720 	tree_mod_log_read_lock(fs_info);
721 	tm_root = &fs_info->tree_mod_log;
722 	node = tm_root->rb_node;
723 	while (node) {
724 		cur = container_of(node, struct tree_mod_elem, node);
725 		if (cur->index < index) {
726 			node = node->rb_left;
727 		} else if (cur->index > index) {
728 			node = node->rb_right;
729 		} else if (cur->seq < min_seq) {
730 			node = node->rb_left;
731 		} else if (!smallest) {
732 			/* we want the node with the highest seq */
733 			if (found)
734 				BUG_ON(found->seq > cur->seq);
735 			found = cur;
736 			node = node->rb_left;
737 		} else if (cur->seq > min_seq) {
738 			/* we want the node with the smallest seq */
739 			if (found)
740 				BUG_ON(found->seq < cur->seq);
741 			found = cur;
742 			node = node->rb_right;
743 		} else {
744 			found = cur;
745 			break;
746 		}
747 	}
748 	tree_mod_log_read_unlock(fs_info);
749 
750 	return found;
751 }
752 
753 /*
754  * this returns the element from the log with the smallest time sequence
755  * value that's in the log (the oldest log item). any element with a time
756  * sequence lower than min_seq will be ignored.
757  */
758 static struct tree_mod_elem *
759 tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
760 			   u64 min_seq)
761 {
762 	return __tree_mod_log_search(fs_info, start, min_seq, 1);
763 }
764 
765 /*
766  * this returns the element from the log with the largest time sequence
767  * value that's in the log (the most recent log item). any element with
768  * a time sequence lower than min_seq will be ignored.
769  */
770 static struct tree_mod_elem *
771 tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
772 {
773 	return __tree_mod_log_search(fs_info, start, min_seq, 0);
774 }
775 
776 static noinline void
777 tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
778 		     struct extent_buffer *src, unsigned long dst_offset,
779 		     unsigned long src_offset, int nr_items)
780 {
781 	int ret;
782 	int i;
783 
784 	if (tree_mod_dont_log(fs_info, NULL))
785 		return;
786 
787 	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0) {
788 		tree_mod_log_write_unlock(fs_info);
789 		return;
790 	}
791 
792 	for (i = 0; i < nr_items; i++) {
793 		ret = tree_mod_log_insert_key_locked(fs_info, src,
794 						i + src_offset,
795 						MOD_LOG_KEY_REMOVE);
796 		BUG_ON(ret < 0);
797 		ret = tree_mod_log_insert_key_locked(fs_info, dst,
798 						     i + dst_offset,
799 						     MOD_LOG_KEY_ADD);
800 		BUG_ON(ret < 0);
801 	}
802 
803 	tree_mod_log_write_unlock(fs_info);
804 }
805 
806 static inline void
807 tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
808 		     int dst_offset, int src_offset, int nr_items)
809 {
810 	int ret;
811 	ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
812 				       nr_items, GFP_NOFS);
813 	BUG_ON(ret < 0);
814 }
815 
816 static noinline void
817 tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
818 			  struct extent_buffer *eb, int slot, int atomic)
819 {
820 	int ret;
821 
822 	ret = tree_mod_log_insert_key_mask(fs_info, eb, slot,
823 					   MOD_LOG_KEY_REPLACE,
824 					   atomic ? GFP_ATOMIC : GFP_NOFS);
825 	BUG_ON(ret < 0);
826 }
827 
828 static noinline void
829 tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
830 {
831 	if (tree_mod_dont_log(fs_info, eb))
832 		return;
833 
834 	__tree_mod_log_free_eb(fs_info, eb);
835 
836 	tree_mod_log_write_unlock(fs_info);
837 }
838 
839 static noinline void
840 tree_mod_log_set_root_pointer(struct btrfs_root *root,
841 			      struct extent_buffer *new_root_node,
842 			      int log_removal)
843 {
844 	int ret;
845 	ret = tree_mod_log_insert_root(root->fs_info, root->node,
846 				       new_root_node, GFP_NOFS, log_removal);
847 	BUG_ON(ret < 0);
848 }
849 
850 /*
851  * check if the tree block can be shared by multiple trees
852  */
853 int btrfs_block_can_be_shared(struct btrfs_root *root,
854 			      struct extent_buffer *buf)
855 {
856 	/*
857 	 * Tree blocks not in reference counted trees and tree roots
858 	 * are never shared. If a block was allocated after the last
859 	 * snapshot and the block was not allocated by tree relocation,
860 	 * we know the block is not shared.
861 	 */
862 	if (root->ref_cows &&
863 	    buf != root->node && buf != root->commit_root &&
864 	    (btrfs_header_generation(buf) <=
865 	     btrfs_root_last_snapshot(&root->root_item) ||
866 	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
867 		return 1;
868 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
869 	if (root->ref_cows &&
870 	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
871 		return 1;
872 #endif
873 	return 0;
874 }
875 
876 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
877 				       struct btrfs_root *root,
878 				       struct extent_buffer *buf,
879 				       struct extent_buffer *cow,
880 				       int *last_ref)
881 {
882 	u64 refs;
883 	u64 owner;
884 	u64 flags;
885 	u64 new_flags = 0;
886 	int ret;
887 
888 	/*
889 	 * Backrefs update rules:
890 	 *
891 	 * Always use full backrefs for extent pointers in tree block
892 	 * allocated by tree relocation.
893 	 *
894 	 * If a shared tree block is no longer referenced by its owner
895 	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
896 	 * use full backrefs for extent pointers in tree block.
897 	 *
898 	 * If a tree block is being relocated
899 	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
900 	 * use full backrefs for extent pointers in tree block.
901 	 * The reason for this is some operations (such as drop tree)
902 	 * are only allowed for blocks that use full backrefs.
903 	 */
904 
905 	if (btrfs_block_can_be_shared(root, buf)) {
906 		ret = btrfs_lookup_extent_info(trans, root, buf->start,
907 					       btrfs_header_level(buf), 1,
908 					       &refs, &flags);
909 		if (ret)
910 			return ret;
911 		if (refs == 0) {
912 			ret = -EROFS;
913 			btrfs_std_error(root->fs_info, ret);
914 			return ret;
915 		}
916 	} else {
917 		refs = 1;
918 		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
919 		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
920 			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
921 		else
922 			flags = 0;
923 	}
924 
925 	owner = btrfs_header_owner(buf);
926 	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
927 	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
928 
929 	if (refs > 1) {
930 		if ((owner == root->root_key.objectid ||
931 		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
932 		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
933 			ret = btrfs_inc_ref(trans, root, buf, 1, 1);
934 			BUG_ON(ret); /* -ENOMEM */
935 
936 			if (root->root_key.objectid ==
937 			    BTRFS_TREE_RELOC_OBJECTID) {
938 				ret = btrfs_dec_ref(trans, root, buf, 0, 1);
939 				BUG_ON(ret); /* -ENOMEM */
940 				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
941 				BUG_ON(ret); /* -ENOMEM */
942 			}
943 			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
944 		} else {
945 
946 			if (root->root_key.objectid ==
947 			    BTRFS_TREE_RELOC_OBJECTID)
948 				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
949 			else
950 				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
951 			BUG_ON(ret); /* -ENOMEM */
952 		}
953 		if (new_flags != 0) {
954 			int level = btrfs_header_level(buf);
955 
956 			ret = btrfs_set_disk_extent_flags(trans, root,
957 							  buf->start,
958 							  buf->len,
959 							  new_flags, level, 0);
960 			if (ret)
961 				return ret;
962 		}
963 	} else {
964 		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
965 			if (root->root_key.objectid ==
966 			    BTRFS_TREE_RELOC_OBJECTID)
967 				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
968 			else
969 				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
970 			BUG_ON(ret); /* -ENOMEM */
971 			ret = btrfs_dec_ref(trans, root, buf, 1, 1);
972 			BUG_ON(ret); /* -ENOMEM */
973 		}
974 		clean_tree_block(trans, root, buf);
975 		*last_ref = 1;
976 	}
977 	return 0;
978 }
979 
980 /*
981  * does the dirty work in cow of a single block.  The parent block (if
982  * supplied) is updated to point to the new cow copy.  The new buffer is marked
983  * dirty and returned locked.  If you modify the block it needs to be marked
984  * dirty again.
985  *
986  * search_start -- an allocation hint for the new block
987  *
988  * empty_size -- a hint that you plan on doing more cow.  This is the size in
989  * bytes the allocator should try to find free next to the block it returns.
990  * This is just a hint and may be ignored by the allocator.
991  */
992 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
993 			     struct btrfs_root *root,
994 			     struct extent_buffer *buf,
995 			     struct extent_buffer *parent, int parent_slot,
996 			     struct extent_buffer **cow_ret,
997 			     u64 search_start, u64 empty_size)
998 {
999 	struct btrfs_disk_key disk_key;
1000 	struct extent_buffer *cow;
1001 	int level, ret;
1002 	int last_ref = 0;
1003 	int unlock_orig = 0;
1004 	u64 parent_start;
1005 
1006 	if (*cow_ret == buf)
1007 		unlock_orig = 1;
1008 
1009 	btrfs_assert_tree_locked(buf);
1010 
1011 	WARN_ON(root->ref_cows && trans->transid !=
1012 		root->fs_info->running_transaction->transid);
1013 	WARN_ON(root->ref_cows && trans->transid != root->last_trans);
1014 
1015 	level = btrfs_header_level(buf);
1016 
1017 	if (level == 0)
1018 		btrfs_item_key(buf, &disk_key, 0);
1019 	else
1020 		btrfs_node_key(buf, &disk_key, 0);
1021 
1022 	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
1023 		if (parent)
1024 			parent_start = parent->start;
1025 		else
1026 			parent_start = 0;
1027 	} else
1028 		parent_start = 0;
1029 
1030 	cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
1031 				     root->root_key.objectid, &disk_key,
1032 				     level, search_start, empty_size);
1033 	if (IS_ERR(cow))
1034 		return PTR_ERR(cow);
1035 
1036 	/* cow is set to blocking by btrfs_init_new_buffer */
1037 
1038 	copy_extent_buffer(cow, buf, 0, 0, cow->len);
1039 	btrfs_set_header_bytenr(cow, cow->start);
1040 	btrfs_set_header_generation(cow, trans->transid);
1041 	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
1042 	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
1043 				     BTRFS_HEADER_FLAG_RELOC);
1044 	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1045 		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
1046 	else
1047 		btrfs_set_header_owner(cow, root->root_key.objectid);
1048 
1049 	write_extent_buffer(cow, root->fs_info->fsid,
1050 			    (unsigned long)btrfs_header_fsid(cow),
1051 			    BTRFS_FSID_SIZE);
1052 
1053 	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
1054 	if (ret) {
1055 		btrfs_abort_transaction(trans, root, ret);
1056 		return ret;
1057 	}
1058 
1059 	if (root->ref_cows)
1060 		btrfs_reloc_cow_block(trans, root, buf, cow);
1061 
1062 	if (buf == root->node) {
1063 		WARN_ON(parent && parent != buf);
1064 		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1065 		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1066 			parent_start = buf->start;
1067 		else
1068 			parent_start = 0;
1069 
1070 		extent_buffer_get(cow);
1071 		tree_mod_log_set_root_pointer(root, cow, 1);
1072 		rcu_assign_pointer(root->node, cow);
1073 
1074 		btrfs_free_tree_block(trans, root, buf, parent_start,
1075 				      last_ref);
1076 		free_extent_buffer(buf);
1077 		add_root_to_dirty_list(root);
1078 	} else {
1079 		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1080 			parent_start = parent->start;
1081 		else
1082 			parent_start = 0;
1083 
1084 		WARN_ON(trans->transid != btrfs_header_generation(parent));
1085 		tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
1086 					MOD_LOG_KEY_REPLACE);
1087 		btrfs_set_node_blockptr(parent, parent_slot,
1088 					cow->start);
1089 		btrfs_set_node_ptr_generation(parent, parent_slot,
1090 					      trans->transid);
1091 		btrfs_mark_buffer_dirty(parent);
1092 		if (last_ref)
1093 			tree_mod_log_free_eb(root->fs_info, buf);
1094 		btrfs_free_tree_block(trans, root, buf, parent_start,
1095 				      last_ref);
1096 	}
1097 	if (unlock_orig)
1098 		btrfs_tree_unlock(buf);
1099 	free_extent_buffer_stale(buf);
1100 	btrfs_mark_buffer_dirty(cow);
1101 	*cow_ret = cow;
1102 	return 0;
1103 }
1104 
1105 /*
1106  * returns the logical address of the oldest predecessor of the given root.
1107  * entries older than time_seq are ignored.
1108  */
1109 static struct tree_mod_elem *
1110 __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
1111 			   struct extent_buffer *eb_root, u64 time_seq)
1112 {
1113 	struct tree_mod_elem *tm;
1114 	struct tree_mod_elem *found = NULL;
1115 	u64 root_logical = eb_root->start;
1116 	int looped = 0;
1117 
1118 	if (!time_seq)
1119 		return 0;
1120 
1121 	/*
1122 	 * the very last operation that's logged for a root is the replacement
1123 	 * operation (if it is replaced at all). this has the index of the *new*
1124 	 * root, making it the very first operation that's logged for this root.
1125 	 */
1126 	while (1) {
1127 		tm = tree_mod_log_search_oldest(fs_info, root_logical,
1128 						time_seq);
1129 		if (!looped && !tm)
1130 			return 0;
1131 		/*
1132 		 * if there are no tree operations for the oldest root, we simply
1133 		 * return it. this should only happen if that (old) root is at
1134 		 * level 0.
1135 		 */
1136 		if (!tm)
1137 			break;
1138 
1139 		/*
1140 		 * if there's an operation that's not a root replacement, we
1141 		 * found the oldest version of our root. normally, we'll find a
1142 		 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
1143 		 */
1144 		if (tm->op != MOD_LOG_ROOT_REPLACE)
1145 			break;
1146 
1147 		found = tm;
1148 		root_logical = tm->old_root.logical;
1149 		looped = 1;
1150 	}
1151 
1152 	/* if there's no old root to return, return what we found instead */
1153 	if (!found)
1154 		found = tm;
1155 
1156 	return found;
1157 }
1158 
1159 /*
1160  * tm is a pointer to the first operation to rewind within eb. Then, all
1161  * previous operations will be rewound (until we reach something older than
1162  * time_seq).
1163  */
1164 static void
1165 __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1166 		      u64 time_seq, struct tree_mod_elem *first_tm)
1167 {
1168 	u32 n;
1169 	struct rb_node *next;
1170 	struct tree_mod_elem *tm = first_tm;
1171 	unsigned long o_dst;
1172 	unsigned long o_src;
1173 	unsigned long p_size = sizeof(struct btrfs_key_ptr);
1174 
1175 	n = btrfs_header_nritems(eb);
1176 	tree_mod_log_read_lock(fs_info);
1177 	while (tm && tm->seq >= time_seq) {
1178 		/*
1179 		 * all the operations are recorded with the operator used for
1180 		 * the modification. as we're going backwards, we do the
1181 		 * opposite of each operation here.
1182 		 */
1183 		switch (tm->op) {
1184 		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
1185 			BUG_ON(tm->slot < n);
1186 			/* Fallthrough */
1187 		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
1188 		case MOD_LOG_KEY_REMOVE:
1189 			btrfs_set_node_key(eb, &tm->key, tm->slot);
1190 			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1191 			btrfs_set_node_ptr_generation(eb, tm->slot,
1192 						      tm->generation);
1193 			n++;
1194 			break;
1195 		case MOD_LOG_KEY_REPLACE:
1196 			BUG_ON(tm->slot >= n);
1197 			btrfs_set_node_key(eb, &tm->key, tm->slot);
1198 			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1199 			btrfs_set_node_ptr_generation(eb, tm->slot,
1200 						      tm->generation);
1201 			break;
1202 		case MOD_LOG_KEY_ADD:
1203 			/* if a move operation is needed it's in the log */
1204 			n--;
1205 			break;
1206 		case MOD_LOG_MOVE_KEYS:
1207 			o_dst = btrfs_node_key_ptr_offset(tm->slot);
1208 			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
1209 			memmove_extent_buffer(eb, o_dst, o_src,
1210 					      tm->move.nr_items * p_size);
1211 			break;
1212 		case MOD_LOG_ROOT_REPLACE:
1213 			/*
1214 			 * this operation is special. for roots, this must be
1215 			 * handled explicitly before rewinding.
1216 			 * for non-roots, this operation may exist if the node
1217 			 * was a root: root A -> child B; then A gets empty and
1218 			 * B is promoted to the new root. in the mod log, we'll
1219 			 * have a root-replace operation for B, a tree block
1220 			 * that is not a root. we simply ignore that operation.
1221 			 */
1222 			break;
1223 		}
1224 		next = rb_next(&tm->node);
1225 		if (!next)
1226 			break;
1227 		tm = container_of(next, struct tree_mod_elem, node);
1228 		if (tm->index != first_tm->index)
1229 			break;
1230 	}
1231 	tree_mod_log_read_unlock(fs_info);
1232 	btrfs_set_header_nritems(eb, n);
1233 }
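
/*
 * Rewind illustration (hypothetical numbers, not from the source): if the
 * log holds a MOD_LOG_KEY_REMOVE for slot 2 at seq 10 and we rewind to
 * time_seq 5, the saved key/blockptr/generation are written back into
 * slot 2 and nritems grows by one; a logged MOD_LOG_KEY_ADD is undone by
 * shrinking nritems instead.
 */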
1234 
1235 /*
1236  * Called with eb read locked. If the buffer cannot be rewound, the same buffer
1237  * is returned. If rewind operations happen, a fresh buffer is returned. The
1238  * returned buffer is always read-locked. If the returned buffer is not the
1239  * input buffer, the lock on the input buffer is released and the input buffer
1240  * is freed (its refcount is decremented).
1241  */
1242 static struct extent_buffer *
1243 tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1244 		    u64 time_seq)
1245 {
1246 	struct extent_buffer *eb_rewin;
1247 	struct tree_mod_elem *tm;
1248 
1249 	if (!time_seq)
1250 		return eb;
1251 
1252 	if (btrfs_header_level(eb) == 0)
1253 		return eb;
1254 
1255 	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
1256 	if (!tm)
1257 		return eb;
1258 
1259 	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1260 		BUG_ON(tm->slot != 0);
1261 		eb_rewin = alloc_dummy_extent_buffer(eb->start,
1262 						fs_info->tree_root->nodesize);
1263 		BUG_ON(!eb_rewin);
1264 		btrfs_set_header_bytenr(eb_rewin, eb->start);
1265 		btrfs_set_header_backref_rev(eb_rewin,
1266 					     btrfs_header_backref_rev(eb));
1267 		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
1268 		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
1269 	} else {
1270 		eb_rewin = btrfs_clone_extent_buffer(eb);
1271 		BUG_ON(!eb_rewin);
1272 	}
1273 
1274 	btrfs_tree_read_unlock(eb);
1275 	free_extent_buffer(eb);
1276 
1277 	extent_buffer_get(eb_rewin);
1278 	btrfs_tree_read_lock(eb_rewin);
1279 	__tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
1280 	WARN_ON(btrfs_header_nritems(eb_rewin) >
1281 		BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));
1282 
1283 	return eb_rewin;
1284 }
1285 
1286 /*
1287  * get_old_root() rewinds the state of @root's root node to the given @time_seq
1288  * value. If there are no changes, the current root->root_node is returned. If
1289  * anything changed in between, there's a fresh buffer allocated on which the
1290  * rewind operations are done. In any case, the returned buffer is read locked.
1291  * Returns NULL on error (with no locks held).
1292  */
1293 static inline struct extent_buffer *
1294 get_old_root(struct btrfs_root *root, u64 time_seq)
1295 {
1296 	struct tree_mod_elem *tm;
1297 	struct extent_buffer *eb = NULL;
1298 	struct extent_buffer *eb_root;
1299 	struct extent_buffer *old;
1300 	struct tree_mod_root *old_root = NULL;
1301 	u64 old_generation = 0;
1302 	u64 logical;
1303 	u32 blocksize;
1304 
1305 	eb_root = btrfs_read_lock_root_node(root);
1306 	tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1307 	if (!tm)
1308 		return eb_root;
1309 
1310 	if (tm->op == MOD_LOG_ROOT_REPLACE) {
1311 		old_root = &tm->old_root;
1312 		old_generation = tm->generation;
1313 		logical = old_root->logical;
1314 	} else {
1315 		logical = eb_root->start;
1316 	}
1317 
1318 	tm = tree_mod_log_search(root->fs_info, logical, time_seq);
1319 	if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1320 		btrfs_tree_read_unlock(eb_root);
1321 		free_extent_buffer(eb_root);
1322 		blocksize = btrfs_level_size(root, old_root->level);
1323 		old = read_tree_block(root, logical, blocksize, 0);
1324 		if (!old || !extent_buffer_uptodate(old)) {
1325 			free_extent_buffer(old);
1326 			pr_warn("btrfs: failed to read tree block %llu from get_old_root\n",
1327 				logical);
1328 			WARN_ON(1);
1329 		} else {
1330 			eb = btrfs_clone_extent_buffer(old);
1331 			free_extent_buffer(old);
1332 		}
1333 	} else if (old_root) {
1334 		btrfs_tree_read_unlock(eb_root);
1335 		free_extent_buffer(eb_root);
1336 		eb = alloc_dummy_extent_buffer(logical, root->nodesize);
1337 	} else {
1338 		eb = btrfs_clone_extent_buffer(eb_root);
1339 		btrfs_tree_read_unlock(eb_root);
1340 		free_extent_buffer(eb_root);
1341 	}
1342 
1343 	if (!eb)
1344 		return NULL;
1345 	extent_buffer_get(eb);
1346 	btrfs_tree_read_lock(eb);
1347 	if (old_root) {
1348 		btrfs_set_header_bytenr(eb, eb->start);
1349 		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
1350 		btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
1351 		btrfs_set_header_level(eb, old_root->level);
1352 		btrfs_set_header_generation(eb, old_generation);
1353 	}
1354 	if (tm)
1355 		__tree_mod_log_rewind(root->fs_info, eb, time_seq, tm);
1356 	else
1357 		WARN_ON(btrfs_header_level(eb) != 0);
1358 	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));
1359 
1360 	return eb;
1361 }
1362 
1363 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
1364 {
1365 	struct tree_mod_elem *tm;
1366 	int level;
1367 	struct extent_buffer *eb_root = btrfs_root_node(root);
1368 
1369 	tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1370 	if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
1371 		level = tm->old_root.level;
1372 	} else {
1373 		level = btrfs_header_level(eb_root);
1374 	}
1375 	free_extent_buffer(eb_root);
1376 
1377 	return level;
1378 }
1379 
1380 static inline int should_cow_block(struct btrfs_trans_handle *trans,
1381 				   struct btrfs_root *root,
1382 				   struct extent_buffer *buf)
1383 {
1384 	/* ensure we can see the force_cow */
1385 	smp_rmb();
1386 
1387 	/*
1388 	 * We do not need to cow a block if
1389 	 * 1) this block is not created or changed in this transaction;
1390 	 * 2) this block does not belong to TREE_RELOC tree;
1391 	 * 3) the root is not forced COW.
1392 	 *
1393 	 * What is forced COW:
1394 	 *    when we create a snapshot while committing the transaction,
1395 	 *    after we've finished copying the src root, we must COW the shared
1396 	 *    block to ensure metadata consistency.
1397 	 */
1398 	if (btrfs_header_generation(buf) == trans->transid &&
1399 	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
1400 	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
1401 	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
1402 	    !root->force_cow)
1403 		return 0;
1404 	return 1;
1405 }
1406 
1407 /*
1408  * cows a single block, see __btrfs_cow_block for the real work.
1409  * This version of it has extra checks so that a block isn't cow'd more than
1410  * once per transaction, as long as it hasn't been written yet
1411  */
1412 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
1413 		    struct btrfs_root *root, struct extent_buffer *buf,
1414 		    struct extent_buffer *parent, int parent_slot,
1415 		    struct extent_buffer **cow_ret)
1416 {
1417 	u64 search_start;
1418 	int ret;
1419 
1420 	if (trans->transaction != root->fs_info->running_transaction)
1421 		WARN(1, KERN_CRIT "trans %llu running %llu\n",
1422 		       (unsigned long long)trans->transid,
1423 		       (unsigned long long)
1424 		       root->fs_info->running_transaction->transid);
1425 
1426 	if (trans->transid != root->fs_info->generation)
1427 		WARN(1, KERN_CRIT "trans %llu running %llu\n",
1428 		       (unsigned long long)trans->transid,
1429 		       (unsigned long long)root->fs_info->generation);
1430 
1431 	if (!should_cow_block(trans, root, buf)) {
1432 		*cow_ret = buf;
1433 		return 0;
1434 	}
1435 
1436 	search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
1437 
1438 	if (parent)
1439 		btrfs_set_lock_blocking(parent);
1440 	btrfs_set_lock_blocking(buf);
1441 
1442 	ret = __btrfs_cow_block(trans, root, buf, parent,
1443 				 parent_slot, cow_ret, search_start, 0);
1444 
1445 	trace_btrfs_cow_block(root, buf, *cow_ret);
1446 
1447 	return ret;
1448 }
1449 
1450 /*
1451  * helper function for defrag to decide if two blocks pointed to by a
1452  * node are actually close by
1453  */
1454 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
1455 {
1456 	if (blocknr < other && other - (blocknr + blocksize) < 32768)
1457 		return 1;
1458 	if (blocknr > other && blocknr - (other + blocksize) < 32768)
1459 		return 1;
1460 	return 0;
1461 }
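
/*
 * Example (illustrative values): with blocksize 4096, blocknr 0x1000 and
 * other 0x9000 leave a gap of 0x7000 (28672) bytes, which is below the
 * 32768 byte threshold, so close_blocks() returns 1; a gap of 64k or more
 * would return 0.
 */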
1462 
1463 /*
1464  * compare two keys in a memcmp fashion
1465  */
1466 static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
1467 {
1468 	struct btrfs_key k1;
1469 
1470 	btrfs_disk_key_to_cpu(&k1, disk);
1471 
1472 	return btrfs_comp_cpu_keys(&k1, k2);
1473 }
1474 
1475 /*
1476  * same as comp_keys only with two btrfs_key's
1477  */
1478 int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
1479 {
1480 	if (k1->objectid > k2->objectid)
1481 		return 1;
1482 	if (k1->objectid < k2->objectid)
1483 		return -1;
1484 	if (k1->type > k2->type)
1485 		return 1;
1486 	if (k1->type < k2->type)
1487 		return -1;
1488 	if (k1->offset > k2->offset)
1489 		return 1;
1490 	if (k1->offset < k2->offset)
1491 		return -1;
1492 	return 0;
1493 }
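
/*
 * Key ordering illustration (example values only): keys compare by
 * objectid, then type, then offset, so (256, 1, 0) < (256, 84, 0) and
 * (256, 84, 0) < (257, 1, 0); btrfs_comp_cpu_keys() returns -1, 0 or 1
 * accordingly.
 */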
1494 
1495 /*
1496  * this is used by the defrag code to go through all the
1497  * leaves pointed to by a node and reallocate them so that
1498  * disk order is close to key order
1499  */
1500 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
1501 		       struct btrfs_root *root, struct extent_buffer *parent,
1502 		       int start_slot, u64 *last_ret,
1503 		       struct btrfs_key *progress)
1504 {
1505 	struct extent_buffer *cur;
1506 	u64 blocknr;
1507 	u64 gen;
1508 	u64 search_start = *last_ret;
1509 	u64 last_block = 0;
1510 	u64 other;
1511 	u32 parent_nritems;
1512 	int end_slot;
1513 	int i;
1514 	int err = 0;
1515 	int parent_level;
1516 	int uptodate;
1517 	u32 blocksize;
1518 	int progress_passed = 0;
1519 	struct btrfs_disk_key disk_key;
1520 
1521 	parent_level = btrfs_header_level(parent);
1522 
1523 	WARN_ON(trans->transaction != root->fs_info->running_transaction);
1524 	WARN_ON(trans->transid != root->fs_info->generation);
1525 
1526 	parent_nritems = btrfs_header_nritems(parent);
1527 	blocksize = btrfs_level_size(root, parent_level - 1);
1528 	end_slot = parent_nritems;
1529 
1530 	if (parent_nritems == 1)
1531 		return 0;
1532 
1533 	btrfs_set_lock_blocking(parent);
1534 
1535 	for (i = start_slot; i < end_slot; i++) {
1536 		int close = 1;
1537 
1538 		btrfs_node_key(parent, &disk_key, i);
1539 		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
1540 			continue;
1541 
1542 		progress_passed = 1;
1543 		blocknr = btrfs_node_blockptr(parent, i);
1544 		gen = btrfs_node_ptr_generation(parent, i);
1545 		if (last_block == 0)
1546 			last_block = blocknr;
1547 
1548 		if (i > 0) {
1549 			other = btrfs_node_blockptr(parent, i - 1);
1550 			close = close_blocks(blocknr, other, blocksize);
1551 		}
1552 		if (!close && i < end_slot - 2) {
1553 			other = btrfs_node_blockptr(parent, i + 1);
1554 			close = close_blocks(blocknr, other, blocksize);
1555 		}
1556 		if (close) {
1557 			last_block = blocknr;
1558 			continue;
1559 		}
1560 
1561 		cur = btrfs_find_tree_block(root, blocknr, blocksize);
1562 		if (cur)
1563 			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
1564 		else
1565 			uptodate = 0;
1566 		if (!cur || !uptodate) {
1567 			if (!cur) {
1568 				cur = read_tree_block(root, blocknr,
1569 							 blocksize, gen);
1570 				if (!cur || !extent_buffer_uptodate(cur)) {
1571 					free_extent_buffer(cur);
1572 					return -EIO;
1573 				}
1574 			} else if (!uptodate) {
1575 				err = btrfs_read_buffer(cur, gen);
1576 				if (err) {
1577 					free_extent_buffer(cur);
1578 					return err;
1579 				}
1580 			}
1581 		}
1582 		if (search_start == 0)
1583 			search_start = last_block;
1584 
1585 		btrfs_tree_lock(cur);
1586 		btrfs_set_lock_blocking(cur);
1587 		err = __btrfs_cow_block(trans, root, cur, parent, i,
1588 					&cur, search_start,
1589 					min(16 * blocksize,
1590 					    (end_slot - i) * blocksize));
1591 		if (err) {
1592 			btrfs_tree_unlock(cur);
1593 			free_extent_buffer(cur);
1594 			break;
1595 		}
1596 		search_start = cur->start;
1597 		last_block = cur->start;
1598 		*last_ret = search_start;
1599 		btrfs_tree_unlock(cur);
1600 		free_extent_buffer(cur);
1601 	}
1602 	return err;
1603 }
1604 
1605 /*
1606  * The leaf data grows from end-to-front in the node.
1607  * This returns the offset at which the last item's data starts,
1608  * which is the end of the leaf data stack.
1609  */
1610 static inline unsigned int leaf_data_end(struct btrfs_root *root,
1611 					 struct extent_buffer *leaf)
1612 {
1613 	u32 nr = btrfs_header_nritems(leaf);
1614 	if (nr == 0)
1615 		return BTRFS_LEAF_DATA_SIZE(root);
1616 	return btrfs_item_offset_nr(leaf, nr - 1);
1617 }
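
/*
 * Leaf layout sketch (an illustration, assuming the usual btrfs leaf format):
 *
 *	[header][item 0][item 1]...   free space   ...[data 1][data 0]
 *
 * item headers grow forward from the front of the leaf, item data grows
 * backward from the end, so the offset of the last item's data is where
 * the free space stops.
 */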
1618 
1619 
1620 /*
1621  * search for key in the extent_buffer.  The items start at offset p,
1622  * and they are item_size apart.  There are 'max' items in p.
1623  *
1624  * the slot in the array is returned via slot, and it points to
1625  * the place where you would insert key if it is not found in
1626  * the array.
1627  *
1628  * slot may point to max if the key is bigger than all of the keys
1629  */
1630 static noinline int generic_bin_search(struct extent_buffer *eb,
1631 				       unsigned long p,
1632 				       int item_size, struct btrfs_key *key,
1633 				       int max, int *slot)
1634 {
1635 	int low = 0;
1636 	int high = max;
1637 	int mid;
1638 	int ret;
1639 	struct btrfs_disk_key *tmp = NULL;
1640 	struct btrfs_disk_key unaligned;
1641 	unsigned long offset;
1642 	char *kaddr = NULL;
1643 	unsigned long map_start = 0;
1644 	unsigned long map_len = 0;
1645 	int err;
1646 
1647 	while (low < high) {
1648 		mid = (low + high) / 2;
1649 		offset = p + mid * item_size;
1650 
1651 		if (!kaddr || offset < map_start ||
1652 		    (offset + sizeof(struct btrfs_disk_key)) >
1653 		    map_start + map_len) {
1654 
1655 			err = map_private_extent_buffer(eb, offset,
1656 						sizeof(struct btrfs_disk_key),
1657 						&kaddr, &map_start, &map_len);
1658 
1659 			if (!err) {
1660 				tmp = (struct btrfs_disk_key *)(kaddr + offset -
1661 							map_start);
1662 			} else {
1663 				read_extent_buffer(eb, &unaligned,
1664 						   offset, sizeof(unaligned));
1665 				tmp = &unaligned;
1666 			}
1667 
1668 		} else {
1669 			tmp = (struct btrfs_disk_key *)(kaddr + offset -
1670 							map_start);
1671 		}
1672 		ret = comp_keys(tmp, key);
1673 
1674 		if (ret < 0)
1675 			low = mid + 1;
1676 		else if (ret > 0)
1677 			high = mid;
1678 		else {
1679 			*slot = mid;
1680 			return 0;
1681 		}
1682 	}
1683 	*slot = low;
1684 	return 1;
1685 }
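
/*
 * Example (illustrative): searching a node whose keys are {10, 20, 30} for
 * a key that sorts between slots 1 and 2 ends with low == 2, so
 * generic_bin_search() returns 1 and *slot == 2, the position where the
 * key would be inserted; an exact match returns 0 with *slot at the match.
 */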
1686 
1687 /*
1688  * simple bin_search frontend that does the right thing for
1689  * leaves vs nodes
1690  */
1691 static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1692 		      int level, int *slot)
1693 {
1694 	if (level == 0)
1695 		return generic_bin_search(eb,
1696 					  offsetof(struct btrfs_leaf, items),
1697 					  sizeof(struct btrfs_item),
1698 					  key, btrfs_header_nritems(eb),
1699 					  slot);
1700 	else
1701 		return generic_bin_search(eb,
1702 					  offsetof(struct btrfs_node, ptrs),
1703 					  sizeof(struct btrfs_key_ptr),
1704 					  key, btrfs_header_nritems(eb),
1705 					  slot);
1706 }
1707 
1708 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1709 		     int level, int *slot)
1710 {
1711 	return bin_search(eb, key, level, slot);
1712 }
1713 
1714 static void root_add_used(struct btrfs_root *root, u32 size)
1715 {
1716 	spin_lock(&root->accounting_lock);
1717 	btrfs_set_root_used(&root->root_item,
1718 			    btrfs_root_used(&root->root_item) + size);
1719 	spin_unlock(&root->accounting_lock);
1720 }
1721 
1722 static void root_sub_used(struct btrfs_root *root, u32 size)
1723 {
1724 	spin_lock(&root->accounting_lock);
1725 	btrfs_set_root_used(&root->root_item,
1726 			    btrfs_root_used(&root->root_item) - size);
1727 	spin_unlock(&root->accounting_lock);
1728 }
1729 
1730 /* given a node and slot number, this reads the block it points to.  The
1731  * extent buffer is returned with a reference taken (but unlocked).
1732  * NULL is returned on error.
1733  */
1734 static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
1735 				   struct extent_buffer *parent, int slot)
1736 {
1737 	int level = btrfs_header_level(parent);
1738 	struct extent_buffer *eb;
1739 
1740 	if (slot < 0)
1741 		return NULL;
1742 	if (slot >= btrfs_header_nritems(parent))
1743 		return NULL;
1744 
1745 	BUG_ON(level == 0);
1746 
1747 	eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
1748 			     btrfs_level_size(root, level - 1),
1749 			     btrfs_node_ptr_generation(parent, slot));
1750 	if (eb && !extent_buffer_uptodate(eb)) {
1751 		free_extent_buffer(eb);
1752 		eb = NULL;
1753 	}
1754 
1755 	return eb;
1756 }
1757 
1758 /*
1759  * node level balancing, used to make sure nodes are in proper order for
1760  * item deletion.  We balance from the top down, so we have to make sure
1761  * that a deletion won't leave a node completely empty later on.
1762  */
1763 static noinline int balance_level(struct btrfs_trans_handle *trans,
1764 			 struct btrfs_root *root,
1765 			 struct btrfs_path *path, int level)
1766 {
1767 	struct extent_buffer *right = NULL;
1768 	struct extent_buffer *mid;
1769 	struct extent_buffer *left = NULL;
1770 	struct extent_buffer *parent = NULL;
1771 	int ret = 0;
1772 	int wret;
1773 	int pslot;
1774 	int orig_slot = path->slots[level];
1775 	u64 orig_ptr;
1776 
1777 	if (level == 0)
1778 		return 0;
1779 
1780 	mid = path->nodes[level];
1781 
1782 	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1783 		path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1784 	WARN_ON(btrfs_header_generation(mid) != trans->transid);
1785 
1786 	orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1787 
1788 	if (level < BTRFS_MAX_LEVEL - 1) {
1789 		parent = path->nodes[level + 1];
1790 		pslot = path->slots[level + 1];
1791 	}
1792 
1793 	/*
1794 	 * deal with the case where there is only one pointer in the root
1795 	 * by promoting the node below to a root
1796 	 */
1797 	if (!parent) {
1798 		struct extent_buffer *child;
1799 
1800 		if (btrfs_header_nritems(mid) != 1)
1801 			return 0;
1802 
1803 		/* promote the child to a root */
1804 		child = read_node_slot(root, mid, 0);
1805 		if (!child) {
1806 			ret = -EROFS;
1807 			btrfs_std_error(root->fs_info, ret);
1808 			goto enospc;
1809 		}
1810 
1811 		btrfs_tree_lock(child);
1812 		btrfs_set_lock_blocking(child);
1813 		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1814 		if (ret) {
1815 			btrfs_tree_unlock(child);
1816 			free_extent_buffer(child);
1817 			goto enospc;
1818 		}
1819 
1820 		tree_mod_log_set_root_pointer(root, child, 1);
1821 		rcu_assign_pointer(root->node, child);
1822 
1823 		add_root_to_dirty_list(root);
1824 		btrfs_tree_unlock(child);
1825 
1826 		path->locks[level] = 0;
1827 		path->nodes[level] = NULL;
1828 		clean_tree_block(trans, root, mid);
1829 		btrfs_tree_unlock(mid);
1830 		/* once for the path */
1831 		free_extent_buffer(mid);
1832 
1833 		root_sub_used(root, mid->len);
1834 		btrfs_free_tree_block(trans, root, mid, 0, 1);
1835 		/* once for the root ptr */
1836 		free_extent_buffer_stale(mid);
1837 		return 0;
1838 	}
1839 	if (btrfs_header_nritems(mid) >
1840 	    BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
1841 		return 0;
1842 
1843 	left = read_node_slot(root, parent, pslot - 1);
1844 	if (left) {
1845 		btrfs_tree_lock(left);
1846 		btrfs_set_lock_blocking(left);
1847 		wret = btrfs_cow_block(trans, root, left,
1848 				       parent, pslot - 1, &left);
1849 		if (wret) {
1850 			ret = wret;
1851 			goto enospc;
1852 		}
1853 	}
1854 	right = read_node_slot(root, parent, pslot + 1);
1855 	if (right) {
1856 		btrfs_tree_lock(right);
1857 		btrfs_set_lock_blocking(right);
1858 		wret = btrfs_cow_block(trans, root, right,
1859 				       parent, pslot + 1, &right);
1860 		if (wret) {
1861 			ret = wret;
1862 			goto enospc;
1863 		}
1864 	}
1865 
1866 	/* first, try to make some room in the middle buffer */
1867 	if (left) {
1868 		orig_slot += btrfs_header_nritems(left);
1869 		wret = push_node_left(trans, root, left, mid, 1);
1870 		if (wret < 0)
1871 			ret = wret;
1872 	}
1873 
1874 	/*
1875 	 * then try to empty the right most buffer into the middle
1876 	 */
1877 	if (right) {
1878 		wret = push_node_left(trans, root, mid, right, 1);
1879 		if (wret < 0 && wret != -ENOSPC)
1880 			ret = wret;
1881 		if (btrfs_header_nritems(right) == 0) {
1882 			clean_tree_block(trans, root, right);
1883 			btrfs_tree_unlock(right);
1884 			del_ptr(root, path, level + 1, pslot + 1);
1885 			root_sub_used(root, right->len);
1886 			btrfs_free_tree_block(trans, root, right, 0, 1);
1887 			free_extent_buffer_stale(right);
1888 			right = NULL;
1889 		} else {
1890 			struct btrfs_disk_key right_key;
1891 			btrfs_node_key(right, &right_key, 0);
1892 			tree_mod_log_set_node_key(root->fs_info, parent,
1893 						  pslot + 1, 0);
1894 			btrfs_set_node_key(parent, &right_key, pslot + 1);
1895 			btrfs_mark_buffer_dirty(parent);
1896 		}
1897 	}
1898 	if (btrfs_header_nritems(mid) == 1) {
1899 		/*
1900 		 * we're not allowed to leave a node with one item in the
1901 		 * tree during a delete.  A deletion from lower in the tree
1902 		 * could try to delete the only pointer in this node.
1903 		 * So, pull some keys from the left.
1904 		 * There has to be a left pointer at this point because
1905 		 * otherwise we would have pulled some pointers from the
1906 		 * right.
1907 		 */
1908 		if (!left) {
1909 			ret = -EROFS;
1910 			btrfs_std_error(root->fs_info, ret);
1911 			goto enospc;
1912 		}
1913 		wret = balance_node_right(trans, root, mid, left);
1914 		if (wret < 0) {
1915 			ret = wret;
1916 			goto enospc;
1917 		}
1918 		if (wret == 1) {
1919 			wret = push_node_left(trans, root, left, mid, 1);
1920 			if (wret < 0)
1921 				ret = wret;
1922 		}
1923 		BUG_ON(wret == 1);
1924 	}
1925 	if (btrfs_header_nritems(mid) == 0) {
1926 		clean_tree_block(trans, root, mid);
1927 		btrfs_tree_unlock(mid);
1928 		del_ptr(root, path, level + 1, pslot);
1929 		root_sub_used(root, mid->len);
1930 		btrfs_free_tree_block(trans, root, mid, 0, 1);
1931 		free_extent_buffer_stale(mid);
1932 		mid = NULL;
1933 	} else {
1934 		/* update the parent key to reflect our changes */
1935 		struct btrfs_disk_key mid_key;
1936 		btrfs_node_key(mid, &mid_key, 0);
1937 		tree_mod_log_set_node_key(root->fs_info, parent,
1938 					  pslot, 0);
1939 		btrfs_set_node_key(parent, &mid_key, pslot);
1940 		btrfs_mark_buffer_dirty(parent);
1941 	}
1942 
1943 	/* update the path */
1944 	if (left) {
1945 		if (btrfs_header_nritems(left) > orig_slot) {
1946 			extent_buffer_get(left);
1947 			/* left was locked after cow */
1948 			path->nodes[level] = left;
1949 			path->slots[level + 1] -= 1;
1950 			path->slots[level] = orig_slot;
1951 			if (mid) {
1952 				btrfs_tree_unlock(mid);
1953 				free_extent_buffer(mid);
1954 			}
1955 		} else {
1956 			orig_slot -= btrfs_header_nritems(left);
1957 			path->slots[level] = orig_slot;
1958 		}
1959 	}
1960 	/* double check we haven't messed things up */
1961 	if (orig_ptr !=
1962 	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
1963 		BUG();
1964 enospc:
1965 	if (right) {
1966 		btrfs_tree_unlock(right);
1967 		free_extent_buffer(right);
1968 	}
1969 	if (left) {
1970 		if (path->nodes[level] != left)
1971 			btrfs_tree_unlock(left);
1972 		free_extent_buffer(left);
1973 	}
1974 	return ret;
1975 }
1976 
1977 /* Node balancing for insertion.  Here we only split or push nodes around
1978  * when they are completely full.  This is also done top down, so we
1979  * have to be pessimistic.
1980  */
1981 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
1982 					  struct btrfs_root *root,
1983 					  struct btrfs_path *path, int level)
1984 {
1985 	struct extent_buffer *right = NULL;
1986 	struct extent_buffer *mid;
1987 	struct extent_buffer *left = NULL;
1988 	struct extent_buffer *parent = NULL;
1989 	int ret = 0;
1990 	int wret;
1991 	int pslot;
1992 	int orig_slot = path->slots[level];
1993 
1994 	if (level == 0)
1995 		return 1;
1996 
1997 	mid = path->nodes[level];
1998 	WARN_ON(btrfs_header_generation(mid) != trans->transid);
1999 
2000 	if (level < BTRFS_MAX_LEVEL - 1) {
2001 		parent = path->nodes[level + 1];
2002 		pslot = path->slots[level + 1];
2003 	}
2004 
2005 	if (!parent)
2006 		return 1;
2007 
2008 	left = read_node_slot(root, parent, pslot - 1);
2009 
2010 	/* first, try to make some room in the middle buffer */
2011 	if (left) {
2012 		u32 left_nr;
2013 
2014 		btrfs_tree_lock(left);
2015 		btrfs_set_lock_blocking(left);
2016 
2017 		left_nr = btrfs_header_nritems(left);
2018 		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2019 			wret = 1;
2020 		} else {
2021 			ret = btrfs_cow_block(trans, root, left, parent,
2022 					      pslot - 1, &left);
2023 			if (ret)
2024 				wret = 1;
2025 			else {
2026 				wret = push_node_left(trans, root,
2027 						      left, mid, 0);
2028 			}
2029 		}
2030 		if (wret < 0)
2031 			ret = wret;
2032 		if (wret == 0) {
2033 			struct btrfs_disk_key disk_key;
2034 			orig_slot += left_nr;
2035 			btrfs_node_key(mid, &disk_key, 0);
2036 			tree_mod_log_set_node_key(root->fs_info, parent,
2037 						  pslot, 0);
2038 			btrfs_set_node_key(parent, &disk_key, pslot);
2039 			btrfs_mark_buffer_dirty(parent);
2040 			if (btrfs_header_nritems(left) > orig_slot) {
2041 				path->nodes[level] = left;
2042 				path->slots[level + 1] -= 1;
2043 				path->slots[level] = orig_slot;
2044 				btrfs_tree_unlock(mid);
2045 				free_extent_buffer(mid);
2046 			} else {
2047 				orig_slot -=
2048 					btrfs_header_nritems(left);
2049 				path->slots[level] = orig_slot;
2050 				btrfs_tree_unlock(left);
2051 				free_extent_buffer(left);
2052 			}
2053 			return 0;
2054 		}
2055 		btrfs_tree_unlock(left);
2056 		free_extent_buffer(left);
2057 	}
2058 	right = read_node_slot(root, parent, pslot + 1);
2059 
2060 	/*
2061 	 * then try to empty the right most buffer into the middle
2062 	 */
2063 	if (right) {
2064 		u32 right_nr;
2065 
2066 		btrfs_tree_lock(right);
2067 		btrfs_set_lock_blocking(right);
2068 
2069 		right_nr = btrfs_header_nritems(right);
2070 		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2071 			wret = 1;
2072 		} else {
2073 			ret = btrfs_cow_block(trans, root, right,
2074 					      parent, pslot + 1,
2075 					      &right);
2076 			if (ret)
2077 				wret = 1;
2078 			else {
2079 				wret = balance_node_right(trans, root,
2080 							  right, mid);
2081 			}
2082 		}
2083 		if (wret < 0)
2084 			ret = wret;
2085 		if (wret == 0) {
2086 			struct btrfs_disk_key disk_key;
2087 
2088 			btrfs_node_key(right, &disk_key, 0);
2089 			tree_mod_log_set_node_key(root->fs_info, parent,
2090 						  pslot + 1, 0);
2091 			btrfs_set_node_key(parent, &disk_key, pslot + 1);
2092 			btrfs_mark_buffer_dirty(parent);
2093 
2094 			if (btrfs_header_nritems(mid) <= orig_slot) {
2095 				path->nodes[level] = right;
2096 				path->slots[level + 1] += 1;
2097 				path->slots[level] = orig_slot -
2098 					btrfs_header_nritems(mid);
2099 				btrfs_tree_unlock(mid);
2100 				free_extent_buffer(mid);
2101 			} else {
2102 				btrfs_tree_unlock(right);
2103 				free_extent_buffer(right);
2104 			}
2105 			return 0;
2106 		}
2107 		btrfs_tree_unlock(right);
2108 		free_extent_buffer(right);
2109 	}
2110 	return 1;
2111 }
2112 
2113 /*
2114  * readahead one full node of leaves, finding things that are close
2115  * to the block in 'slot', and triggering readahead on them.
2116  */
2117 static void reada_for_search(struct btrfs_root *root,
2118 			     struct btrfs_path *path,
2119 			     int level, int slot, u64 objectid)
2120 {
2121 	struct extent_buffer *node;
2122 	struct btrfs_disk_key disk_key;
2123 	u32 nritems;
2124 	u64 search;
2125 	u64 target;
2126 	u64 nread = 0;
2127 	u64 gen;
2128 	int direction = path->reada;
2129 	struct extent_buffer *eb;
2130 	u32 nr;
2131 	u32 blocksize;
2132 	u32 nscan = 0;
2133 
2134 	if (level != 1)
2135 		return;
2136 
2137 	if (!path->nodes[level])
2138 		return;
2139 
2140 	node = path->nodes[level];
2141 
2142 	search = btrfs_node_blockptr(node, slot);
2143 	blocksize = btrfs_level_size(root, level - 1);
2144 	eb = btrfs_find_tree_block(root, search, blocksize);
2145 	if (eb) {
2146 		free_extent_buffer(eb);
2147 		return;
2148 	}
2149 
2150 	target = search;
2151 
2152 	nritems = btrfs_header_nritems(node);
2153 	nr = slot;
2154 
2155 	while (1) {
2156 		if (direction < 0) {
2157 			if (nr == 0)
2158 				break;
2159 			nr--;
2160 		} else if (direction > 0) {
2161 			nr++;
2162 			if (nr >= nritems)
2163 				break;
2164 		}
2165 		if (path->reada < 0 && objectid) {
2166 			btrfs_node_key(node, &disk_key, nr);
2167 			if (btrfs_disk_key_objectid(&disk_key) != objectid)
2168 				break;
2169 		}
2170 		search = btrfs_node_blockptr(node, nr);
2171 		if ((search <= target && target - search <= 65536) ||
2172 		    (search > target && search - target <= 65536)) {
2173 			gen = btrfs_node_ptr_generation(node, nr);
2174 			readahead_tree_block(root, search, blocksize, gen);
2175 			nread += blocksize;
2176 		}
2177 		nscan++;
2178 		if ((nread > 65536 || nscan > 32))
2179 			break;
2180 	}
2181 }
2182 
2183 static noinline void reada_for_balance(struct btrfs_root *root,
2184 				       struct btrfs_path *path, int level)
2185 {
2186 	int slot;
2187 	int nritems;
2188 	struct extent_buffer *parent;
2189 	struct extent_buffer *eb;
2190 	u64 gen;
2191 	u64 block1 = 0;
2192 	u64 block2 = 0;
2193 	int blocksize;
2194 
2195 	parent = path->nodes[level + 1];
2196 	if (!parent)
2197 		return;
2198 
2199 	nritems = btrfs_header_nritems(parent);
2200 	slot = path->slots[level + 1];
2201 	blocksize = btrfs_level_size(root, level);
2202 
2203 	if (slot > 0) {
2204 		block1 = btrfs_node_blockptr(parent, slot - 1);
2205 		gen = btrfs_node_ptr_generation(parent, slot - 1);
2206 		eb = btrfs_find_tree_block(root, block1, blocksize);
2207 		/*
2208 		 * if we get -EAGAIN from btrfs_buffer_uptodate, we
2209 		 * don't want to return -EAGAIN here.  That would loop
2210 		 * forever.
2211 		 */
2212 		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2213 			block1 = 0;
2214 		free_extent_buffer(eb);
2215 	}
2216 	if (slot + 1 < nritems) {
2217 		block2 = btrfs_node_blockptr(parent, slot + 1);
2218 		gen = btrfs_node_ptr_generation(parent, slot + 1);
2219 		eb = btrfs_find_tree_block(root, block2, blocksize);
2220 		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2221 			block2 = 0;
2222 		free_extent_buffer(eb);
2223 	}
2224 
2225 	if (block1)
2226 		readahead_tree_block(root, block1, blocksize, 0);
2227 	if (block2)
2228 		readahead_tree_block(root, block2, blocksize, 0);
2229 }
2230 
2231 
2232 /*
2233  * when we walk down the tree, it is usually safe to unlock the higher layers
2234  * in the tree.  The exceptions are when our path goes through slot 0, because
2235  * operations on the tree might require changing key pointers higher up in the
2236  * tree.
2237  *
2238  * callers might also have set path->keep_locks, which tells this code to keep
2239  * the lock if the path points to the last slot in the block.  This is part of
2240  * walking through the tree, and selecting the next slot in the higher block.
2241  *
2242  * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  So
2243  * if lowest_unlock is 1, level 0 won't be unlocked.
2244  */
2245 static noinline void unlock_up(struct btrfs_path *path, int level,
2246 			       int lowest_unlock, int min_write_lock_level,
2247 			       int *write_lock_level)
2248 {
2249 	int i;
2250 	int skip_level = level;
2251 	int no_skips = 0;
2252 	struct extent_buffer *t;
2253 
2254 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2255 		if (!path->nodes[i])
2256 			break;
2257 		if (!path->locks[i])
2258 			break;
2259 		if (!no_skips && path->slots[i] == 0) {
2260 			skip_level = i + 1;
2261 			continue;
2262 		}
2263 		if (!no_skips && path->keep_locks) {
2264 			u32 nritems;
2265 			t = path->nodes[i];
2266 			nritems = btrfs_header_nritems(t);
2267 			if (nritems < 1 || path->slots[i] >= nritems - 1) {
2268 				skip_level = i + 1;
2269 				continue;
2270 			}
2271 		}
2272 		if (skip_level < i && i >= lowest_unlock)
2273 			no_skips = 1;
2274 
2275 		t = path->nodes[i];
2276 		if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
2277 			btrfs_tree_unlock_rw(t, path->locks[i]);
2278 			path->locks[i] = 0;
2279 			if (write_lock_level &&
2280 			    i > min_write_lock_level &&
2281 			    i <= *write_lock_level) {
2282 				*write_lock_level = i - 1;
2283 			}
2284 		}
2285 	}
2286 }
2287 
2288 /*
2289  * This releases any locks held in the path starting at level and
2290  * going all the way up to the root.
2291  *
2292  * btrfs_search_slot will keep the lock held on higher nodes in a few
2293  * corner cases, such as COW of the block at slot zero in the node.  This
2294  * ignores those rules, and it should only be called when there are no
2295  * more updates to be done higher up in the tree.
2296  */
2297 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2298 {
2299 	int i;
2300 
2301 	if (path->keep_locks)
2302 		return;
2303 
2304 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2305 		if (!path->nodes[i])
2306 			continue;
2307 		if (!path->locks[i])
2308 			continue;
2309 		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2310 		path->locks[i] = 0;
2311 	}
2312 }
2313 
2314 /*
2315  * helper function for btrfs_search_slot.  The goal is to find a block
2316  * in cache without setting the path to blocking.  If we find the block
2317  * we return zero and the path is unchanged.
2318  *
2319  * If we can't find the block, we set the path blocking and do some
2320  * readahead.  -EAGAIN is returned and the search must be repeated.
2321  */
2322 static int
2323 read_block_for_search(struct btrfs_trans_handle *trans,
2324 		       struct btrfs_root *root, struct btrfs_path *p,
2325 		       struct extent_buffer **eb_ret, int level, int slot,
2326 		       struct btrfs_key *key, u64 time_seq)
2327 {
2328 	u64 blocknr;
2329 	u64 gen;
2330 	u32 blocksize;
2331 	struct extent_buffer *b = *eb_ret;
2332 	struct extent_buffer *tmp;
2333 	int ret;
2334 
2335 	blocknr = btrfs_node_blockptr(b, slot);
2336 	gen = btrfs_node_ptr_generation(b, slot);
2337 	blocksize = btrfs_level_size(root, level - 1);
2338 
2339 	tmp = btrfs_find_tree_block(root, blocknr, blocksize);
2340 	if (tmp) {
2341 		/* first we do an atomic uptodate check */
2342 		if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2343 			*eb_ret = tmp;
2344 			return 0;
2345 		}
2346 
2347 		/* the pages were up to date, but we failed
2348 		 * the generation number check.  Do a full
2349 		 * read for the generation number that is correct.
2350 		 * We must do this without dropping locks so
2351 		 * we can trust our generation number
2352 		 */
2353 		btrfs_set_path_blocking(p);
2354 
2355 		/* now we're allowed to do a blocking uptodate check */
2356 		ret = btrfs_read_buffer(tmp, gen);
2357 		if (!ret) {
2358 			*eb_ret = tmp;
2359 			return 0;
2360 		}
2361 		free_extent_buffer(tmp);
2362 		btrfs_release_path(p);
2363 		return -EIO;
2364 	}
2365 
2366 	/*
2367 	 * reduce lock contention at high levels
2368 	 * of the btree by dropping locks before
2369 	 * we read.  Don't release the lock on the current
2370 	 * level because we need to walk this node to figure
2371 	 * out which blocks to read.
2372 	 */
2373 	btrfs_unlock_up_safe(p, level + 1);
2374 	btrfs_set_path_blocking(p);
2375 
2376 	free_extent_buffer(tmp);
2377 	if (p->reada)
2378 		reada_for_search(root, p, level, slot, key->objectid);
2379 
2380 	btrfs_release_path(p);
2381 
2382 	ret = -EAGAIN;
2383 	tmp = read_tree_block(root, blocknr, blocksize, 0);
2384 	if (tmp) {
2385 		/*
2386 		 * If the read above didn't mark this buffer up to date,
2387 		 * it will never end up being up to date.  Set ret to EIO now
2388 		 * and give up so that our caller doesn't loop forever
2389 		 * on our EAGAINs.
2390 		 */
2391 		if (!btrfs_buffer_uptodate(tmp, 0, 0))
2392 			ret = -EIO;
2393 		free_extent_buffer(tmp);
2394 	}
2395 	return ret;
2396 }
2397 
2398 /*
2399  * helper function for btrfs_search_slot.  This does all of the checks
2400  * for node-level blocks and does any balancing required based on
2401  * the ins_len.
2402  *
2403  * If no extra work was required, zero is returned.  If we had to
2404  * drop the path, -EAGAIN is returned and btrfs_search_slot must
2405  * start over
2406  */
2407 static int
2408 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2409 		       struct btrfs_root *root, struct btrfs_path *p,
2410 		       struct extent_buffer *b, int level, int ins_len,
2411 		       int *write_lock_level)
2412 {
2413 	int ret;
2414 	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2415 	    BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
2416 		int sret;
2417 
2418 		if (*write_lock_level < level + 1) {
2419 			*write_lock_level = level + 1;
2420 			btrfs_release_path(p);
2421 			goto again;
2422 		}
2423 
2424 		btrfs_set_path_blocking(p);
2425 		reada_for_balance(root, p, level);
2426 		sret = split_node(trans, root, p, level);
2427 		btrfs_clear_path_blocking(p, NULL, 0);
2428 
2429 		BUG_ON(sret > 0);
2430 		if (sret) {
2431 			ret = sret;
2432 			goto done;
2433 		}
2434 		b = p->nodes[level];
2435 	} else if (ins_len < 0 && btrfs_header_nritems(b) <
2436 		   BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
2437 		int sret;
2438 
2439 		if (*write_lock_level < level + 1) {
2440 			*write_lock_level = level + 1;
2441 			btrfs_release_path(p);
2442 			goto again;
2443 		}
2444 
2445 		btrfs_set_path_blocking(p);
2446 		reada_for_balance(root, p, level);
2447 		sret = balance_level(trans, root, p, level);
2448 		btrfs_clear_path_blocking(p, NULL, 0);
2449 
2450 		if (sret) {
2451 			ret = sret;
2452 			goto done;
2453 		}
2454 		b = p->nodes[level];
2455 		if (!b) {
2456 			btrfs_release_path(p);
2457 			goto again;
2458 		}
2459 		BUG_ON(btrfs_header_nritems(b) == 1);
2460 	}
2461 	return 0;
2462 
2463 again:
2464 	ret = -EAGAIN;
2465 done:
2466 	return ret;
2467 }
2468 
2469 /*
2470  * look for key in the tree.  path is filled in with nodes along the way.
2471  * If the key is found, we return zero and you can find the item in the leaf
2472  * level of the path (level 0).
2473  *
2474  * If the key isn't found, the path points to the slot where it should
2475  * be inserted, and 1 is returned.  If there are other errors during the
2476  * search, a negative error number is returned.
2477  *
2478  * If ins_len > 0, nodes and leaves will be split as we walk down the
2479  * tree.  If ins_len < 0, nodes will be merged as we walk down the tree (if
2480  * possible).
2481  */
2482 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
2483 		      *root, struct btrfs_key *key, struct btrfs_path *p, int
2484 		      ins_len, int cow)
2485 {
2486 	struct extent_buffer *b;
2487 	int slot;
2488 	int ret;
2489 	int err;
2490 	int level;
2491 	int lowest_unlock = 1;
2492 	int root_lock;
2493 	/* everything at write_lock_level or lower must be write locked */
2494 	int write_lock_level = 0;
2495 	u8 lowest_level = 0;
2496 	int min_write_lock_level;
2497 
2498 	lowest_level = p->lowest_level;
2499 	WARN_ON(lowest_level && ins_len > 0);
2500 	WARN_ON(p->nodes[0] != NULL);
2501 
2502 	if (ins_len < 0) {
2503 		lowest_unlock = 2;
2504 
2505 		/* when we are removing items, we might have to go up to level
2506 		 * two as we update tree pointers.  Make sure we keep write
2507 		 * locks for those levels as well.
2508 		 */
2509 		write_lock_level = 2;
2510 	} else if (ins_len > 0) {
2511 		/*
2512 		 * for inserting items, make sure we have a write lock on
2513 		 * level 1 so we can update keys
2514 		 */
2515 		write_lock_level = 1;
2516 	}
2517 
2518 	if (!cow)
2519 		write_lock_level = -1;
2520 
2521 	if (cow && (p->keep_locks || p->lowest_level))
2522 		write_lock_level = BTRFS_MAX_LEVEL;
2523 
2524 	min_write_lock_level = write_lock_level;
2525 
2526 again:
2527 	/*
2528 	 * we try very hard to do read locks on the root
2529 	 */
2530 	root_lock = BTRFS_READ_LOCK;
2531 	level = 0;
2532 	if (p->search_commit_root) {
2533 		/*
2534 		 * the commit roots are read only
2535 		 * so we always do read locks
2536 		 */
2537 		b = root->commit_root;
2538 		extent_buffer_get(b);
2539 		level = btrfs_header_level(b);
2540 		if (!p->skip_locking)
2541 			btrfs_tree_read_lock(b);
2542 	} else {
2543 		if (p->skip_locking) {
2544 			b = btrfs_root_node(root);
2545 			level = btrfs_header_level(b);
2546 		} else {
2547 			/* we don't know the level of the root node
2548 			 * until we actually have it read locked
2549 			 */
2550 			b = btrfs_read_lock_root_node(root);
2551 			level = btrfs_header_level(b);
2552 			if (level <= write_lock_level) {
2553 				/* whoops, must trade for write lock */
2554 				btrfs_tree_read_unlock(b);
2555 				free_extent_buffer(b);
2556 				b = btrfs_lock_root_node(root);
2557 				root_lock = BTRFS_WRITE_LOCK;
2558 
2559 				/* the level might have changed, check again */
2560 				level = btrfs_header_level(b);
2561 			}
2562 		}
2563 	}
2564 	p->nodes[level] = b;
2565 	if (!p->skip_locking)
2566 		p->locks[level] = root_lock;
2567 
2568 	while (b) {
2569 		level = btrfs_header_level(b);
2570 
2571 		/*
2572 		 * setup the path here so we can release it under lock
2573 		 * contention with the cow code
2574 		 */
2575 		if (cow) {
2576 			/*
2577 			 * if we don't really need to cow this block
2578 			 * then we don't want to set the path blocking,
2579 			 * so we test it here
2580 			 */
2581 			if (!should_cow_block(trans, root, b))
2582 				goto cow_done;
2583 
2584 			btrfs_set_path_blocking(p);
2585 
2586 			/*
2587 			 * must have write locks on this node and the
2588 			 * parent
2589 			 */
2590 			if (level > write_lock_level ||
2591 			    (level + 1 > write_lock_level &&
2592 			    level + 1 < BTRFS_MAX_LEVEL &&
2593 			    p->nodes[level + 1])) {
2594 				write_lock_level = level + 1;
2595 				btrfs_release_path(p);
2596 				goto again;
2597 			}
2598 
2599 			err = btrfs_cow_block(trans, root, b,
2600 					      p->nodes[level + 1],
2601 					      p->slots[level + 1], &b);
2602 			if (err) {
2603 				ret = err;
2604 				goto done;
2605 			}
2606 		}
2607 cow_done:
2608 		BUG_ON(!cow && ins_len);
2609 
2610 		p->nodes[level] = b;
2611 		btrfs_clear_path_blocking(p, NULL, 0);
2612 
2613 		/*
2614 		 * we have a lock on b and as long as we aren't changing
2615 		 * the tree, there is no way for the items in b to change.
2616 		 * It is safe to drop the lock on our parent before we
2617 		 * go through the expensive btree search on b.
2618 		 *
2619 		 * If cow is true, then we might be changing slot zero,
2620 		 * which may require changing the parent.  So, we can't
2621 		 * drop the lock until after we know which slot we're
2622 		 * operating on.
2623 		 */
2624 		if (!cow)
2625 			btrfs_unlock_up_safe(p, level + 1);
2626 
2627 		ret = bin_search(b, key, level, &slot);
2628 
2629 		if (level != 0) {
2630 			int dec = 0;
2631 			if (ret && slot > 0) {
2632 				dec = 1;
2633 				slot -= 1;
2634 			}
2635 			p->slots[level] = slot;
2636 			err = setup_nodes_for_search(trans, root, p, b, level,
2637 					     ins_len, &write_lock_level);
2638 			if (err == -EAGAIN)
2639 				goto again;
2640 			if (err) {
2641 				ret = err;
2642 				goto done;
2643 			}
2644 			b = p->nodes[level];
2645 			slot = p->slots[level];
2646 
2647 			/*
2648 			 * slot 0 is special, if we change the key
2649 			 * we have to update the parent pointer
2650 			 * which means we must have a write lock
2651 			 * on the parent
2652 			 */
2653 			if (slot == 0 && cow &&
2654 			    write_lock_level < level + 1) {
2655 				write_lock_level = level + 1;
2656 				btrfs_release_path(p);
2657 				goto again;
2658 			}
2659 
2660 			unlock_up(p, level, lowest_unlock,
2661 				  min_write_lock_level, &write_lock_level);
2662 
2663 			if (level == lowest_level) {
2664 				if (dec)
2665 					p->slots[level]++;
2666 				goto done;
2667 			}
2668 
2669 			err = read_block_for_search(trans, root, p,
2670 						    &b, level, slot, key, 0);
2671 			if (err == -EAGAIN)
2672 				goto again;
2673 			if (err) {
2674 				ret = err;
2675 				goto done;
2676 			}
2677 
2678 			if (!p->skip_locking) {
2679 				level = btrfs_header_level(b);
2680 				if (level <= write_lock_level) {
2681 					err = btrfs_try_tree_write_lock(b);
2682 					if (!err) {
2683 						btrfs_set_path_blocking(p);
2684 						btrfs_tree_lock(b);
2685 						btrfs_clear_path_blocking(p, b,
2686 								  BTRFS_WRITE_LOCK);
2687 					}
2688 					p->locks[level] = BTRFS_WRITE_LOCK;
2689 				} else {
2690 					err = btrfs_try_tree_read_lock(b);
2691 					if (!err) {
2692 						btrfs_set_path_blocking(p);
2693 						btrfs_tree_read_lock(b);
2694 						btrfs_clear_path_blocking(p, b,
2695 								  BTRFS_READ_LOCK);
2696 					}
2697 					p->locks[level] = BTRFS_READ_LOCK;
2698 				}
2699 				p->nodes[level] = b;
2700 			}
2701 		} else {
2702 			p->slots[level] = slot;
2703 			if (ins_len > 0 &&
2704 			    btrfs_leaf_free_space(root, b) < ins_len) {
2705 				if (write_lock_level < 1) {
2706 					write_lock_level = 1;
2707 					btrfs_release_path(p);
2708 					goto again;
2709 				}
2710 
2711 				btrfs_set_path_blocking(p);
2712 				err = split_leaf(trans, root, key,
2713 						 p, ins_len, ret == 0);
2714 				btrfs_clear_path_blocking(p, NULL, 0);
2715 
2716 				BUG_ON(err > 0);
2717 				if (err) {
2718 					ret = err;
2719 					goto done;
2720 				}
2721 			}
2722 			if (!p->search_for_split)
2723 				unlock_up(p, level, lowest_unlock,
2724 					  min_write_lock_level, &write_lock_level);
2725 			goto done;
2726 		}
2727 	}
2728 	ret = 1;
2729 done:
2730 	/*
2731 	 * we don't really know what they plan on doing with the path
2732 	 * from here on, so for now just mark it as blocking
2733 	 */
2734 	if (!p->leave_spinning)
2735 		btrfs_set_path_blocking(p);
2736 	if (ret < 0)
2737 		btrfs_release_path(p);
2738 	return ret;
2739 }
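
/*
 * A minimal sketch of typical read-only usage, added for illustration only
 * (not part of the original file); error handling is omitted and the local
 * names (objectid, found_key) are hypothetical:
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct btrfs_key key, found_key;
 *	int ret;
 *
 *	key.objectid = objectid;
 *	key.type = BTRFS_INODE_ITEM_KEY;
 *	key.offset = 0;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
 *				      path->slots[0]);
 *	btrfs_free_path(path);
 *
 * btrfs_free_path() also drops any tree locks still held by the path.
 */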
2740 
2741 /*
2742  * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2743  * current state of the tree together with the operations recorded in the tree
2744  * modification log to search for the key in a previous version of this tree, as
2745  * denoted by the time_seq parameter.
2746  *
2747  * Naturally, there is no support for insert, delete or cow operations.
2748  *
2749  * The resulting path and return value will be set up as if we called
2750  * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2751  */
2752 int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
2753 			  struct btrfs_path *p, u64 time_seq)
2754 {
2755 	struct extent_buffer *b;
2756 	int slot;
2757 	int ret;
2758 	int err;
2759 	int level;
2760 	int lowest_unlock = 1;
2761 	u8 lowest_level = 0;
2762 
2763 	lowest_level = p->lowest_level;
2764 	WARN_ON(p->nodes[0] != NULL);
2765 
2766 	if (p->search_commit_root) {
2767 		BUG_ON(time_seq);
2768 		return btrfs_search_slot(NULL, root, key, p, 0, 0);
2769 	}
2770 
2771 again:
2772 	b = get_old_root(root, time_seq);
2773 	level = btrfs_header_level(b);
2774 	p->locks[level] = BTRFS_READ_LOCK;
2775 
2776 	while (b) {
2777 		level = btrfs_header_level(b);
2778 		p->nodes[level] = b;
2779 		btrfs_clear_path_blocking(p, NULL, 0);
2780 
2781 		/*
2782 		 * we have a lock on b and as long as we aren't changing
2783 		 * the tree, there is no way for the items in b to change.
2784 		 * It is safe to drop the lock on our parent before we
2785 		 * go through the expensive btree search on b.
2786 		 */
2787 		btrfs_unlock_up_safe(p, level + 1);
2788 
2789 		ret = bin_search(b, key, level, &slot);
2790 
2791 		if (level != 0) {
2792 			int dec = 0;
2793 			if (ret && slot > 0) {
2794 				dec = 1;
2795 				slot -= 1;
2796 			}
2797 			p->slots[level] = slot;
2798 			unlock_up(p, level, lowest_unlock, 0, NULL);
2799 
2800 			if (level == lowest_level) {
2801 				if (dec)
2802 					p->slots[level]++;
2803 				goto done;
2804 			}
2805 
2806 			err = read_block_for_search(NULL, root, p, &b, level,
2807 						    slot, key, time_seq);
2808 			if (err == -EAGAIN)
2809 				goto again;
2810 			if (err) {
2811 				ret = err;
2812 				goto done;
2813 			}
2814 
2815 			level = btrfs_header_level(b);
2816 			err = btrfs_try_tree_read_lock(b);
2817 			if (!err) {
2818 				btrfs_set_path_blocking(p);
2819 				btrfs_tree_read_lock(b);
2820 				btrfs_clear_path_blocking(p, b,
2821 							  BTRFS_READ_LOCK);
2822 			}
2823 			b = tree_mod_log_rewind(root->fs_info, b, time_seq);
2824 			p->locks[level] = BTRFS_READ_LOCK;
2825 			p->nodes[level] = b;
2826 		} else {
2827 			p->slots[level] = slot;
2828 			unlock_up(p, level, lowest_unlock, 0, NULL);
2829 			goto done;
2830 		}
2831 	}
2832 	ret = 1;
2833 done:
2834 	if (!p->leave_spinning)
2835 		btrfs_set_path_blocking(p);
2836 	if (ret < 0)
2837 		btrfs_release_path(p);
2838 
2839 	return ret;
2840 }
2841 
2842 /*
2843  * helper to use instead of search slot if no exact match is needed but
2844  * instead the next or previous item should be returned.
2845  * When find_higher is true, the next higher item is returned, the next lower
2846  * otherwise.
2847  * When return_any and find_higher are both true, and no higher item is found,
2848  * return the next lower instead.
2849  * When return_any is true and find_higher is false, and no lower item is found,
2850  * return the next higher instead.
2851  * It returns 0 if any item is found, 1 if none is found (tree empty), and
2852  * < 0 on error
2853  */
2854 int btrfs_search_slot_for_read(struct btrfs_root *root,
2855 			       struct btrfs_key *key, struct btrfs_path *p,
2856 			       int find_higher, int return_any)
2857 {
2858 	int ret;
2859 	struct extent_buffer *leaf;
2860 
2861 again:
2862 	ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
2863 	if (ret <= 0)
2864 		return ret;
2865 	/*
2866 	 * a return value of 1 means the path is at the position where the
2867 	 * item should be inserted. Normally this is the next bigger item,
2868 	 * but in case the previous item is the last in a leaf, path points
2869 	 * to the first free slot in the previous leaf, i.e. at an invalid
2870 	 * item.
2871 	 */
2872 	leaf = p->nodes[0];
2873 
2874 	if (find_higher) {
2875 		if (p->slots[0] >= btrfs_header_nritems(leaf)) {
2876 			ret = btrfs_next_leaf(root, p);
2877 			if (ret <= 0)
2878 				return ret;
2879 			if (!return_any)
2880 				return 1;
2881 			/*
2882 			 * no higher item found, return the next
2883 			 * lower instead
2884 			 */
2885 			return_any = 0;
2886 			find_higher = 0;
2887 			btrfs_release_path(p);
2888 			goto again;
2889 		}
2890 	} else {
2891 		if (p->slots[0] == 0) {
2892 			ret = btrfs_prev_leaf(root, p);
2893 			if (ret < 0)
2894 				return ret;
2895 			if (!ret) {
2896 				p->slots[0] = btrfs_header_nritems(leaf) - 1;
2897 				return 0;
2898 			}
2899 			if (!return_any)
2900 				return 1;
2901 			/*
2902 			 * no lower item found, return the next
2903 			 * higher instead
2904 			 */
2905 			return_any = 0;
2906 			find_higher = 1;
2907 			btrfs_release_path(p);
2908 			goto again;
2909 		} else {
2910 			--p->slots[0];
2911 		}
2912 	}
2913 	return 0;
2914 }
2915 
2916 /*
2917  * adjust the pointers going up the tree, starting at level
2918  * making sure the right key of each node points to 'key'.
2919  * This is used after shifting pointers to the left, so it stops
2920  * fixing up pointers when a given leaf/node is not in slot 0 of the
2921  * higher levels
2922  *
2923  */
2924 static void fixup_low_keys(struct btrfs_root *root, struct btrfs_path *path,
2925 			   struct btrfs_disk_key *key, int level)
2926 {
2927 	int i;
2928 	struct extent_buffer *t;
2929 
2930 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2931 		int tslot = path->slots[i];
2932 		if (!path->nodes[i])
2933 			break;
2934 		t = path->nodes[i];
2935 		tree_mod_log_set_node_key(root->fs_info, t, tslot, 1);
2936 		btrfs_set_node_key(t, key, tslot);
2937 		btrfs_mark_buffer_dirty(path->nodes[i]);
2938 		if (tslot != 0)
2939 			break;
2940 	}
2941 }
2942 
2943 /*
2944  * update item key.
2945  *
2946  * This function isn't completely safe. It's the caller's responsibility
2947  * to ensure that the new key won't break the sort order.
2948  */
2949 void btrfs_set_item_key_safe(struct btrfs_root *root, struct btrfs_path *path,
2950 			     struct btrfs_key *new_key)
2951 {
2952 	struct btrfs_disk_key disk_key;
2953 	struct extent_buffer *eb;
2954 	int slot;
2955 
2956 	eb = path->nodes[0];
2957 	slot = path->slots[0];
2958 	if (slot > 0) {
2959 		btrfs_item_key(eb, &disk_key, slot - 1);
2960 		BUG_ON(comp_keys(&disk_key, new_key) >= 0);
2961 	}
2962 	if (slot < btrfs_header_nritems(eb) - 1) {
2963 		btrfs_item_key(eb, &disk_key, slot + 1);
2964 		BUG_ON(comp_keys(&disk_key, new_key) <= 0);
2965 	}
2966 
2967 	btrfs_cpu_key_to_disk(&disk_key, new_key);
2968 	btrfs_set_item_key(eb, &disk_key, slot);
2969 	btrfs_mark_buffer_dirty(eb);
2970 	if (slot == 0)
2971 		fixup_low_keys(root, path, &disk_key, 1);
2972 }
2973 
2974 /*
2975  * try to push data from one node into the next node left in the
2976  * tree.
2977  *
2978  * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
2979  * error, and > 0 if there was no room in the left hand block.
2980  */
2981 static int push_node_left(struct btrfs_trans_handle *trans,
2982 			  struct btrfs_root *root, struct extent_buffer *dst,
2983 			  struct extent_buffer *src, int empty)
2984 {
2985 	int push_items = 0;
2986 	int src_nritems;
2987 	int dst_nritems;
2988 	int ret = 0;
2989 
2990 	src_nritems = btrfs_header_nritems(src);
2991 	dst_nritems = btrfs_header_nritems(dst);
2992 	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
2993 	WARN_ON(btrfs_header_generation(src) != trans->transid);
2994 	WARN_ON(btrfs_header_generation(dst) != trans->transid);
2995 
2996 	if (!empty && src_nritems <= 8)
2997 		return 1;
2998 
2999 	if (push_items <= 0)
3000 		return 1;
3001 
3002 	if (empty) {
3003 		push_items = min(src_nritems, push_items);
3004 		if (push_items < src_nritems) {
3005 			/* leave at least 8 pointers in the node if
3006 			 * we aren't going to empty it
3007 			 */
3008 			if (src_nritems - push_items < 8) {
3009 				if (push_items <= 8)
3010 					return 1;
3011 				push_items -= 8;
3012 			}
3013 		}
3014 	} else
3015 		push_items = min(src_nritems - 8, push_items);
3016 
3017 	tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
3018 			     push_items);
3019 	copy_extent_buffer(dst, src,
3020 			   btrfs_node_key_ptr_offset(dst_nritems),
3021 			   btrfs_node_key_ptr_offset(0),
3022 			   push_items * sizeof(struct btrfs_key_ptr));
3023 
3024 	if (push_items < src_nritems) {
3025 		/*
3026 		 * don't call tree_mod_log_eb_move here, key removal was already
3027 		 * fully logged by tree_mod_log_eb_copy above.
3028 		 */
3029 		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3030 				      btrfs_node_key_ptr_offset(push_items),
3031 				      (src_nritems - push_items) *
3032 				      sizeof(struct btrfs_key_ptr));
3033 	}
3034 	btrfs_set_header_nritems(src, src_nritems - push_items);
3035 	btrfs_set_header_nritems(dst, dst_nritems + push_items);
3036 	btrfs_mark_buffer_dirty(src);
3037 	btrfs_mark_buffer_dirty(dst);
3038 
3039 	return ret;
3040 }
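
/*
 * Illustration (not from the original file): pushing two pointers left moves
 * the first entries of src onto the end of dst and shifts the rest down:
 *
 *	before:  dst = [A B]      src = [C D E F]
 *	after:   dst = [A B C D]  src = [E F]
 *
 * The callers (e.g. balance_level) are responsible for updating the key for
 * src in the parent node afterwards.
 */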
3041 
3042 /*
3043  * try to push data from one node into the next node right in the
3044  * tree.
3045  *
3046  * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3047  * error, and > 0 if there was no room in the right hand block.
3048  *
3049  * this will only push up to 1/2 the contents of the left node over.
3050  */
3051 static int balance_node_right(struct btrfs_trans_handle *trans,
3052 			      struct btrfs_root *root,
3053 			      struct extent_buffer *dst,
3054 			      struct extent_buffer *src)
3055 {
3056 	int push_items = 0;
3057 	int max_push;
3058 	int src_nritems;
3059 	int dst_nritems;
3060 	int ret = 0;
3061 
3062 	WARN_ON(btrfs_header_generation(src) != trans->transid);
3063 	WARN_ON(btrfs_header_generation(dst) != trans->transid);
3064 
3065 	src_nritems = btrfs_header_nritems(src);
3066 	dst_nritems = btrfs_header_nritems(dst);
3067 	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3068 	if (push_items <= 0)
3069 		return 1;
3070 
3071 	if (src_nritems < 4)
3072 		return 1;
3073 
3074 	max_push = src_nritems / 2 + 1;
3075 	/* don't try to empty the node */
3076 	if (max_push >= src_nritems)
3077 		return 1;
3078 
3079 	if (max_push < push_items)
3080 		push_items = max_push;
3081 
3082 	tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
3083 	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3084 				      btrfs_node_key_ptr_offset(0),
3085 				      (dst_nritems) *
3086 				      sizeof(struct btrfs_key_ptr));
3087 
3088 	tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
3089 			     src_nritems - push_items, push_items);
3090 	copy_extent_buffer(dst, src,
3091 			   btrfs_node_key_ptr_offset(0),
3092 			   btrfs_node_key_ptr_offset(src_nritems - push_items),
3093 			   push_items * sizeof(struct btrfs_key_ptr));
3094 
3095 	btrfs_set_header_nritems(src, src_nritems - push_items);
3096 	btrfs_set_header_nritems(dst, dst_nritems + push_items);
3097 
3098 	btrfs_mark_buffer_dirty(src);
3099 	btrfs_mark_buffer_dirty(dst);
3100 
3101 	return ret;
3102 }
3103 
3104 /*
3105  * helper function to insert a new root level in the tree.
3106  * A new node is allocated, and a single item is inserted to
3107  * point to the existing root
3108  *
3109  * returns zero on success or < 0 on failure.
3110  */
3111 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3112 			   struct btrfs_root *root,
3113 			   struct btrfs_path *path, int level)
3114 {
3115 	u64 lower_gen;
3116 	struct extent_buffer *lower;
3117 	struct extent_buffer *c;
3118 	struct extent_buffer *old;
3119 	struct btrfs_disk_key lower_key;
3120 
3121 	BUG_ON(path->nodes[level]);
3122 	BUG_ON(path->nodes[level-1] != root->node);
3123 
3124 	lower = path->nodes[level-1];
3125 	if (level == 1)
3126 		btrfs_item_key(lower, &lower_key, 0);
3127 	else
3128 		btrfs_node_key(lower, &lower_key, 0);
3129 
3130 	c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
3131 				   root->root_key.objectid, &lower_key,
3132 				   level, root->node->start, 0);
3133 	if (IS_ERR(c))
3134 		return PTR_ERR(c);
3135 
3136 	root_add_used(root, root->nodesize);
3137 
3138 	memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
3139 	btrfs_set_header_nritems(c, 1);
3140 	btrfs_set_header_level(c, level);
3141 	btrfs_set_header_bytenr(c, c->start);
3142 	btrfs_set_header_generation(c, trans->transid);
3143 	btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
3144 	btrfs_set_header_owner(c, root->root_key.objectid);
3145 
3146 	write_extent_buffer(c, root->fs_info->fsid,
3147 			    (unsigned long)btrfs_header_fsid(c),
3148 			    BTRFS_FSID_SIZE);
3149 
3150 	write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
3151 			    (unsigned long)btrfs_header_chunk_tree_uuid(c),
3152 			    BTRFS_UUID_SIZE);
3153 
3154 	btrfs_set_node_key(c, &lower_key, 0);
3155 	btrfs_set_node_blockptr(c, 0, lower->start);
3156 	lower_gen = btrfs_header_generation(lower);
3157 	WARN_ON(lower_gen != trans->transid);
3158 
3159 	btrfs_set_node_ptr_generation(c, 0, lower_gen);
3160 
3161 	btrfs_mark_buffer_dirty(c);
3162 
3163 	old = root->node;
3164 	tree_mod_log_set_root_pointer(root, c, 0);
3165 	rcu_assign_pointer(root->node, c);
3166 
3167 	/* the super has an extra ref to root->node */
3168 	free_extent_buffer(old);
3169 
3170 	add_root_to_dirty_list(root);
3171 	extent_buffer_get(c);
3172 	path->nodes[level] = c;
3173 	path->locks[level] = BTRFS_WRITE_LOCK;
3174 	path->slots[level] = 0;
3175 	return 0;
3176 }
3177 
3178 /*
3179  * worker function to insert a single pointer in a node.
3180  * the node should have enough room for the pointer already
3181  *
3182  * slot and level indicate where you want the key to go, and
3183  * blocknr is the block the key points to.
3184  */
3185 static void insert_ptr(struct btrfs_trans_handle *trans,
3186 		       struct btrfs_root *root, struct btrfs_path *path,
3187 		       struct btrfs_disk_key *key, u64 bytenr,
3188 		       int slot, int level)
3189 {
3190 	struct extent_buffer *lower;
3191 	int nritems;
3192 	int ret;
3193 
3194 	BUG_ON(!path->nodes[level]);
3195 	btrfs_assert_tree_locked(path->nodes[level]);
3196 	lower = path->nodes[level];
3197 	nritems = btrfs_header_nritems(lower);
3198 	BUG_ON(slot > nritems);
3199 	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
3200 	if (slot != nritems) {
3201 		if (level)
3202 			tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
3203 					     slot, nritems - slot);
3204 		memmove_extent_buffer(lower,
3205 			      btrfs_node_key_ptr_offset(slot + 1),
3206 			      btrfs_node_key_ptr_offset(slot),
3207 			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
3208 	}
3209 	if (level) {
3210 		ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
3211 					      MOD_LOG_KEY_ADD);
3212 		BUG_ON(ret < 0);
3213 	}
3214 	btrfs_set_node_key(lower, key, slot);
3215 	btrfs_set_node_blockptr(lower, slot, bytenr);
3216 	WARN_ON(trans->transid == 0);
3217 	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3218 	btrfs_set_header_nritems(lower, nritems + 1);
3219 	btrfs_mark_buffer_dirty(lower);
3220 }
3221 
3222 /*
3223  * split the node at the specified level in path in two.
3224  * The path is corrected to point to the appropriate node after the split
3225  *
3226  * Before splitting this tries to make some room in the node by pushing
3227  * left and right, if either one works, it returns right away.
3228  *
3229  * returns 0 on success and < 0 on failure
3230  */
3231 static noinline int split_node(struct btrfs_trans_handle *trans,
3232 			       struct btrfs_root *root,
3233 			       struct btrfs_path *path, int level)
3234 {
3235 	struct extent_buffer *c;
3236 	struct extent_buffer *split;
3237 	struct btrfs_disk_key disk_key;
3238 	int mid;
3239 	int ret;
3240 	u32 c_nritems;
3241 
3242 	c = path->nodes[level];
3243 	WARN_ON(btrfs_header_generation(c) != trans->transid);
3244 	if (c == root->node) {
3245 		/*
3246 		 * trying to split the root, let's make a new one
3247 		 *
3248 		 * tree mod log: We don't log removal of the old root in
3249 		 * insert_new_root, because that root buffer will be kept as a
3250 		 * normal node. We are going to log removal of half of the
3251 		 * elements below with tree_mod_log_eb_copy. We're holding a
3252 		 * tree lock on the buffer, which is why we cannot race with
3253 		 * other tree_mod_log users.
3254 		 */
3255 		ret = insert_new_root(trans, root, path, level + 1);
3256 		if (ret)
3257 			return ret;
3258 	} else {
3259 		ret = push_nodes_for_insert(trans, root, path, level);
3260 		c = path->nodes[level];
3261 		if (!ret && btrfs_header_nritems(c) <
3262 		    BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
3263 			return 0;
3264 		if (ret < 0)
3265 			return ret;
3266 	}
3267 
3268 	c_nritems = btrfs_header_nritems(c);
3269 	mid = (c_nritems + 1) / 2;
3270 	btrfs_node_key(c, &disk_key, mid);
3271 
3272 	split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
3273 					root->root_key.objectid,
3274 					&disk_key, level, c->start, 0);
3275 	if (IS_ERR(split))
3276 		return PTR_ERR(split);
3277 
3278 	root_add_used(root, root->nodesize);
3279 
3280 	memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
3281 	btrfs_set_header_level(split, btrfs_header_level(c));
3282 	btrfs_set_header_bytenr(split, split->start);
3283 	btrfs_set_header_generation(split, trans->transid);
3284 	btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
3285 	btrfs_set_header_owner(split, root->root_key.objectid);
3286 	write_extent_buffer(split, root->fs_info->fsid,
3287 			    (unsigned long)btrfs_header_fsid(split),
3288 			    BTRFS_FSID_SIZE);
3289 	write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
3290 			    (unsigned long)btrfs_header_chunk_tree_uuid(split),
3291 			    BTRFS_UUID_SIZE);
3292 
3293 	tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
3294 	copy_extent_buffer(split, c,
3295 			   btrfs_node_key_ptr_offset(0),
3296 			   btrfs_node_key_ptr_offset(mid),
3297 			   (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3298 	btrfs_set_header_nritems(split, c_nritems - mid);
3299 	btrfs_set_header_nritems(c, mid);
3300 	ret = 0;
3301 
3302 	btrfs_mark_buffer_dirty(c);
3303 	btrfs_mark_buffer_dirty(split);
3304 
3305 	insert_ptr(trans, root, path, &disk_key, split->start,
3306 		   path->slots[level + 1] + 1, level + 1);
3307 
3308 	if (path->slots[level] >= mid) {
3309 		path->slots[level] -= mid;
3310 		btrfs_tree_unlock(c);
3311 		free_extent_buffer(c);
3312 		path->nodes[level] = split;
3313 		path->slots[level + 1] += 1;
3314 	} else {
3315 		btrfs_tree_unlock(split);
3316 		free_extent_buffer(split);
3317 	}
3318 	return ret;
3319 }
3320 
3321 /*
3322  * how many bytes are required to store the items in a leaf.  start
3323  * and nr indicate which items in the leaf to check.  This totals up the
3324  * space used both by the item structs and the item data
3325  */
3326 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3327 {
3328 	struct btrfs_item *start_item;
3329 	struct btrfs_item *end_item;
3330 	struct btrfs_map_token token;
3331 	int data_len;
3332 	int nritems = btrfs_header_nritems(l);
3333 	int end = min(nritems, start + nr) - 1;
3334 
3335 	if (!nr)
3336 		return 0;
3337 	btrfs_init_map_token(&token);
3338 	start_item = btrfs_item_nr(l, start);
3339 	end_item = btrfs_item_nr(l, end);
3340 	data_len = btrfs_token_item_offset(l, start_item, &token) +
3341 		btrfs_token_item_size(l, start_item, &token);
3342 	data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3343 	data_len += sizeof(struct btrfs_item) * nr;
3344 	WARN_ON(data_len < 0);
3345 	return data_len;
3346 }
3347 
3348 /*
3349  * The space between the end of the leaf items and
3350  * the start of the leaf data.  IOW, how much room
3351  * the leaf has left for both items and data
3352  */
3353 noinline int btrfs_leaf_free_space(struct btrfs_root *root,
3354 				   struct extent_buffer *leaf)
3355 {
3356 	int nritems = btrfs_header_nritems(leaf);
3357 	int ret;
3358 	ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
3359 	if (ret < 0) {
3360 		printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
3361 		       "used %d nritems %d\n",
3362 		       ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
3363 		       leaf_space_used(leaf, 0, nritems), nritems);
3364 	}
3365 	return ret;
3366 }
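
/*
 * For reference, the leaf layout that the two helpers above assume (a sketch,
 * not taken from the original file): item headers grow forward from just
 * after the block header, item data grows backward from the end of the block,
 * and the free space is the gap in between:
 *
 *	[header][item 0][item 1]...[item N-1] .. free .. [data N-1]..[data 0]
 *
 * btrfs_leaf_free_space() is therefore BTRFS_LEAF_DATA_SIZE minus the space
 * used by all item headers and their data.
 */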
3367 
3368 /*
3369  * min slot controls the lowest index we're willing to push to the
3370  * right.  We'll push up to and including min_slot, but no lower
3371  */
3372 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
3373 				      struct btrfs_root *root,
3374 				      struct btrfs_path *path,
3375 				      int data_size, int empty,
3376 				      struct extent_buffer *right,
3377 				      int free_space, u32 left_nritems,
3378 				      u32 min_slot)
3379 {
3380 	struct extent_buffer *left = path->nodes[0];
3381 	struct extent_buffer *upper = path->nodes[1];
3382 	struct btrfs_map_token token;
3383 	struct btrfs_disk_key disk_key;
3384 	int slot;
3385 	u32 i;
3386 	int push_space = 0;
3387 	int push_items = 0;
3388 	struct btrfs_item *item;
3389 	u32 nr;
3390 	u32 right_nritems;
3391 	u32 data_end;
3392 	u32 this_item_size;
3393 
3394 	btrfs_init_map_token(&token);
3395 
3396 	if (empty)
3397 		nr = 0;
3398 	else
3399 		nr = max_t(u32, 1, min_slot);
3400 
3401 	if (path->slots[0] >= left_nritems)
3402 		push_space += data_size;
3403 
3404 	slot = path->slots[1];
3405 	i = left_nritems - 1;
3406 	while (i >= nr) {
3407 		item = btrfs_item_nr(left, i);
3408 
3409 		if (!empty && push_items > 0) {
3410 			if (path->slots[0] > i)
3411 				break;
3412 			if (path->slots[0] == i) {
3413 				int space = btrfs_leaf_free_space(root, left);
3414 				if (space + push_space * 2 > free_space)
3415 					break;
3416 			}
3417 		}
3418 
3419 		if (path->slots[0] == i)
3420 			push_space += data_size;
3421 
3422 		this_item_size = btrfs_item_size(left, item);
3423 		if (this_item_size + sizeof(*item) + push_space > free_space)
3424 			break;
3425 
3426 		push_items++;
3427 		push_space += this_item_size + sizeof(*item);
3428 		if (i == 0)
3429 			break;
3430 		i--;
3431 	}
3432 
3433 	if (push_items == 0)
3434 		goto out_unlock;
3435 
3436 	WARN_ON(!empty && push_items == left_nritems);
3437 
3438 	/* push left to right */
3439 	right_nritems = btrfs_header_nritems(right);
3440 
3441 	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3442 	push_space -= leaf_data_end(root, left);
3443 
3444 	/* make room in the right data area */
3445 	data_end = leaf_data_end(root, right);
3446 	memmove_extent_buffer(right,
3447 			      btrfs_leaf_data(right) + data_end - push_space,
3448 			      btrfs_leaf_data(right) + data_end,
3449 			      BTRFS_LEAF_DATA_SIZE(root) - data_end);
3450 
3451 	/* copy from the left data area */
3452 	copy_extent_buffer(right, left, btrfs_leaf_data(right) +
3453 		     BTRFS_LEAF_DATA_SIZE(root) - push_space,
3454 		     btrfs_leaf_data(left) + leaf_data_end(root, left),
3455 		     push_space);
3456 
3457 	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3458 			      btrfs_item_nr_offset(0),
3459 			      right_nritems * sizeof(struct btrfs_item));
3460 
3461 	/* copy the items from left to right */
3462 	copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3463 		   btrfs_item_nr_offset(left_nritems - push_items),
3464 		   push_items * sizeof(struct btrfs_item));
3465 
3466 	/* update the item pointers */
3467 	right_nritems += push_items;
3468 	btrfs_set_header_nritems(right, right_nritems);
3469 	push_space = BTRFS_LEAF_DATA_SIZE(root);
3470 	for (i = 0; i < right_nritems; i++) {
3471 		item = btrfs_item_nr(right, i);
3472 		push_space -= btrfs_token_item_size(right, item, &token);
3473 		btrfs_set_token_item_offset(right, item, push_space, &token);
3474 	}
3475 
3476 	left_nritems -= push_items;
3477 	btrfs_set_header_nritems(left, left_nritems);
3478 
3479 	if (left_nritems)
3480 		btrfs_mark_buffer_dirty(left);
3481 	else
3482 		clean_tree_block(trans, root, left);
3483 
3484 	btrfs_mark_buffer_dirty(right);
3485 
3486 	btrfs_item_key(right, &disk_key, 0);
3487 	btrfs_set_node_key(upper, &disk_key, slot + 1);
3488 	btrfs_mark_buffer_dirty(upper);
3489 
3490 	/* then fixup the leaf pointer in the path */
3491 	if (path->slots[0] >= left_nritems) {
3492 		path->slots[0] -= left_nritems;
3493 		if (btrfs_header_nritems(path->nodes[0]) == 0)
3494 			clean_tree_block(trans, root, path->nodes[0]);
3495 		btrfs_tree_unlock(path->nodes[0]);
3496 		free_extent_buffer(path->nodes[0]);
3497 		path->nodes[0] = right;
3498 		path->slots[1] += 1;
3499 	} else {
3500 		btrfs_tree_unlock(right);
3501 		free_extent_buffer(right);
3502 	}
3503 	return 0;
3504 
3505 out_unlock:
3506 	btrfs_tree_unlock(right);
3507 	free_extent_buffer(right);
3508 	return 1;
3509 }
3510 
3511 /*
3512  * push some data in the path leaf to the right, trying to free up at
3513  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3514  *
3515  * returns 1 if the push failed because the other node didn't have enough
3516  * room, 0 if everything worked out and < 0 if there were major errors.
3517  *
3518  * this will push starting from min_slot to the end of the leaf.  It won't
3519  * push any slot lower than min_slot
3520  */
3521 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3522 			   *root, struct btrfs_path *path,
3523 			   int min_data_size, int data_size,
3524 			   int empty, u32 min_slot)
3525 {
3526 	struct extent_buffer *left = path->nodes[0];
3527 	struct extent_buffer *right;
3528 	struct extent_buffer *upper;
3529 	int slot;
3530 	int free_space;
3531 	u32 left_nritems;
3532 	int ret;
3533 
3534 	if (!path->nodes[1])
3535 		return 1;
3536 
3537 	slot = path->slots[1];
3538 	upper = path->nodes[1];
3539 	if (slot >= btrfs_header_nritems(upper) - 1)
3540 		return 1;
3541 
3542 	btrfs_assert_tree_locked(path->nodes[1]);
3543 
3544 	right = read_node_slot(root, upper, slot + 1);
3545 	if (right == NULL)
3546 		return 1;
3547 
3548 	btrfs_tree_lock(right);
3549 	btrfs_set_lock_blocking(right);
3550 
3551 	free_space = btrfs_leaf_free_space(root, right);
3552 	if (free_space < data_size)
3553 		goto out_unlock;
3554 
3555 	/* cow and double check */
3556 	ret = btrfs_cow_block(trans, root, right, upper,
3557 			      slot + 1, &right);
3558 	if (ret)
3559 		goto out_unlock;
3560 
3561 	free_space = btrfs_leaf_free_space(root, right);
3562 	if (free_space < data_size)
3563 		goto out_unlock;
3564 
3565 	left_nritems = btrfs_header_nritems(left);
3566 	if (left_nritems == 0)
3567 		goto out_unlock;
3568 
3569 	return __push_leaf_right(trans, root, path, min_data_size, empty,
3570 				right, free_space, left_nritems, min_slot);
3571 out_unlock:
3572 	btrfs_tree_unlock(right);
3573 	free_extent_buffer(right);
3574 	return 1;
3575 }
3576 
3577 /*
3578  * push some data in the path leaf to the left, trying to free up at
3579  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3580  *
3581  * max_slot can put a limit on how far into the leaf we'll push items.  The
3582  * item at 'max_slot' won't be touched.  Use (u32)-1 to make us do all the
3583  * items
3584  */
3585 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
3586 				     struct btrfs_root *root,
3587 				     struct btrfs_path *path, int data_size,
3588 				     int empty, struct extent_buffer *left,
3589 				     int free_space, u32 right_nritems,
3590 				     u32 max_slot)
3591 {
3592 	struct btrfs_disk_key disk_key;
3593 	struct extent_buffer *right = path->nodes[0];
3594 	int i;
3595 	int push_space = 0;
3596 	int push_items = 0;
3597 	struct btrfs_item *item;
3598 	u32 old_left_nritems;
3599 	u32 nr;
3600 	int ret = 0;
3601 	u32 this_item_size;
3602 	u32 old_left_item_size;
3603 	struct btrfs_map_token token;
3604 
3605 	btrfs_init_map_token(&token);
3606 
3607 	if (empty)
3608 		nr = min(right_nritems, max_slot);
3609 	else
3610 		nr = min(right_nritems - 1, max_slot);
3611 
3612 	for (i = 0; i < nr; i++) {
3613 		item = btrfs_item_nr(right, i);
3614 
3615 		if (!empty && push_items > 0) {
3616 			if (path->slots[0] < i)
3617 				break;
3618 			if (path->slots[0] == i) {
3619 				int space = btrfs_leaf_free_space(root, right);
3620 				if (space + push_space * 2 > free_space)
3621 					break;
3622 			}
3623 		}
3624 
3625 		if (path->slots[0] == i)
3626 			push_space += data_size;
3627 
3628 		this_item_size = btrfs_item_size(right, item);
3629 		if (this_item_size + sizeof(*item) + push_space > free_space)
3630 			break;
3631 
3632 		push_items++;
3633 		push_space += this_item_size + sizeof(*item);
3634 	}
3635 
3636 	if (push_items == 0) {
3637 		ret = 1;
3638 		goto out;
3639 	}
3640 	if (!empty && push_items == btrfs_header_nritems(right))
3641 		WARN_ON(1);
3642 
3643 	/* push data from right to left */
3644 	copy_extent_buffer(left, right,
3645 			   btrfs_item_nr_offset(btrfs_header_nritems(left)),
3646 			   btrfs_item_nr_offset(0),
3647 			   push_items * sizeof(struct btrfs_item));
3648 
3649 	push_space = BTRFS_LEAF_DATA_SIZE(root) -
3650 		     btrfs_item_offset_nr(right, push_items - 1);
3651 
3652 	copy_extent_buffer(left, right, btrfs_leaf_data(left) +
3653 		     leaf_data_end(root, left) - push_space,
3654 		     btrfs_leaf_data(right) +
3655 		     btrfs_item_offset_nr(right, push_items - 1),
3656 		     push_space);
3657 	old_left_nritems = btrfs_header_nritems(left);
3658 	BUG_ON(old_left_nritems <= 0);
3659 
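	/*
	 * the copied item headers still hold the offsets the data had in the
	 * right leaf; shift them down so the pushed data lines up with the end
	 * of the left leaf's existing data (old_left_item_size)
	 */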
3660 	old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3661 	for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3662 		u32 ioff;
3663 
3664 		item = btrfs_item_nr(left, i);
3665 
3666 		ioff = btrfs_token_item_offset(left, item, &token);
3667 		btrfs_set_token_item_offset(left, item,
3668 		      ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
3669 		      &token);
3670 	}
3671 	btrfs_set_header_nritems(left, old_left_nritems + push_items);
3672 
3673 	/* fixup right node */
3674 	if (push_items > right_nritems)
3675 		WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3676 		       right_nritems);
3677 
3678 	if (push_items < right_nritems) {
3679 		push_space = btrfs_item_offset_nr(right, push_items - 1) -
3680 						  leaf_data_end(root, right);
3681 		memmove_extent_buffer(right, btrfs_leaf_data(right) +
3682 				      BTRFS_LEAF_DATA_SIZE(root) - push_space,
3683 				      btrfs_leaf_data(right) +
3684 				      leaf_data_end(root, right), push_space);
3685 
3686 		memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3687 			      btrfs_item_nr_offset(push_items),
3688 			     (btrfs_header_nritems(right) - push_items) *
3689 			     sizeof(struct btrfs_item));
3690 	}
3691 	right_nritems -= push_items;
3692 	btrfs_set_header_nritems(right, right_nritems);
3693 	push_space = BTRFS_LEAF_DATA_SIZE(root);
3694 	for (i = 0; i < right_nritems; i++) {
3695 		item = btrfs_item_nr(right, i);
3696 
3697 		push_space = push_space - btrfs_token_item_size(right,
3698 								item, &token);
3699 		btrfs_set_token_item_offset(right, item, push_space, &token);
3700 	}
3701 
3702 	btrfs_mark_buffer_dirty(left);
3703 	if (right_nritems)
3704 		btrfs_mark_buffer_dirty(right);
3705 	else
3706 		clean_tree_block(trans, root, right);
3707 
3708 	btrfs_item_key(right, &disk_key, 0);
3709 	fixup_low_keys(root, path, &disk_key, 1);
3710 
3711 	/* then fixup the leaf pointer in the path */
3712 	if (path->slots[0] < push_items) {
3713 		path->slots[0] += old_left_nritems;
3714 		btrfs_tree_unlock(path->nodes[0]);
3715 		free_extent_buffer(path->nodes[0]);
3716 		path->nodes[0] = left;
3717 		path->slots[1] -= 1;
3718 	} else {
3719 		btrfs_tree_unlock(left);
3720 		free_extent_buffer(left);
3721 		path->slots[0] -= push_items;
3722 	}
3723 	BUG_ON(path->slots[0] < 0);
3724 	return ret;
3725 out:
3726 	btrfs_tree_unlock(left);
3727 	free_extent_buffer(left);
3728 	return ret;
3729 }
3730 
3731 /*
3732  * push some data in the path leaf to the left, trying to free up at
3733  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3734  *
3735  * max_slot can put a limit on how far into the leaf we'll push items.  The
3736  * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
3737  * items
3738  */
3739 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3740 			  *root, struct btrfs_path *path, int min_data_size,
3741 			  int data_size, int empty, u32 max_slot)
3742 {
3743 	struct extent_buffer *right = path->nodes[0];
3744 	struct extent_buffer *left;
3745 	int slot;
3746 	int free_space;
3747 	u32 right_nritems;
3748 	int ret = 0;
3749 
3750 	slot = path->slots[1];
3751 	if (slot == 0)
3752 		return 1;
3753 	if (!path->nodes[1])
3754 		return 1;
3755 
3756 	right_nritems = btrfs_header_nritems(right);
3757 	if (right_nritems == 0)
3758 		return 1;
3759 
3760 	btrfs_assert_tree_locked(path->nodes[1]);
3761 
3762 	left = read_node_slot(root, path->nodes[1], slot - 1);
3763 	if (left == NULL)
3764 		return 1;
3765 
3766 	btrfs_tree_lock(left);
3767 	btrfs_set_lock_blocking(left);
3768 
3769 	free_space = btrfs_leaf_free_space(root, left);
3770 	if (free_space < data_size) {
3771 		ret = 1;
3772 		goto out;
3773 	}
3774 
3775 	/* cow and double check */
3776 	ret = btrfs_cow_block(trans, root, left,
3777 			      path->nodes[1], slot - 1, &left);
3778 	if (ret) {
3779 		/* we hit -ENOSPC, but it isn't fatal here */
3780 		if (ret == -ENOSPC)
3781 			ret = 1;
3782 		goto out;
3783 	}
3784 
3785 	free_space = btrfs_leaf_free_space(root, left);
3786 	if (free_space < data_size) {
3787 		ret = 1;
3788 		goto out;
3789 	}
3790 
3791 	return __push_leaf_left(trans, root, path, min_data_size,
3792 			       empty, left, free_space, right_nritems,
3793 			       max_slot);
3794 out:
3795 	btrfs_tree_unlock(left);
3796 	free_extent_buffer(left);
3797 	return ret;
3798 }
3799 
3800 /*
3801  * split the path's leaf in two, making sure there is at least data_size
3802  * available for the resulting leaf level of the path.
3803  */
3804 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
3805 				    struct btrfs_root *root,
3806 				    struct btrfs_path *path,
3807 				    struct extent_buffer *l,
3808 				    struct extent_buffer *right,
3809 				    int slot, int mid, int nritems)
3810 {
3811 	int data_copy_size;
3812 	int rt_data_off;
3813 	int i;
3814 	struct btrfs_disk_key disk_key;
3815 	struct btrfs_map_token token;
3816 
3817 	btrfs_init_map_token(&token);
3818 
3819 	nritems = nritems - mid;
3820 	btrfs_set_header_nritems(right, nritems);
3821 	data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
3822 
3823 	copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
3824 			   btrfs_item_nr_offset(mid),
3825 			   nritems * sizeof(struct btrfs_item));
3826 
3827 	copy_extent_buffer(right, l,
3828 		     btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
3829 		     data_copy_size, btrfs_leaf_data(l) +
3830 		     leaf_data_end(root, l), data_copy_size);
3831 
3832 	rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
3833 		      btrfs_item_end_nr(l, mid);
3834 
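	/*
	 * the data was copied to the very end of the right leaf's data area,
	 * so every copied item's offset grows by rt_data_off
	 */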
3835 	for (i = 0; i < nritems; i++) {
3836 		struct btrfs_item *item = btrfs_item_nr(right, i);
3837 		u32 ioff;
3838 
3839 		ioff = btrfs_token_item_offset(right, item, &token);
3840 		btrfs_set_token_item_offset(right, item,
3841 					    ioff + rt_data_off, &token);
3842 	}
3843 
3844 	btrfs_set_header_nritems(l, mid);
3845 	btrfs_item_key(right, &disk_key, 0);
3846 	insert_ptr(trans, root, path, &disk_key, right->start,
3847 		   path->slots[1] + 1, 1);
3848 
3849 	btrfs_mark_buffer_dirty(right);
3850 	btrfs_mark_buffer_dirty(l);
3851 	BUG_ON(path->slots[0] != slot);
3852 
3853 	if (mid <= slot) {
3854 		btrfs_tree_unlock(path->nodes[0]);
3855 		free_extent_buffer(path->nodes[0]);
3856 		path->nodes[0] = right;
3857 		path->slots[0] -= mid;
3858 		path->slots[1] += 1;
3859 	} else {
3860 		btrfs_tree_unlock(right);
3861 		free_extent_buffer(right);
3862 	}
3863 
3864 	BUG_ON(path->slots[0] < 0);
3865 }
3866 
3867 /*
3868  * double splits happen when we need to insert a big item in the middle
3869  * of a leaf.  A double split can leave us with 3 mostly empty leaves:
3870  * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
3871  *          A                 B                 C
3872  *
3873  * We avoid this by trying to push the items on either side of our target
3874  * into the adjacent leaves.  If all goes well we can avoid the double split
3875  * completely.
3876  */
3877 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
3878 					  struct btrfs_root *root,
3879 					  struct btrfs_path *path,
3880 					  int data_size)
3881 {
3882 	int ret;
3883 	int progress = 0;
3884 	int slot;
3885 	u32 nritems;
3886 
3887 	slot = path->slots[0];
3888 
3889 	/*
3890 	 * try to push all the items after our slot into the
3891 	 * right leaf
3892 	 */
3893 	ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot);
3894 	if (ret < 0)
3895 		return ret;
3896 
3897 	if (ret == 0)
3898 		progress++;
3899 
3900 	nritems = btrfs_header_nritems(path->nodes[0]);
3901 	/*
3902 	 * our goal is to get our slot at the start or end of a leaf.  If
3903 	 * we've done so, we're done
3904 	 */
3905 	if (path->slots[0] == 0 || path->slots[0] == nritems)
3906 		return 0;
3907 
3908 	if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
3909 		return 0;
3910 
3911 	/* try to push all the items before our slot into the left leaf */
3912 	slot = path->slots[0];
3913 	ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
3914 	if (ret < 0)
3915 		return ret;
3916 
3917 	if (ret == 0)
3918 		progress++;
3919 
3920 	if (progress)
3921 		return 0;
3922 	return 1;
3923 }
3924 
3925 /*
3926  * split the path's leaf in two, making sure there is at least data_size
3927  * available for the resulting leaf level of the path.
3928  *
3929  * returns 0 if all went well and < 0 on failure.
3930  */
3931 static noinline int split_leaf(struct btrfs_trans_handle *trans,
3932 			       struct btrfs_root *root,
3933 			       struct btrfs_key *ins_key,
3934 			       struct btrfs_path *path, int data_size,
3935 			       int extend)
3936 {
3937 	struct btrfs_disk_key disk_key;
3938 	struct extent_buffer *l;
3939 	u32 nritems;
3940 	int mid;
3941 	int slot;
3942 	struct extent_buffer *right;
3943 	int ret = 0;
3944 	int wret;
3945 	int split;
3946 	int num_doubles = 0;
3947 	int tried_avoid_double = 0;
3948 
3949 	l = path->nodes[0];
3950 	slot = path->slots[0];
3951 	if (extend && data_size + btrfs_item_size_nr(l, slot) +
3952 	    sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
3953 		return -EOVERFLOW;
3954 
3955 	/* first try to make some room by pushing left and right */
3956 	if (data_size && path->nodes[1]) {
3957 		wret = push_leaf_right(trans, root, path, data_size,
3958 				       data_size, 0, 0);
3959 		if (wret < 0)
3960 			return wret;
3961 		if (wret) {
3962 			wret = push_leaf_left(trans, root, path, data_size,
3963 					      data_size, 0, (u32)-1);
3964 			if (wret < 0)
3965 				return wret;
3966 		}
3967 		l = path->nodes[0];
3968 
3969 		/* did the pushes work? */
3970 		if (btrfs_leaf_free_space(root, l) >= data_size)
3971 			return 0;
3972 	}
3973 
3974 	if (!path->nodes[1]) {
3975 		ret = insert_new_root(trans, root, path, 1);
3976 		if (ret)
3977 			return ret;
3978 	}
3979 again:
3980 	split = 1;
3981 	l = path->nodes[0];
3982 	slot = path->slots[0];
3983 	nritems = btrfs_header_nritems(l);
3984 	mid = (nritems + 1) / 2;
3985 
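	/*
	 * decide how to split:
	 *   split == 1: normal split at mid
	 *   split == 0: don't move any items, just insert an empty new leaf
	 *               that will receive the incoming item
	 *   split == 2: the chosen split point still doesn't leave enough
	 *               room, a second split will be needed
	 */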
3986 	if (mid <= slot) {
3987 		if (nritems == 1 ||
3988 		    leaf_space_used(l, mid, nritems - mid) + data_size >
3989 			BTRFS_LEAF_DATA_SIZE(root)) {
3990 			if (slot >= nritems) {
3991 				split = 0;
3992 			} else {
3993 				mid = slot;
3994 				if (mid != nritems &&
3995 				    leaf_space_used(l, mid, nritems - mid) +
3996 				    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
3997 					if (data_size && !tried_avoid_double)
3998 						goto push_for_double;
3999 					split = 2;
4000 				}
4001 			}
4002 		}
4003 	} else {
4004 		if (leaf_space_used(l, 0, mid) + data_size >
4005 			BTRFS_LEAF_DATA_SIZE(root)) {
4006 			if (!extend && data_size && slot == 0) {
4007 				split = 0;
4008 			} else if ((extend || !data_size) && slot == 0) {
4009 				mid = 1;
4010 			} else {
4011 				mid = slot;
4012 				if (mid != nritems &&
4013 				    leaf_space_used(l, mid, nritems - mid) +
4014 				    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4015 					if (data_size && !tried_avoid_double)
4016 						goto push_for_double;
4017 					split = 2;
4018 				}
4019 			}
4020 		}
4021 	}
4022 
4023 	if (split == 0)
4024 		btrfs_cpu_key_to_disk(&disk_key, ins_key);
4025 	else
4026 		btrfs_item_key(l, &disk_key, mid);
4027 
4028 	right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
4029 					root->root_key.objectid,
4030 					&disk_key, 0, l->start, 0);
4031 	if (IS_ERR(right))
4032 		return PTR_ERR(right);
4033 
4034 	root_add_used(root, root->leafsize);
4035 
4036 	memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
4037 	btrfs_set_header_bytenr(right, right->start);
4038 	btrfs_set_header_generation(right, trans->transid);
4039 	btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
4040 	btrfs_set_header_owner(right, root->root_key.objectid);
4041 	btrfs_set_header_level(right, 0);
4042 	write_extent_buffer(right, root->fs_info->fsid,
4043 			    (unsigned long)btrfs_header_fsid(right),
4044 			    BTRFS_FSID_SIZE);
4045 
4046 	write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
4047 			    (unsigned long)btrfs_header_chunk_tree_uuid(right),
4048 			    BTRFS_UUID_SIZE);
4049 
4050 	if (split == 0) {
4051 		if (mid <= slot) {
4052 			btrfs_set_header_nritems(right, 0);
4053 			insert_ptr(trans, root, path, &disk_key, right->start,
4054 				   path->slots[1] + 1, 1);
4055 			btrfs_tree_unlock(path->nodes[0]);
4056 			free_extent_buffer(path->nodes[0]);
4057 			path->nodes[0] = right;
4058 			path->slots[0] = 0;
4059 			path->slots[1] += 1;
4060 		} else {
4061 			btrfs_set_header_nritems(right, 0);
4062 			insert_ptr(trans, root, path, &disk_key, right->start,
4063 					  path->slots[1], 1);
4064 			btrfs_tree_unlock(path->nodes[0]);
4065 			free_extent_buffer(path->nodes[0]);
4066 			path->nodes[0] = right;
4067 			path->slots[0] = 0;
4068 			if (path->slots[1] == 0)
4069 				fixup_low_keys(root, path, &disk_key, 1);
4070 		}
4071 		btrfs_mark_buffer_dirty(right);
4072 		return ret;
4073 	}
4074 
4075 	copy_for_split(trans, root, path, l, right, slot, mid, nritems);
4076 
4077 	if (split == 2) {
4078 		BUG_ON(num_doubles != 0);
4079 		num_doubles++;
4080 		goto again;
4081 	}
4082 
4083 	return 0;
4084 
4085 push_for_double:
4086 	push_for_double_split(trans, root, path, data_size);
4087 	tried_avoid_double = 1;
4088 	if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4089 		return 0;
4090 	goto again;
4091 }
4092 
4093 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4094 					 struct btrfs_root *root,
4095 					 struct btrfs_path *path, int ins_len)
4096 {
4097 	struct btrfs_key key;
4098 	struct extent_buffer *leaf;
4099 	struct btrfs_file_extent_item *fi;
4100 	u64 extent_len = 0;
4101 	u32 item_size;
4102 	int ret;
4103 
4104 	leaf = path->nodes[0];
4105 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4106 
4107 	BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4108 	       key.type != BTRFS_EXTENT_CSUM_KEY);
4109 
4110 	if (btrfs_leaf_free_space(root, leaf) >= ins_len)
4111 		return 0;
4112 
4113 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4114 	if (key.type == BTRFS_EXTENT_DATA_KEY) {
4115 		fi = btrfs_item_ptr(leaf, path->slots[0],
4116 				    struct btrfs_file_extent_item);
4117 		extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4118 	}
4119 	btrfs_release_path(path);
4120 
4121 	path->keep_locks = 1;
4122 	path->search_for_split = 1;
4123 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4124 	path->search_for_split = 0;
4125 	if (ret < 0)
4126 		goto err;
4127 
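	/*
	 * from here on, -EAGAIN means the leaf or the item changed while the
	 * path was released and the split can't proceed as planned
	 */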
4128 	ret = -EAGAIN;
4129 	leaf = path->nodes[0];
4130 	/* if our item isn't there or got smaller, return now */
4131 	if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4132 		goto err;
4133 
4134 	/* the leaf has changed, it now has room.  return now */
4135 	if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
4136 		goto err;
4137 
4138 	if (key.type == BTRFS_EXTENT_DATA_KEY) {
4139 		fi = btrfs_item_ptr(leaf, path->slots[0],
4140 				    struct btrfs_file_extent_item);
4141 		if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4142 			goto err;
4143 	}
4144 
4145 	btrfs_set_path_blocking(path);
4146 	ret = split_leaf(trans, root, &key, path, ins_len, 1);
4147 	if (ret)
4148 		goto err;
4149 
4150 	path->keep_locks = 0;
4151 	btrfs_unlock_up_safe(path, 1);
4152 	return 0;
4153 err:
4154 	path->keep_locks = 0;
4155 	return ret;
4156 }
4157 
4158 static noinline int split_item(struct btrfs_trans_handle *trans,
4159 			       struct btrfs_root *root,
4160 			       struct btrfs_path *path,
4161 			       struct btrfs_key *new_key,
4162 			       unsigned long split_offset)
4163 {
4164 	struct extent_buffer *leaf;
4165 	struct btrfs_item *item;
4166 	struct btrfs_item *new_item;
4167 	int slot;
4168 	char *buf;
4169 	u32 nritems;
4170 	u32 item_size;
4171 	u32 orig_offset;
4172 	struct btrfs_disk_key disk_key;
4173 
4174 	leaf = path->nodes[0];
4175 	BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
4176 
4177 	btrfs_set_path_blocking(path);
4178 
4179 	item = btrfs_item_nr(leaf, path->slots[0]);
4180 	orig_offset = btrfs_item_offset(leaf, item);
4181 	item_size = btrfs_item_size(leaf, item);
4182 
4183 	buf = kmalloc(item_size, GFP_NOFS);
4184 	if (!buf)
4185 		return -ENOMEM;
4186 
4187 	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4188 			    path->slots[0]), item_size);
4189 
4190 	slot = path->slots[0] + 1;
4191 	nritems = btrfs_header_nritems(leaf);
4192 	if (slot != nritems) {
4193 		/* shift the items */
4194 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4195 				btrfs_item_nr_offset(slot),
4196 				(nritems - slot) * sizeof(struct btrfs_item));
4197 	}
4198 
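	/*
	 * carve the original data region in two: the new item (at 'slot')
	 * takes the low part and will hold the tail of the data, while the
	 * original item keeps the high part and will hold the first
	 * split_offset bytes; both halves are rewritten from the saved copy
	 * below
	 */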
4199 	btrfs_cpu_key_to_disk(&disk_key, new_key);
4200 	btrfs_set_item_key(leaf, &disk_key, slot);
4201 
4202 	new_item = btrfs_item_nr(leaf, slot);
4203 
4204 	btrfs_set_item_offset(leaf, new_item, orig_offset);
4205 	btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4206 
4207 	btrfs_set_item_offset(leaf, item,
4208 			      orig_offset + item_size - split_offset);
4209 	btrfs_set_item_size(leaf, item, split_offset);
4210 
4211 	btrfs_set_header_nritems(leaf, nritems + 1);
4212 
4213 	/* write the data for the start of the original item */
4214 	write_extent_buffer(leaf, buf,
4215 			    btrfs_item_ptr_offset(leaf, path->slots[0]),
4216 			    split_offset);
4217 
4218 	/* write the data for the new item */
4219 	write_extent_buffer(leaf, buf + split_offset,
4220 			    btrfs_item_ptr_offset(leaf, slot),
4221 			    item_size - split_offset);
4222 	btrfs_mark_buffer_dirty(leaf);
4223 
4224 	BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
4225 	kfree(buf);
4226 	return 0;
4227 }
4228 
4229 /*
4230  * This function splits a single item into two items,
4231  * giving 'new_key' to the new item and splitting the
4232  * old one at split_offset (from the start of the item).
4233  *
4234  * The path may be released by this operation.  After
4235  * the split, the path is pointing to the old item.  The
4236  * new item is going to be in the same node as the old one.
4237  *
4238  * Note, the item being split must be small enough to live alone on
4239  * a tree block with room for one extra struct btrfs_item
4240  *
4241  * This allows us to split the item in place, keeping a lock on the
4242  * leaf the entire time.
4243  */
4244 int btrfs_split_item(struct btrfs_trans_handle *trans,
4245 		     struct btrfs_root *root,
4246 		     struct btrfs_path *path,
4247 		     struct btrfs_key *new_key,
4248 		     unsigned long split_offset)
4249 {
4250 	int ret;
4251 	ret = setup_leaf_for_split(trans, root, path,
4252 				   sizeof(struct btrfs_item));
4253 	if (ret)
4254 		return ret;
4255 
4256 	ret = split_item(trans, root, path, new_key, split_offset);
4257 	return ret;
4258 }
4259 
4260 /*
4261  * This function duplicates an item, giving 'new_key' to the new item.
4262  * It guarantees both items live in the same tree leaf and the new item
4263  * is contiguous with the original item.
4264  *
4265  * This allows us to split a file extent in place, keeping a lock on the
4266  * leaf the entire time.
4267  */
4268 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4269 			 struct btrfs_root *root,
4270 			 struct btrfs_path *path,
4271 			 struct btrfs_key *new_key)
4272 {
4273 	struct extent_buffer *leaf;
4274 	int ret;
4275 	u32 item_size;
4276 
4277 	leaf = path->nodes[0];
4278 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4279 	ret = setup_leaf_for_split(trans, root, path,
4280 				   item_size + sizeof(struct btrfs_item));
4281 	if (ret)
4282 		return ret;
4283 
4284 	path->slots[0]++;
4285 	setup_items_for_insert(root, path, new_key, &item_size,
4286 			       item_size, item_size +
4287 			       sizeof(struct btrfs_item), 1);
4288 	leaf = path->nodes[0];
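	/* the new item sits right after the original one, copy its data over */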
4289 	memcpy_extent_buffer(leaf,
4290 			     btrfs_item_ptr_offset(leaf, path->slots[0]),
4291 			     btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4292 			     item_size);
4293 	return 0;
4294 }
4295 
4296 /*
4297  * make the item pointed to by the path smaller.  new_size indicates
4298  * how small to make it, and from_end tells us if we just chop bytes
4299  * off the end of the item or if we shift the item to chop bytes off
4300  * the front.
4301  */
4302 void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
4303 			 u32 new_size, int from_end)
4304 {
4305 	int slot;
4306 	struct extent_buffer *leaf;
4307 	struct btrfs_item *item;
4308 	u32 nritems;
4309 	unsigned int data_end;
4310 	unsigned int old_data_start;
4311 	unsigned int old_size;
4312 	unsigned int size_diff;
4313 	int i;
4314 	struct btrfs_map_token token;
4315 
4316 	btrfs_init_map_token(&token);
4317 
4318 	leaf = path->nodes[0];
4319 	slot = path->slots[0];
4320 
4321 	old_size = btrfs_item_size_nr(leaf, slot);
4322 	if (old_size == new_size)
4323 		return;
4324 
4325 	nritems = btrfs_header_nritems(leaf);
4326 	data_end = leaf_data_end(root, leaf);
4327 
4328 	old_data_start = btrfs_item_offset_nr(leaf, slot);
4329 
4330 	size_diff = old_size - new_size;
4331 
4332 	BUG_ON(slot < 0);
4333 	BUG_ON(slot >= nritems);
4334 
4335 	/*
4336 	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4337 	 */
4338 	/* first correct the data pointers */
4339 	for (i = slot; i < nritems; i++) {
4340 		u32 ioff;
4341 		item = btrfs_item_nr(leaf, i);
4342 
4343 		ioff = btrfs_token_item_offset(leaf, item, &token);
4344 		btrfs_set_token_item_offset(leaf, item,
4345 					    ioff + size_diff, &token);
4346 	}
4347 
4348 	/* shift the data */
4349 	if (from_end) {
4350 		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4351 			      data_end + size_diff, btrfs_leaf_data(leaf) +
4352 			      data_end, old_data_start + new_size - data_end);
4353 	} else {
4354 		struct btrfs_disk_key disk_key;
4355 		u64 offset;
4356 
4357 		btrfs_item_key(leaf, &disk_key, slot);
4358 
4359 		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4360 			unsigned long ptr;
4361 			struct btrfs_file_extent_item *fi;
4362 
4363 			fi = btrfs_item_ptr(leaf, slot,
4364 					    struct btrfs_file_extent_item);
4365 			fi = (struct btrfs_file_extent_item *)(
4366 			     (unsigned long)fi - size_diff);
4367 
4368 			if (btrfs_file_extent_type(leaf, fi) ==
4369 			    BTRFS_FILE_EXTENT_INLINE) {
4370 				ptr = btrfs_item_ptr_offset(leaf, slot);
4371 				memmove_extent_buffer(leaf, ptr,
4372 				      (unsigned long)fi,
4373 				      offsetof(struct btrfs_file_extent_item,
4374 						 disk_bytenr));
4375 			}
4376 		}
4377 
4378 		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4379 			      data_end + size_diff, btrfs_leaf_data(leaf) +
4380 			      data_end, old_data_start - data_end);
4381 
4382 		offset = btrfs_disk_key_offset(&disk_key);
4383 		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4384 		btrfs_set_item_key(leaf, &disk_key, slot);
4385 		if (slot == 0)
4386 			fixup_low_keys(root, path, &disk_key, 1);
4387 	}
4388 
4389 	item = btrfs_item_nr(leaf, slot);
4390 	btrfs_set_item_size(leaf, item, new_size);
4391 	btrfs_mark_buffer_dirty(leaf);
4392 
4393 	if (btrfs_leaf_free_space(root, leaf) < 0) {
4394 		btrfs_print_leaf(root, leaf);
4395 		BUG();
4396 	}
4397 }
4398 
4399 /*
4400  * make the item pointed to by the path bigger, data_size is the added size.
4401  */
4402 void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
4403 		       u32 data_size)
4404 {
4405 	int slot;
4406 	struct extent_buffer *leaf;
4407 	struct btrfs_item *item;
4408 	u32 nritems;
4409 	unsigned int data_end;
4410 	unsigned int old_data;
4411 	unsigned int old_size;
4412 	int i;
4413 	struct btrfs_map_token token;
4414 
4415 	btrfs_init_map_token(&token);
4416 
4417 	leaf = path->nodes[0];
4418 
4419 	nritems = btrfs_header_nritems(leaf);
4420 	data_end = leaf_data_end(root, leaf);
4421 
4422 	if (btrfs_leaf_free_space(root, leaf) < data_size) {
4423 		btrfs_print_leaf(root, leaf);
4424 		BUG();
4425 	}
4426 	slot = path->slots[0];
4427 	old_data = btrfs_item_end_nr(leaf, slot);
4428 
4429 	BUG_ON(slot < 0);
4430 	if (slot >= nritems) {
4431 		btrfs_print_leaf(root, leaf);
4432 		printk(KERN_CRIT "slot %d too large, nritems %d\n",
4433 		       slot, nritems);
4434 		BUG_ON(1);
4435 	}
4436 
4437 	/*
4438 	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4439 	 */
4440 	/* first correct the data pointers */
4441 	for (i = slot; i < nritems; i++) {
4442 		u32 ioff;
4443 		item = btrfs_item_nr(leaf, i);
4444 
4445 		ioff = btrfs_token_item_offset(leaf, item, &token);
4446 		btrfs_set_token_item_offset(leaf, item,
4447 					    ioff - data_size, &token);
4448 	}
4449 
4450 	/* shift the data */
4451 	memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4452 		      data_end - data_size, btrfs_leaf_data(leaf) +
4453 		      data_end, old_data - data_end);
4454 
4455 	data_end = old_data;
4456 	old_size = btrfs_item_size_nr(leaf, slot);
4457 	item = btrfs_item_nr(leaf, slot);
4458 	btrfs_set_item_size(leaf, item, old_size + data_size);
4459 	btrfs_mark_buffer_dirty(leaf);
4460 
4461 	if (btrfs_leaf_free_space(root, leaf) < 0) {
4462 		btrfs_print_leaf(root, leaf);
4463 		BUG();
4464 	}
4465 }
4466 
4467 /*
4468  * this is a helper for btrfs_insert_empty_items, the main goal here is
4469  * to save stack depth by doing the bulk of the work in a function
4470  * that doesn't call btrfs_search_slot
4471  */
4472 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4473 			    struct btrfs_key *cpu_key, u32 *data_size,
4474 			    u32 total_data, u32 total_size, int nr)
4475 {
4476 	struct btrfs_item *item;
4477 	int i;
4478 	u32 nritems;
4479 	unsigned int data_end;
4480 	struct btrfs_disk_key disk_key;
4481 	struct extent_buffer *leaf;
4482 	int slot;
4483 	struct btrfs_map_token token;
4484 
4485 	btrfs_init_map_token(&token);
4486 
4487 	leaf = path->nodes[0];
4488 	slot = path->slots[0];
4489 
4490 	nritems = btrfs_header_nritems(leaf);
4491 	data_end = leaf_data_end(root, leaf);
4492 
4493 	if (btrfs_leaf_free_space(root, leaf) < total_size) {
4494 		btrfs_print_leaf(root, leaf);
4495 		printk(KERN_CRIT "not enough freespace need %u have %d\n",
4496 		       total_size, btrfs_leaf_free_space(root, leaf));
4497 		BUG();
4498 	}
4499 
4500 	if (slot != nritems) {
4501 		unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4502 
4503 		if (old_data < data_end) {
4504 			btrfs_print_leaf(root, leaf);
4505 			printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
4506 			       slot, old_data, data_end);
4507 			BUG_ON(1);
4508 		}
4509 		/*
4510 		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4511 		 */
4512 		/* first correct the data pointers */
4513 		for (i = slot; i < nritems; i++) {
4514 			u32 ioff;
4515 
4516 			item = btrfs_item_nr(leaf, i);
4517 			ioff = btrfs_token_item_offset(leaf, item, &token);
4518 			btrfs_set_token_item_offset(leaf, item,
4519 						    ioff - total_data, &token);
4520 		}
4521 		/* shift the items */
4522 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4523 			      btrfs_item_nr_offset(slot),
4524 			      (nritems - slot) * sizeof(struct btrfs_item));
4525 
4526 		/* shift the data */
4527 		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4528 			      data_end - total_data, btrfs_leaf_data(leaf) +
4529 			      data_end, old_data - data_end);
4530 		data_end = old_data;
4531 	}
4532 
4533 	/* setup the item for the new data */
4534 	for (i = 0; i < nr; i++) {
4535 		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4536 		btrfs_set_item_key(leaf, &disk_key, slot + i);
4537 		item = btrfs_item_nr(leaf, slot + i);
4538 		btrfs_set_token_item_offset(leaf, item,
4539 					    data_end - data_size[i], &token);
4540 		data_end -= data_size[i];
4541 		btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4542 	}
4543 
4544 	btrfs_set_header_nritems(leaf, nritems + nr);
4545 
4546 	if (slot == 0) {
4547 		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4548 		fixup_low_keys(root, path, &disk_key, 1);
4549 	}
4550 	btrfs_unlock_up_safe(path, 1);
4551 	btrfs_mark_buffer_dirty(leaf);
4552 
4553 	if (btrfs_leaf_free_space(root, leaf) < 0) {
4554 		btrfs_print_leaf(root, leaf);
4555 		BUG();
4556 	}
4557 }
4558 
4559 /*
4560  * Given a key and some data, insert items into the tree.
4561  * This does all the path init required, making room in the tree if needed.
4562  */
4563 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4564 			    struct btrfs_root *root,
4565 			    struct btrfs_path *path,
4566 			    struct btrfs_key *cpu_key, u32 *data_size,
4567 			    int nr)
4568 {
4569 	int ret = 0;
4570 	int slot;
4571 	int i;
4572 	u32 total_size = 0;
4573 	u32 total_data = 0;
4574 
4575 	for (i = 0; i < nr; i++)
4576 		total_data += data_size[i];
4577 
4578 	total_size = total_data + (nr * sizeof(struct btrfs_item));
4579 	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4580 	if (ret == 0)
4581 		return -EEXIST;
4582 	if (ret < 0)
4583 		return ret;
4584 
4585 	slot = path->slots[0];
4586 	BUG_ON(slot < 0);
4587 
4588 	setup_items_for_insert(root, path, cpu_key, data_size,
4589 			       total_data, total_size, nr);
4590 	return 0;
4591 }
4592 
4593 /*
4594  * Given a key and some data, insert an item into the tree.
4595  * This does all the path init required, making room in the tree if needed.
4596  */
4597 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
4598 		      *root, struct btrfs_key *cpu_key, void *data, u32
4599 		      data_size)
4600 {
4601 	int ret = 0;
4602 	struct btrfs_path *path;
4603 	struct extent_buffer *leaf;
4604 	unsigned long ptr;
4605 
4606 	path = btrfs_alloc_path();
4607 	if (!path)
4608 		return -ENOMEM;
4609 	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4610 	if (!ret) {
4611 		leaf = path->nodes[0];
4612 		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4613 		write_extent_buffer(leaf, data, ptr, data_size);
4614 		btrfs_mark_buffer_dirty(leaf);
4615 	}
4616 	btrfs_free_path(path);
4617 	return ret;
4618 }
4619 
4620 /*
4621  * delete the pointer from a given node.
4622  *
4623  * the tree should have been previously balanced so the deletion does not
4624  * empty a node.
4625  */
4626 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4627 		    int level, int slot)
4628 {
4629 	struct extent_buffer *parent = path->nodes[level];
4630 	u32 nritems;
4631 	int ret;
4632 
4633 	nritems = btrfs_header_nritems(parent);
4634 	if (slot != nritems - 1) {
4635 		if (level)
4636 			tree_mod_log_eb_move(root->fs_info, parent, slot,
4637 					     slot + 1, nritems - slot - 1);
4638 		memmove_extent_buffer(parent,
4639 			      btrfs_node_key_ptr_offset(slot),
4640 			      btrfs_node_key_ptr_offset(slot + 1),
4641 			      sizeof(struct btrfs_key_ptr) *
4642 			      (nritems - slot - 1));
4643 	} else if (level) {
4644 		ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
4645 					      MOD_LOG_KEY_REMOVE);
4646 		BUG_ON(ret < 0);
4647 	}
4648 
4649 	nritems--;
4650 	btrfs_set_header_nritems(parent, nritems);
4651 	if (nritems == 0 && parent == root->node) {
4652 		BUG_ON(btrfs_header_level(root->node) != 1);
4653 		/* just turn the root into a leaf and break */
4654 		btrfs_set_header_level(root->node, 0);
4655 	} else if (slot == 0) {
4656 		struct btrfs_disk_key disk_key;
4657 
4658 		btrfs_node_key(parent, &disk_key, 0);
4659 		fixup_low_keys(root, path, &disk_key, level + 1);
4660 	}
4661 	btrfs_mark_buffer_dirty(parent);
4662 }
4663 
4664 /*
4665  * a helper function to delete the leaf pointed to by path->slots[1] and
4666  * path->nodes[1].
4667  *
4668  * This deletes the pointer in path->nodes[1] and frees the leaf
4669  * block extent.  zero is returned if it all worked out, < 0 otherwise.
4670  *
4671  * The path must have already been setup for deleting the leaf, including
4672  * all the proper balancing.  path->nodes[1] must be locked.
4673  */
4674 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4675 				    struct btrfs_root *root,
4676 				    struct btrfs_path *path,
4677 				    struct extent_buffer *leaf)
4678 {
4679 	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4680 	del_ptr(root, path, 1, path->slots[1]);
4681 
4682 	/*
4683 	 * btrfs_free_extent is expensive, we want to make sure we
4684 	 * aren't holding any locks when we call it
4685 	 */
4686 	btrfs_unlock_up_safe(path, 0);
4687 
4688 	root_sub_used(root, leaf->len);
4689 
4690 	extent_buffer_get(leaf);
4691 	btrfs_free_tree_block(trans, root, leaf, 0, 1);
4692 	free_extent_buffer_stale(leaf);
4693 }
4694 /*
4695  * delete the item at the leaf level in path.  If that empties
4696  * the leaf, remove it from the tree
4697  */
4698 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4699 		    struct btrfs_path *path, int slot, int nr)
4700 {
4701 	struct extent_buffer *leaf;
4702 	struct btrfs_item *item;
4703 	int last_off;
4704 	int dsize = 0;
4705 	int ret = 0;
4706 	int wret;
4707 	int i;
4708 	u32 nritems;
4709 	struct btrfs_map_token token;
4710 
4711 	btrfs_init_map_token(&token);
4712 
4713 	leaf = path->nodes[0];
4714 	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4715 
4716 	for (i = 0; i < nr; i++)
4717 		dsize += btrfs_item_size_nr(leaf, slot + i);
4718 
4719 	nritems = btrfs_header_nritems(leaf);
4720 
4721 	if (slot + nr != nritems) {
4722 		int data_end = leaf_data_end(root, leaf);
4723 
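		/*
		 * slide the data that belongs to the items after the deleted
		 * range up by dsize bytes, bump their offsets to match, then
		 * compact the item headers
		 */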
4724 		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4725 			      data_end + dsize,
4726 			      btrfs_leaf_data(leaf) + data_end,
4727 			      last_off - data_end);
4728 
4729 		for (i = slot + nr; i < nritems; i++) {
4730 			u32 ioff;
4731 
4732 			item = btrfs_item_nr(leaf, i);
4733 			ioff = btrfs_token_item_offset(leaf, item, &token);
4734 			btrfs_set_token_item_offset(leaf, item,
4735 						    ioff + dsize, &token);
4736 		}
4737 
4738 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4739 			      btrfs_item_nr_offset(slot + nr),
4740 			      sizeof(struct btrfs_item) *
4741 			      (nritems - slot - nr));
4742 	}
4743 	btrfs_set_header_nritems(leaf, nritems - nr);
4744 	nritems -= nr;
4745 
4746 	/* delete the leaf if we've emptied it */
4747 	if (nritems == 0) {
4748 		if (leaf == root->node) {
4749 			btrfs_set_header_level(leaf, 0);
4750 		} else {
4751 			btrfs_set_path_blocking(path);
4752 			clean_tree_block(trans, root, leaf);
4753 			btrfs_del_leaf(trans, root, path, leaf);
4754 		}
4755 	} else {
4756 		int used = leaf_space_used(leaf, 0, nritems);
4757 		if (slot == 0) {
4758 			struct btrfs_disk_key disk_key;
4759 
4760 			btrfs_item_key(leaf, &disk_key, 0);
4761 			fixup_low_keys(root, path, &disk_key, 1);
4762 		}
4763 
4764 		/* delete the leaf if it is mostly empty */
4765 		if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
4766 			/* push_leaf_left fixes the path.
4767 			 * make sure the path still points to our leaf
4768 			 * for possible call to del_ptr below
4769 			 */
4770 			slot = path->slots[1];
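			/*
			 * grab an extra reference: the push calls below may
			 * drop the path's reference to this leaf
			 */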
4771 			extent_buffer_get(leaf);
4772 
4773 			btrfs_set_path_blocking(path);
4774 			wret = push_leaf_left(trans, root, path, 1, 1,
4775 					      1, (u32)-1);
4776 			if (wret < 0 && wret != -ENOSPC)
4777 				ret = wret;
4778 
4779 			if (path->nodes[0] == leaf &&
4780 			    btrfs_header_nritems(leaf)) {
4781 				wret = push_leaf_right(trans, root, path, 1,
4782 						       1, 1, 0);
4783 				if (wret < 0 && wret != -ENOSPC)
4784 					ret = wret;
4785 			}
4786 
4787 			if (btrfs_header_nritems(leaf) == 0) {
4788 				path->slots[1] = slot;
4789 				btrfs_del_leaf(trans, root, path, leaf);
4790 				free_extent_buffer(leaf);
4791 				ret = 0;
4792 			} else {
4793 				/* if we're still in the path, make sure
4794 				 * we're dirty.  Otherwise, one of the
4795 				 * push_leaf functions must have already
4796 				 * dirtied this buffer
4797 				 */
4798 				if (path->nodes[0] == leaf)
4799 					btrfs_mark_buffer_dirty(leaf);
4800 				free_extent_buffer(leaf);
4801 			}
4802 		} else {
4803 			btrfs_mark_buffer_dirty(leaf);
4804 		}
4805 	}
4806 	return ret;
4807 }
4808 
4809 /*
4810  * search the tree again to find a leaf with lesser keys
4811  * returns 0 if it found something or 1 if there are no lesser leaves.
4812  * returns < 0 on io errors.
4813  *
4814  * This may release the path, and so you may lose any locks held at the
4815  * time you call it.
4816  */
4817 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
4818 {
4819 	struct btrfs_key key;
4820 	struct btrfs_disk_key found_key;
4821 	int ret;
4822 
4823 	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
4824 
4825 	if (key.offset > 0)
4826 		key.offset--;
4827 	else if (key.type > 0)
4828 		key.type--;
4829 	else if (key.objectid > 0)
4830 		key.objectid--;
4831 	else
4832 		return 1;
4833 
4834 	btrfs_release_path(path);
4835 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4836 	if (ret < 0)
4837 		return ret;
4838 	btrfs_item_key(path->nodes[0], &found_key, 0);
4839 	ret = comp_keys(&found_key, &key);
4840 	if (ret < 0)
4841 		return 0;
4842 	return 1;
4843 }
4844 
4845 /*
4846  * A helper function to walk down the tree starting at min_key, looking
4847  * for nodes or leaves that have a minimum transaction id.
4848  * This is used by the btree defrag code and tree logging.
4849  *
4850  * This does not cow, but it does stuff the starting key it finds back
4851  * into min_key, so you can call btrfs_search_slot with cow=1 on the
4852  * key and get a writable path.
4853  *
4854  * This does lock as it descends, and path->keep_locks should be set
4855  * to 1 by the caller.
4856  *
4857  * This honors path->lowest_level to prevent descent past a given level
4858  * of the tree.
4859  *
4860  * min_trans indicates the oldest transaction that you are interested
4861  * in walking through.  Any nodes or leaves older than min_trans are
4862  * skipped over (without reading them).
4863  *
4864  * returns zero if something useful was found, < 0 on error and 1 if there
4865  * was nothing in the tree that matched the search criteria.
4866  */
4867 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
4868 			 struct btrfs_key *max_key,
4869 			 struct btrfs_path *path,
4870 			 u64 min_trans)
4871 {
4872 	struct extent_buffer *cur;
4873 	struct btrfs_key found_key;
4874 	int slot;
4875 	int sret;
4876 	u32 nritems;
4877 	int level;
4878 	int ret = 1;
4879 
4880 	WARN_ON(!path->keep_locks);
4881 again:
4882 	cur = btrfs_read_lock_root_node(root);
4883 	level = btrfs_header_level(cur);
4884 	WARN_ON(path->nodes[level]);
4885 	path->nodes[level] = cur;
4886 	path->locks[level] = BTRFS_READ_LOCK;
4887 
4888 	if (btrfs_header_generation(cur) < min_trans) {
4889 		ret = 1;
4890 		goto out;
4891 	}
4892 	while (1) {
4893 		nritems = btrfs_header_nritems(cur);
4894 		level = btrfs_header_level(cur);
4895 		sret = bin_search(cur, min_key, level, &slot);
4896 
4897 		/* at the lowest level, we're done, setup the path and exit */
4898 		if (level == path->lowest_level) {
4899 			if (slot >= nritems)
4900 				goto find_next_key;
4901 			ret = 0;
4902 			path->slots[level] = slot;
4903 			btrfs_item_key_to_cpu(cur, &found_key, slot);
4904 			goto out;
4905 		}
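		/*
		 * no exact match: step back to the node pointer whose key
		 * range covers min_key
		 */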
4906 		if (sret && slot > 0)
4907 			slot--;
4908 		/*
4909 		 * check this node pointer against the min_trans parameter.
4910 		 * If it is too old, skip to the next one.
4911 		 */
4912 		while (slot < nritems) {
4913 			u64 blockptr;
4914 			u64 gen;
4915 
4916 			blockptr = btrfs_node_blockptr(cur, slot);
4917 			gen = btrfs_node_ptr_generation(cur, slot);
4918 			if (gen < min_trans) {
4919 				slot++;
4920 				continue;
4921 			}
4922 			break;
4923 		}
4924 find_next_key:
4925 		/*
4926 		 * we didn't find a candidate key in this node, walk forward
4927 		 * and find another one
4928 		 */
4929 		if (slot >= nritems) {
4930 			path->slots[level] = slot;
4931 			btrfs_set_path_blocking(path);
4932 			sret = btrfs_find_next_key(root, path, min_key, level,
4933 						  min_trans);
4934 			if (sret == 0) {
4935 				btrfs_release_path(path);
4936 				goto again;
4937 			} else {
4938 				goto out;
4939 			}
4940 		}
4941 		/* save our key for returning back */
4942 		btrfs_node_key_to_cpu(cur, &found_key, slot);
4943 		path->slots[level] = slot;
4944 		if (level == path->lowest_level) {
4945 			ret = 0;
4946 			unlock_up(path, level, 1, 0, NULL);
4947 			goto out;
4948 		}
4949 		btrfs_set_path_blocking(path);
4950 		cur = read_node_slot(root, cur, slot);
4951 		BUG_ON(!cur); /* -ENOMEM */
4952 
4953 		btrfs_tree_read_lock(cur);
4954 
4955 		path->locks[level - 1] = BTRFS_READ_LOCK;
4956 		path->nodes[level - 1] = cur;
4957 		unlock_up(path, level, 1, 0, NULL);
4958 		btrfs_clear_path_blocking(path, NULL, 0);
4959 	}
4960 out:
4961 	if (ret == 0)
4962 		memcpy(min_key, &found_key, sizeof(found_key));
4963 	btrfs_set_path_blocking(path);
4964 	return ret;
4965 }
4966 
4967 static void tree_move_down(struct btrfs_root *root,
4968 			   struct btrfs_path *path,
4969 			   int *level, int root_level)
4970 {
4971 	BUG_ON(*level == 0);
4972 	path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level],
4973 					path->slots[*level]);
4974 	path->slots[*level - 1] = 0;
4975 	(*level)--;
4976 }
4977 
4978 static int tree_move_next_or_upnext(struct btrfs_root *root,
4979 				    struct btrfs_path *path,
4980 				    int *level, int root_level)
4981 {
4982 	int ret = 0;
4983 	int nritems;
4984 	nritems = btrfs_header_nritems(path->nodes[*level]);
4985 
4986 	path->slots[*level]++;
4987 
4988 	while (path->slots[*level] >= nritems) {
4989 		if (*level == root_level)
4990 			return -1;
4991 
4992 		/* move upnext */
4993 		path->slots[*level] = 0;
4994 		free_extent_buffer(path->nodes[*level]);
4995 		path->nodes[*level] = NULL;
4996 		(*level)++;
4997 		path->slots[*level]++;
4998 
4999 		nritems = btrfs_header_nritems(path->nodes[*level]);
5000 		ret = 1;
5001 	}
5002 	return ret;
5003 }
5004 
5005 /*
5006  * Returns 1 if it had to move up and next. 0 is returned if it moved only next
5007  * or down. < 0 is returned when the end of the tree has been reached.
5008  */
5009 static int tree_advance(struct btrfs_root *root,
5010 			struct btrfs_path *path,
5011 			int *level, int root_level,
5012 			int allow_down,
5013 			struct btrfs_key *key)
5014 {
5015 	int ret;
5016 
5017 	if (*level == 0 || !allow_down) {
5018 		ret = tree_move_next_or_upnext(root, path, level, root_level);
5019 	} else {
5020 		tree_move_down(root, path, level, root_level);
5021 		ret = 0;
5022 	}
5023 	if (ret >= 0) {
5024 		if (*level == 0)
5025 			btrfs_item_key_to_cpu(path->nodes[*level], key,
5026 					path->slots[*level]);
5027 		else
5028 			btrfs_node_key_to_cpu(path->nodes[*level], key,
5029 					path->slots[*level]);
5030 	}
5031 	return ret;
5032 }
5033 
5034 static int tree_compare_item(struct btrfs_root *left_root,
5035 			     struct btrfs_path *left_path,
5036 			     struct btrfs_path *right_path,
5037 			     char *tmp_buf)
5038 {
5039 	int cmp;
5040 	int len1, len2;
5041 	unsigned long off1, off2;
5042 
5043 	len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
5044 	len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
5045 	if (len1 != len2)
5046 		return 1;
5047 
5048 	off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
5049 	off2 = btrfs_item_ptr_offset(right_path->nodes[0],
5050 				right_path->slots[0]);
5051 
5052 	read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
5053 
5054 	cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
5055 	if (cmp)
5056 		return 1;
5057 	return 0;
5058 }
5059 
5060 #define ADVANCE 1
5061 #define ADVANCE_ONLY_NEXT -1
5062 
5063 /*
5064  * This function compares two trees and calls the provided callback for
5065  * every changed/new/deleted item it finds.
5066  * If shared tree blocks are encountered, whole subtrees are skipped, making
5067  * the compare pretty fast on snapshotted subvolumes.
5068  *
5069  * This currently works on commit roots only. As commit roots are read only,
5070  * we don't do any locking. The commit roots are protected with transactions.
5071  * Transactions are ended and rejoined when a commit is tried in between.
5072  *
5073  * This function checks for modifications done to the trees while comparing.
5074  * If it detects a change, it aborts immediately.
5075  */
5076 int btrfs_compare_trees(struct btrfs_root *left_root,
5077 			struct btrfs_root *right_root,
5078 			btrfs_changed_cb_t changed_cb, void *ctx)
5079 {
5080 	int ret;
5081 	int cmp;
5082 	struct btrfs_trans_handle *trans = NULL;
5083 	struct btrfs_path *left_path = NULL;
5084 	struct btrfs_path *right_path = NULL;
5085 	struct btrfs_key left_key;
5086 	struct btrfs_key right_key;
5087 	char *tmp_buf = NULL;
5088 	int left_root_level;
5089 	int right_root_level;
5090 	int left_level;
5091 	int right_level;
5092 	int left_end_reached;
5093 	int right_end_reached;
5094 	int advance_left;
5095 	int advance_right;
5096 	u64 left_blockptr;
5097 	u64 right_blockptr;
5098 	u64 left_start_ctransid;
5099 	u64 right_start_ctransid;
5100 	u64 ctransid;
5101 
5102 	left_path = btrfs_alloc_path();
5103 	if (!left_path) {
5104 		ret = -ENOMEM;
5105 		goto out;
5106 	}
5107 	right_path = btrfs_alloc_path();
5108 	if (!right_path) {
5109 		ret = -ENOMEM;
5110 		goto out;
5111 	}
5112 
5113 	tmp_buf = kmalloc(left_root->leafsize, GFP_NOFS);
5114 	if (!tmp_buf) {
5115 		ret = -ENOMEM;
5116 		goto out;
5117 	}
5118 
5119 	left_path->search_commit_root = 1;
5120 	left_path->skip_locking = 1;
5121 	right_path->search_commit_root = 1;
5122 	right_path->skip_locking = 1;
5123 
5124 	spin_lock(&left_root->root_item_lock);
5125 	left_start_ctransid = btrfs_root_ctransid(&left_root->root_item);
5126 	spin_unlock(&left_root->root_item_lock);
5127 
5128 	spin_lock(&right_root->root_item_lock);
5129 	right_start_ctransid = btrfs_root_ctransid(&right_root->root_item);
5130 	spin_unlock(&right_root->root_item_lock);
5131 
5132 	trans = btrfs_join_transaction(left_root);
5133 	if (IS_ERR(trans)) {
5134 		ret = PTR_ERR(trans);
5135 		trans = NULL;
5136 		goto out;
5137 	}
5138 
5139 	/*
5140 	 * Strategy: Go to the first items of both trees. Then do
5141 	 *
5142 	 * If both trees are at level 0
5143 	 *   Compare keys of current items
5144 	 *     If left < right treat left item as new, advance left tree
5145 	 *       and repeat
5146 	 *     If left > right treat right item as deleted, advance right tree
5147 	 *       and repeat
5148 	 *     If left == right do deep compare of items, treat as changed if
5149 	 *       needed, advance both trees and repeat
5150 	 * If both trees are at the same level but not at level 0
5151 	 *   Compare keys of current nodes/leaves
5152 	 *     If left < right advance left tree and repeat
5153 	 *     If left > right advance right tree and repeat
5154 	 *     If left == right compare blockptrs of the next nodes/leaves
5155 	 *       If they match advance both trees but stay at the same level
5156 	 *         and repeat
5157 	 *       If they don't match advance both trees while allowing to go
5158 	 *         deeper and repeat
5159 	 * If tree levels are different
5160 	 *   Advance the tree that needs it and repeat
5161 	 *
5162 	 * Advancing a tree means:
5163 	 *   If we are at level 0, try to go to the next slot. If that's not
5164 	 *   possible, go one level up and repeat. Stop when we found a level
5165 	 *   where we could go to the next slot. We may at this point be on a
5166 	 *   node or a leaf.
5167 	 *
5168 	 *   If we are not at level 0 and not on shared tree blocks, go one
5169 	 *   level deeper.
5170 	 *
5171 	 *   If we are not at level 0 and on shared tree blocks, go one slot to
5172 	 *   the right if possible or go up and right.
5173 	 */
5174 
5175 	left_level = btrfs_header_level(left_root->commit_root);
5176 	left_root_level = left_level;
5177 	left_path->nodes[left_level] = left_root->commit_root;
5178 	extent_buffer_get(left_path->nodes[left_level]);
5179 
5180 	right_level = btrfs_header_level(right_root->commit_root);
5181 	right_root_level = right_level;
5182 	right_path->nodes[right_level] = right_root->commit_root;
5183 	extent_buffer_get(right_path->nodes[right_level]);
5184 
5185 	if (left_level == 0)
5186 		btrfs_item_key_to_cpu(left_path->nodes[left_level],
5187 				&left_key, left_path->slots[left_level]);
5188 	else
5189 		btrfs_node_key_to_cpu(left_path->nodes[left_level],
5190 				&left_key, left_path->slots[left_level]);
5191 	if (right_level == 0)
5192 		btrfs_item_key_to_cpu(right_path->nodes[right_level],
5193 				&right_key, right_path->slots[right_level]);
5194 	else
5195 		btrfs_node_key_to_cpu(right_path->nodes[right_level],
5196 				&right_key, right_path->slots[right_level]);
5197 
5198 	left_end_reached = right_end_reached = 0;
5199 	advance_left = advance_right = 0;
5200 
5201 	while (1) {
5202 		/*
5203 		 * We need to make sure the transaction does not get committed
5204 		 * while we do anything on commit roots. This means, we need to
5205 		 * join and leave transactions for every item that we process.
5206 		 */
5207 		if (trans && btrfs_should_end_transaction(trans, left_root)) {
5208 			btrfs_release_path(left_path);
5209 			btrfs_release_path(right_path);
5210 
5211 			ret = btrfs_end_transaction(trans, left_root);
5212 			trans = NULL;
5213 			if (ret < 0)
5214 				goto out;
5215 		}
5216 		/* now rejoin the transaction */
5217 		if (!trans) {
5218 			trans = btrfs_join_transaction(left_root);
5219 			if (IS_ERR(trans)) {
5220 				ret = PTR_ERR(trans);
5221 				trans = NULL;
5222 				goto out;
5223 			}
5224 
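			/*
			 * verify that neither root was committed to while we
			 * were outside of a transaction; if a ctransid moved,
			 * the positions we saved can no longer be trusted
			 */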
5225 			spin_lock(&left_root->root_item_lock);
5226 			ctransid = btrfs_root_ctransid(&left_root->root_item);
5227 			spin_unlock(&left_root->root_item_lock);
5228 			if (ctransid != left_start_ctransid)
5229 				left_start_ctransid = 0;
5230 
5231 			spin_lock(&right_root->root_item_lock);
5232 			ctransid = btrfs_root_ctransid(&right_root->root_item);
5233 			spin_unlock(&right_root->root_item_lock);
5234 			if (ctransid != right_start_ctransid)
5235 				right_start_ctransid = 0;
5236 
5237 			if (!left_start_ctransid || !right_start_ctransid) {
5238 				WARN(1, KERN_WARNING
5239 					"btrfs: btrfs_compare_tree detected "
5240 					"a change in one of the trees while "
5241 					"iterating. This is probably a "
5242 					"bug.\n");
5243 				ret = -EIO;
5244 				goto out;
5245 			}
5246 
5247 			/*
5248 			 * the commit root may have changed, so start again
5249 			 * where we stopped
5250 			 */
5251 			left_path->lowest_level = left_level;
5252 			right_path->lowest_level = right_level;
5253 			ret = btrfs_search_slot(NULL, left_root,
5254 					&left_key, left_path, 0, 0);
5255 			if (ret < 0)
5256 				goto out;
5257 			ret = btrfs_search_slot(NULL, right_root,
5258 					&right_key, right_path, 0, 0);
5259 			if (ret < 0)
5260 				goto out;
5261 		}
5262 
5263 		if (advance_left && !left_end_reached) {
5264 			ret = tree_advance(left_root, left_path, &left_level,
5265 					left_root_level,
5266 					advance_left != ADVANCE_ONLY_NEXT,
5267 					&left_key);
5268 			if (ret < 0)
5269 				left_end_reached = ADVANCE;
5270 			advance_left = 0;
5271 		}
5272 		if (advance_right && !right_end_reached) {
5273 			ret = tree_advance(right_root, right_path, &right_level,
5274 					right_root_level,
5275 					advance_right != ADVANCE_ONLY_NEXT,
5276 					&right_key);
5277 			if (ret < 0)
5278 				right_end_reached = ADVANCE;
5279 			advance_right = 0;
5280 		}
5281 
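		/*
		 * once one tree is fully consumed, every remaining item in the
		 * other tree is reported via the callback as new or deleted
		 */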
5282 		if (left_end_reached && right_end_reached) {
5283 			ret = 0;
5284 			goto out;
5285 		} else if (left_end_reached) {
5286 			if (right_level == 0) {
5287 				ret = changed_cb(left_root, right_root,
5288 						left_path, right_path,
5289 						&right_key,
5290 						BTRFS_COMPARE_TREE_DELETED,
5291 						ctx);
5292 				if (ret < 0)
5293 					goto out;
5294 			}
5295 			advance_right = ADVANCE;
5296 			continue;
5297 		} else if (right_end_reached) {
5298 			if (left_level == 0) {
5299 				ret = changed_cb(left_root, right_root,
5300 						left_path, right_path,
5301 						&left_key,
5302 						BTRFS_COMPARE_TREE_NEW,
5303 						ctx);
5304 				if (ret < 0)
5305 					goto out;
5306 			}
5307 			advance_left = ADVANCE;
5308 			continue;
5309 		}
5310 
5311 		if (left_level == 0 && right_level == 0) {
5312 			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5313 			if (cmp < 0) {
5314 				ret = changed_cb(left_root, right_root,
5315 						left_path, right_path,
5316 						&left_key,
5317 						BTRFS_COMPARE_TREE_NEW,
5318 						ctx);
5319 				if (ret < 0)
5320 					goto out;
5321 				advance_left = ADVANCE;
5322 			} else if (cmp > 0) {
5323 				ret = changed_cb(left_root, right_root,
5324 						left_path, right_path,
5325 						&right_key,
5326 						BTRFS_COMPARE_TREE_DELETED,
5327 						ctx);
5328 				if (ret < 0)
5329 					goto out;
5330 				advance_right = ADVANCE;
5331 			} else {
5332 				WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5333 				ret = tree_compare_item(left_root, left_path,
5334 						right_path, tmp_buf);
5335 				if (ret) {
5336 					WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5337 					ret = changed_cb(left_root, right_root,
5338 						left_path, right_path,
5339 						&left_key,
5340 						BTRFS_COMPARE_TREE_CHANGED,
5341 						ctx);
5342 					if (ret < 0)
5343 						goto out;
5344 				}
5345 				advance_left = ADVANCE;
5346 				advance_right = ADVANCE;
5347 			}
5348 		} else if (left_level == right_level) {
5349 			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5350 			if (cmp < 0) {
5351 				advance_left = ADVANCE;
5352 			} else if (cmp > 0) {
5353 				advance_right = ADVANCE;
5354 			} else {
5355 				left_blockptr = btrfs_node_blockptr(
5356 						left_path->nodes[left_level],
5357 						left_path->slots[left_level]);
5358 				right_blockptr = btrfs_node_blockptr(
5359 						right_path->nodes[right_level],
5360 						right_path->slots[right_level]);
5361 				if (left_blockptr == right_blockptr) {
5362 					/*
5363 					 * As we're on a shared block, don't
5364 					 * allow to go deeper.
5365 					 */
5366 					advance_left = ADVANCE_ONLY_NEXT;
5367 					advance_right = ADVANCE_ONLY_NEXT;
5368 				} else {
5369 					advance_left = ADVANCE;
5370 					advance_right = ADVANCE;
5371 				}
5372 			}
5373 		} else if (left_level < right_level) {
5374 			advance_right = ADVANCE;
5375 		} else {
5376 			advance_left = ADVANCE;
5377 		}
5378 	}
5379 
5380 out:
5381 	btrfs_free_path(left_path);
5382 	btrfs_free_path(right_path);
5383 	kfree(tmp_buf);
5384 
5385 	if (trans) {
5386 		if (!ret)
5387 			ret = btrfs_end_transaction(trans, left_root);
5388 		else
5389 			btrfs_end_transaction(trans, left_root);
5390 	}
5391 
5392 	return ret;
5393 }
5394 
5395 /*
5396  * this is similar to btrfs_next_leaf, but does not try to preserve
5397  * and fixup the path.  It looks for and returns the next key in the
5398  * tree based on the current path and the min_trans parameters.
5399  *
5400  * 0 is returned if another key is found, < 0 if there are any errors
5401  * and 1 is returned if there are no higher keys in the tree
5402  *
5403  * path->keep_locks should be set to 1 on the search made before
5404  * calling this function.
5405  */
5406 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5407 			struct btrfs_key *key, int level, u64 min_trans)
5408 {
5409 	int slot;
5410 	struct extent_buffer *c;
5411 
5412 	WARN_ON(!path->keep_locks);
5413 	while (level < BTRFS_MAX_LEVEL) {
5414 		if (!path->nodes[level])
5415 			return 1;
5416 
5417 		slot = path->slots[level] + 1;
5418 		c = path->nodes[level];
5419 next:
5420 		if (slot >= btrfs_header_nritems(c)) {
5421 			int ret;
5422 			int orig_lowest;
5423 			struct btrfs_key cur_key;
5424 			if (level + 1 >= BTRFS_MAX_LEVEL ||
5425 			    !path->nodes[level + 1])
5426 				return 1;
5427 
5428 			if (path->locks[level + 1]) {
5429 				level++;
5430 				continue;
5431 			}
5432 
5433 			slot = btrfs_header_nritems(c) - 1;
5434 			if (level == 0)
5435 				btrfs_item_key_to_cpu(c, &cur_key, slot);
5436 			else
5437 				btrfs_node_key_to_cpu(c, &cur_key, slot);
5438 
5439 			orig_lowest = path->lowest_level;
5440 			btrfs_release_path(path);
5441 			path->lowest_level = level;
5442 			ret = btrfs_search_slot(NULL, root, &cur_key, path,
5443 						0, 0);
5444 			path->lowest_level = orig_lowest;
5445 			if (ret < 0)
5446 				return ret;
5447 
5448 			c = path->nodes[level];
5449 			slot = path->slots[level];
5450 			if (ret == 0)
5451 				slot++;
5452 			goto next;
5453 		}
5454 
5455 		if (level == 0)
5456 			btrfs_item_key_to_cpu(c, key, slot);
5457 		else {
5458 			u64 gen = btrfs_node_ptr_generation(c, slot);
5459 
5460 			if (gen < min_trans) {
5461 				slot++;
5462 				goto next;
5463 			}
5464 			btrfs_node_key_to_cpu(c, key, slot);
5465 		}
5466 		return 0;
5467 	}
5468 	return 1;
5469 }
5470 
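/*
 * Illustrative sketch of the calling convention described above (the
 * example_* name is a placeholder, not btrfs API): do a search with
 * path->keep_locks set, then peek at the key that follows the current
 * path position.  min_trans == 0 accepts pointers of any generation.
 */
static int example_peek_next_key(struct btrfs_root *root,
				 struct btrfs_key *search_key,
				 struct btrfs_key *next_key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->keep_locks = 1;
	ret = btrfs_search_slot(NULL, root, search_key, path, 0, 0);
	if (ret < 0)
		goto out;

	/* 0: next_key filled in, 1: no higher key, < 0: error */
	ret = btrfs_find_next_key(root, path, next_key, 0, 0);
out:
	btrfs_free_path(path);
	return ret;
}
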
5471 /*
5472  * search the tree again to find a leaf with greater keys
5473  * returns 0 if it found something, or 1 if there are no greater leaves.
5474  * returns < 0 on I/O errors.
5475  */
5476 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5477 {
5478 	return btrfs_next_old_leaf(root, path, 0);
5479 }
5480 
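/*
 * Illustrative sketch (the example_* name is a placeholder, not btrfs API)
 * of the common idiom built on btrfs_next_leaf(): position the path with
 * btrfs_search_slot() and then visit every remaining item in the tree,
 * letting btrfs_next_leaf() cross leaf boundaries.
 */
static int example_walk_items(struct btrfs_root *root,
			      struct btrfs_key *first_key)
{
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int slot;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, first_key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				/* no more leaves */
				ret = 0;
				break;
			}
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		/* ... process the item at (leaf, slot) here ... */
		path->slots[0]++;
	}
out:
	btrfs_free_path(path);
	return ret;
}
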
5481 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5482 			u64 time_seq)
5483 {
5484 	int slot;
5485 	int level;
5486 	struct extent_buffer *c;
5487 	struct extent_buffer *next;
5488 	struct btrfs_key key;
5489 	u32 nritems;
5490 	int ret;
5491 	int old_spinning = path->leave_spinning;
5492 	int next_rw_lock = 0;
5493 
5494 	nritems = btrfs_header_nritems(path->nodes[0]);
5495 	if (nritems == 0)
5496 		return 1;
5497 
5498 	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5499 again:
5500 	level = 1;
5501 	next = NULL;
5502 	next_rw_lock = 0;
5503 	btrfs_release_path(path);
5504 
5505 	path->keep_locks = 1;
5506 	path->leave_spinning = 1;
5507 
5508 	if (time_seq)
5509 		ret = btrfs_search_old_slot(root, &key, path, time_seq);
5510 	else
5511 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5512 	path->keep_locks = 0;
5513 
5514 	if (ret < 0)
5515 		return ret;
5516 
5517 	nritems = btrfs_header_nritems(path->nodes[0]);
5518 	/*
5519 	 * by releasing the path above we dropped all our locks.  A balance
5520 	 * could have added more items next to the key that used to be
5521 	 * at the very end of the block.  So, check again here and
5522 	 * advance the path if there are now more items available.
5523 	 */
5524 	if (nritems > 0 && path->slots[0] < nritems - 1) {
5525 		if (ret == 0)
5526 			path->slots[0]++;
5527 		ret = 0;
5528 		goto done;
5529 	}
5530 
5531 	while (level < BTRFS_MAX_LEVEL) {
5532 		if (!path->nodes[level]) {
5533 			ret = 1;
5534 			goto done;
5535 		}
5536 
5537 		slot = path->slots[level] + 1;
5538 		c = path->nodes[level];
5539 		if (slot >= btrfs_header_nritems(c)) {
5540 			level++;
5541 			if (level == BTRFS_MAX_LEVEL) {
5542 				ret = 1;
5543 				goto done;
5544 			}
5545 			continue;
5546 		}
5547 
5548 		if (next) {
5549 			btrfs_tree_unlock_rw(next, next_rw_lock);
5550 			free_extent_buffer(next);
5551 		}
5552 
5553 		next = c;
5554 		next_rw_lock = path->locks[level];
5555 		ret = read_block_for_search(NULL, root, path, &next, level,
5556 					    slot, &key, 0);
5557 		if (ret == -EAGAIN)
5558 			goto again;
5559 
5560 		if (ret < 0) {
5561 			btrfs_release_path(path);
5562 			goto done;
5563 		}
5564 
5565 		if (!path->skip_locking) {
5566 			ret = btrfs_try_tree_read_lock(next);
5567 			if (!ret && time_seq) {
5568 				/*
5569 				 * If we don't get the lock, we may be racing
5570 				 * with push_leaf_left, which holds that lock
5571 				 * while waiting for the leaf we currently have
5572 				 * locked.  To avoid deadlocking, we give up
5573 				 * our lock and cycle.
5574 				 */
5575 				free_extent_buffer(next);
5576 				btrfs_release_path(path);
5577 				cond_resched();
5578 				goto again;
5579 			}
5580 			if (!ret) {
5581 				btrfs_set_path_blocking(path);
5582 				btrfs_tree_read_lock(next);
5583 				btrfs_clear_path_blocking(path, next,
5584 							  BTRFS_READ_LOCK);
5585 			}
5586 			next_rw_lock = BTRFS_READ_LOCK;
5587 		}
5588 		break;
5589 	}
5590 	path->slots[level] = slot;
5591 	while (1) {
5592 		level--;
5593 		c = path->nodes[level];
5594 		if (path->locks[level])
5595 			btrfs_tree_unlock_rw(c, path->locks[level]);
5596 
5597 		free_extent_buffer(c);
5598 		path->nodes[level] = next;
5599 		path->slots[level] = 0;
5600 		if (!path->skip_locking)
5601 			path->locks[level] = next_rw_lock;
5602 		if (!level)
5603 			break;
5604 
5605 		ret = read_block_for_search(NULL, root, path, &next, level,
5606 					    0, &key, 0);
5607 		if (ret == -EAGAIN)
5608 			goto again;
5609 
5610 		if (ret < 0) {
5611 			btrfs_release_path(path);
5612 			goto done;
5613 		}
5614 
5615 		if (!path->skip_locking) {
5616 			ret = btrfs_try_tree_read_lock(next);
5617 			if (!ret) {
5618 				btrfs_set_path_blocking(path);
5619 				btrfs_tree_read_lock(next);
5620 				btrfs_clear_path_blocking(path, next,
5621 							  BTRFS_READ_LOCK);
5622 			}
5623 			next_rw_lock = BTRFS_READ_LOCK;
5624 		}
5625 	}
5626 	ret = 0;
5627 done:
5628 	unlock_up(path, 0, 1, 0, NULL);
5629 	path->leave_spinning = old_spinning;
5630 	if (!old_spinning)
5631 		btrfs_set_path_blocking(path);
5632 
5633 	return ret;
5634 }
5635 
5636 /*
5637  * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5638  * searching until it gets past min_objectid or finds an item of 'type'
5639  *
5640  * returns 0 if something is found, 1 if nothing was found, and < 0 on error
5641  */
5642 int btrfs_previous_item(struct btrfs_root *root,
5643 			struct btrfs_path *path, u64 min_objectid,
5644 			int type)
5645 {
5646 	struct btrfs_key found_key;
5647 	struct extent_buffer *leaf;
5648 	u32 nritems;
5649 	int ret;
5650 
5651 	while (1) {
5652 		if (path->slots[0] == 0) {
5653 			btrfs_set_path_blocking(path);
5654 			ret = btrfs_prev_leaf(root, path);
5655 			if (ret != 0)
5656 				return ret;
5657 		} else {
5658 			path->slots[0]--;
5659 		}
5660 		leaf = path->nodes[0];
5661 		nritems = btrfs_header_nritems(leaf);
5662 		if (nritems == 0)
5663 			return 1;
5664 		if (path->slots[0] == nritems)
5665 			path->slots[0]--;
5666 
5667 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5668 		if (found_key.objectid < min_objectid)
5669 			break;
5670 		if (found_key.type == type)
5671 			return 0;
5672 		if (found_key.objectid == min_objectid &&
5673 		    found_key.type < type)
5674 			break;
5675 	}
5676 	return 1;
5677 }
5678
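/*
 * Illustrative sketch (the example_* name is a placeholder, not btrfs API)
 * of a typical btrfs_previous_item() caller: position the path at or after
 * a search key, then step backwards to the closest preceding item of the
 * wanted type, giving up once the walk drops below the objectid.
 */
static int example_find_prev_of_type(struct btrfs_root *root,
				     struct btrfs_key *search_key,
				     int type, struct btrfs_key *found_key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, search_key, path, 0, 0);
	if (ret < 0)
		goto out;

	/* 0: found one, 1: no matching item before search_key */
	ret = btrfs_previous_item(root, path, search_key->objectid, type);
	if (ret == 0)
		btrfs_item_key_to_cpu(path->nodes[0], found_key,
				      path->slots[0]);
out:
	btrfs_free_path(path);
	return ret;
}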